DEFAULT_CROP_PCT = 0.875
DEFAULT_CROP_MODE = 'center'
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5)
IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5)
IMAGENET_DPN_MEAN = (124 / 255, 117 / 255, 104 / 255)
IMAGENET_DPN_STD = tuple([1 / (.0167 * 255)] * 3)
OPENAI_CLIP_MEAN = (0.48145466, 0.4578275, 0.40821073)
OPENAI_CLIP_STD = (0.26862954, 0.26130258, 0.27577711)
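# Example (illustrative sketch, not part of the original file): applying the
# ImageNet defaults above to a CHW float tensor in [0, 1] with plain torch.
import torch
mean = torch.tensor(IMAGENET_DEFAULT_MEAN).view(3, 1, 1)
std = torch.tensor(IMAGENET_DEFAULT_STD).view(3, 1, 1)
img = torch.rand(3, 224, 224)        # dummy image
img_normalized = (img - mean) / std  # per-channel standardization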
| pytorch-image-models/timm/data/constants.py/0 | {
"file_path": "pytorch-image-models/timm/data/constants.py",
"repo_id": "pytorch-image-models",
"token_count": 236
} | 237 |
from copy import deepcopy
__all__ = ['get_img_extensions', 'is_img_extension', 'set_img_extensions', 'add_img_extensions', 'del_img_extensions']
IMG_EXTENSIONS = ('.png', '.jpg', '.jpeg') # singleton, kept public for bwd compat use
_IMG_EXTENSIONS_SET = set(IMG_EXTENSIONS) # set version, private, kept in sync
def _set_extensions(extensions):
global IMG_EXTENSIONS
global _IMG_EXTENSIONS_SET
dedupe = set() # NOTE de-duping tuple while keeping original order
IMG_EXTENSIONS = tuple(x for x in extensions if x not in dedupe and not dedupe.add(x))
_IMG_EXTENSIONS_SET = set(extensions)
def _valid_extension(x: str):
return x and isinstance(x, str) and len(x) >= 2 and x.startswith('.')
def is_img_extension(ext):
return ext in _IMG_EXTENSIONS_SET
def get_img_extensions(as_set=False):
return deepcopy(_IMG_EXTENSIONS_SET if as_set else IMG_EXTENSIONS)
def set_img_extensions(extensions):
assert len(extensions)
for x in extensions:
assert _valid_extension(x)
_set_extensions(extensions)
def add_img_extensions(ext):
if not isinstance(ext, (list, tuple, set)):
ext = (ext,)
for x in ext:
assert _valid_extension(x)
extensions = IMG_EXTENSIONS + tuple(ext)
_set_extensions(extensions)
def del_img_extensions(ext):
if not isinstance(ext, (list, tuple, set)):
ext = (ext,)
extensions = tuple(x for x in IMG_EXTENSIONS if x not in ext)
_set_extensions(extensions)
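# Example (illustrative sketch, not part of the original file): extending and
# querying the extension registry defined above.
add_img_extensions('.webp')
assert is_img_extension('.webp')
assert '.webp' in get_img_extensions()  # deduped tuple copy
del_img_extensions('.webp')
assert not is_img_extension('.webp')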
| pytorch-image-models/timm/data/readers/img_extensions.py/0 | {
"file_path": "pytorch-image-models/timm/data/readers/img_extensions.py",
"repo_id": "pytorch-image-models",
"token_count": 582
} | 238 |
from typing import Callable, Dict, List, Optional, Union, Tuple, Type
import torch
from torch import nn
try:
# NOTE we wrap torchvision fns to use timm leaf / no trace definitions
from torchvision.models.feature_extraction import create_feature_extractor as _create_feature_extractor
from torchvision.models.feature_extraction import get_graph_node_names as _get_graph_node_names
has_fx_feature_extraction = True
except ImportError:
has_fx_feature_extraction = False
__all__ = [
'register_notrace_module',
'is_notrace_module',
'get_notrace_modules',
'register_notrace_function',
'is_notrace_function',
'get_notrace_functions',
'create_feature_extractor',
'get_graph_node_names',
]
# modules to treat as leafs when tracing
_leaf_modules = set()
def register_notrace_module(module: Type[nn.Module]):
"""
Any module not under timm.models.layers should get this decorator if we don't want to trace through it.
"""
_leaf_modules.add(module)
return module
def is_notrace_module(module: Type[nn.Module]):
return module in _leaf_modules
def get_notrace_modules():
return list(_leaf_modules)
# Functions we want to autowrap (treat them as leaves)
_autowrap_functions = set()
def register_notrace_function(name_or_fn):
_autowrap_functions.add(name_or_fn)
return name_or_fn
def is_notrace_function(func: Callable):
return func in _autowrap_functions
def get_notrace_functions():
return list(_autowrap_functions)
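# Example (illustrative sketch, not part of the original file): marking a custom
# module as an FX leaf so feature extraction does not trace into its forward.
@register_notrace_module
class _NonTraceable(nn.Module):
    def forward(self, x):
        return x + 1  # internals stay opaque to the FX tracer

assert is_notrace_module(_NonTraceable)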
def get_graph_node_names(model: nn.Module) -> Tuple[List[str], List[str]]:
    assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction'
    return _get_graph_node_names(
model,
tracer_kwargs={
'leaf_modules': list(_leaf_modules),
'autowrap_functions': list(_autowrap_functions)
}
)
def create_feature_extractor(model: nn.Module, return_nodes: Union[Dict[str, str], List[str]]):
assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction'
return _create_feature_extractor(
model, return_nodes,
tracer_kwargs={
'leaf_modules': list(_leaf_modules),
'autowrap_functions': list(_autowrap_functions)
}
    )
| pytorch-image-models/timm/layers/_fx.py/0 | {
"file_path": "pytorch-image-models/timm/layers/_fx.py",
"repo_id": "pytorch-image-models",
"token_count": 845
} | 239 |
""" Activation Factory
Hacked together by / Copyright 2020 Ross Wightman
"""
from typing import Callable, Optional, Type, Union
import torch
from torch import nn
from torch.nn import functional as F
from .activations import *
from .activations_me import *
from .config import is_exportable, is_scriptable
from .typing import LayerType
# PyTorch has an optimized, native 'silu' (aka 'swish') operator as of PyTorch 1.7,
# along with 'hardsigmoid', 'hardswish', and 'mish' (1.9+). This code will use the native version if present.
# Eventually, the custom SiLU, Mish, Hard*, layers will be removed and only native variants will be used.
_has_silu = 'silu' in dir(torch.nn.functional)
_has_hardswish = 'hardswish' in dir(torch.nn.functional)
_has_hardsigmoid = 'hardsigmoid' in dir(torch.nn.functional)
_has_mish = 'mish' in dir(torch.nn.functional)
_ACT_FN_DEFAULT = dict(
silu=F.silu if _has_silu else swish,
swish=F.silu if _has_silu else swish,
mish=F.mish if _has_mish else mish,
relu=F.relu,
relu6=F.relu6,
leaky_relu=F.leaky_relu,
elu=F.elu,
celu=F.celu,
selu=F.selu,
gelu=gelu,
gelu_tanh=gelu_tanh,
quick_gelu=quick_gelu,
sigmoid=sigmoid,
tanh=tanh,
hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid,
hard_swish=F.hardswish if _has_hardswish else hard_swish,
hard_mish=hard_mish,
)
_ACT_FN_ME = dict(
silu=F.silu if _has_silu else swish_me,
swish=F.silu if _has_silu else swish_me,
mish=F.mish if _has_mish else mish_me,
hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_me,
hard_swish=F.hardswish if _has_hardswish else hard_swish_me,
hard_mish=hard_mish_me,
)
_ACT_FNS = (_ACT_FN_ME, _ACT_FN_DEFAULT)
for a in _ACT_FNS:
a.setdefault('hardsigmoid', a.get('hard_sigmoid'))
a.setdefault('hardswish', a.get('hard_swish'))
_ACT_LAYER_DEFAULT = dict(
silu=nn.SiLU if _has_silu else Swish,
swish=nn.SiLU if _has_silu else Swish,
mish=nn.Mish if _has_mish else Mish,
relu=nn.ReLU,
relu6=nn.ReLU6,
leaky_relu=nn.LeakyReLU,
elu=nn.ELU,
prelu=PReLU,
celu=nn.CELU,
selu=nn.SELU,
gelu=GELU,
gelu_tanh=GELUTanh,
quick_gelu=QuickGELU,
sigmoid=Sigmoid,
tanh=Tanh,
hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoid,
hard_swish=nn.Hardswish if _has_hardswish else HardSwish,
hard_mish=HardMish,
identity=nn.Identity,
)
_ACT_LAYER_ME = dict(
silu=nn.SiLU if _has_silu else SwishMe,
swish=nn.SiLU if _has_silu else SwishMe,
mish=nn.Mish if _has_mish else MishMe,
hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidMe,
hard_swish=nn.Hardswish if _has_hardswish else HardSwishMe,
hard_mish=HardMishMe,
)
_ACT_LAYERS = (_ACT_LAYER_ME, _ACT_LAYER_DEFAULT)
for a in _ACT_LAYERS:
a.setdefault('hardsigmoid', a.get('hard_sigmoid'))
a.setdefault('hardswish', a.get('hard_swish'))
def get_act_fn(name: Optional[LayerType] = 'relu'):
""" Activation Function Factory
    Fetching activation fns by name with this function allows export- or torchscript-friendly
    functions to be returned dynamically, based on the current config.
"""
if not name:
return None
if isinstance(name, Callable):
return name
name = name.lower()
if not (is_exportable() or is_scriptable()):
# If not exporting or scripting the model, first look for a memory-efficient version with
# custom autograd, then fallback
if name in _ACT_FN_ME:
return _ACT_FN_ME[name]
return _ACT_FN_DEFAULT[name]
def get_act_layer(name: Optional[LayerType] = 'relu'):
""" Activation Layer Factory
    Fetching activation layers by name with this function allows export- or torchscript-friendly
    layers to be returned dynamically, based on the current config.
"""
if name is None:
return None
if not isinstance(name, str):
# callable, module, etc
return name
if not name:
return None
name = name.lower()
if not (is_exportable() or is_scriptable()):
if name in _ACT_LAYER_ME:
return _ACT_LAYER_ME[name]
return _ACT_LAYER_DEFAULT[name]
def create_act_layer(
name: Optional[LayerType],
inplace: Optional[bool] = None,
**kwargs
):
act_layer = get_act_layer(name)
if act_layer is None:
return None
if inplace is None:
return act_layer(**kwargs)
try:
return act_layer(inplace=inplace, **kwargs)
except TypeError:
# recover if act layer doesn't have inplace arg
return act_layer(**kwargs)
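# Example (illustrative sketch, not part of the original file): the three entry
# points resolve names to a function, a class, and an instantiated module.
act_fn = get_act_fn('silu')                    # callable, native F.silu on PyTorch 1.7+
act_cls = get_act_layer('gelu')                # nn.Module subclass, instantiate yourself
act = create_act_layer('relu', inplace=True)   # module instance, inplace handled gracefully
assert isinstance(act, nn.Module)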
| pytorch-image-models/timm/layers/create_act.py/0 | {
"file_path": "pytorch-image-models/timm/layers/create_act.py",
"repo_id": "pytorch-image-models",
"token_count": 1997
} | 240 |
""" Layer/Module Helpers
Hacked together by / Copyright 2020 Ross Wightman
"""
from itertools import repeat
import collections.abc
# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
return tuple(x)
return tuple(repeat(x, n))
return parse
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
def make_divisible(v, divisor=8, min_value=None, round_limit=.9):
min_value = min_value or divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < round_limit * v:
new_v += divisor
return new_v
def extend_tuple(x, n):
# pads a tuple to specified n by padding with last value
if not isinstance(x, (tuple, list)):
x = (x,)
else:
x = tuple(x)
pad_n = n - len(x)
if pad_n <= 0:
return x[:n]
return x + (x[-1],) * pad_n
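# Example (illustrative sketch, not part of the original file): typical uses of
# the helpers above.
assert to_2tuple(7) == (7, 7)
assert to_2tuple((3, 5)) == (3, 5)
assert make_divisible(30) == 32              # rounded to the nearest multiple of 8
assert extend_tuple((1, 2), 4) == (1, 2, 2, 2)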
| pytorch-image-models/timm/layers/helpers.py/0 | {
"file_path": "pytorch-image-models/timm/layers/helpers.py",
"repo_id": "pytorch-image-models",
"token_count": 462
} | 241 |
""" Image to Patch Embedding using Conv2d
A convolution based approach to patchifying a 2D image w/ embedding projection.
Based on code in:
* https://github.com/google-research/vision_transformer
* https://github.com/google-research/big_vision/tree/main/big_vision
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
import math
from typing import Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn as nn
import torch.nn.functional as F
from .format import Format, nchw_to
from .helpers import to_2tuple
from .trace_utils import _assert
_logger = logging.getLogger(__name__)
class PatchEmbed(nn.Module):
""" 2D Image to Patch Embedding
"""
output_fmt: Format
dynamic_img_pad: torch.jit.Final[bool]
def __init__(
self,
img_size: Union[int, Tuple[int, int]] = 224,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
norm_layer: Optional[Callable] = None,
flatten: bool = True,
output_fmt: Optional[str] = None,
bias: bool = True,
strict_img_size: bool = True,
dynamic_img_pad: bool = False,
):
super().__init__()
self.patch_size = to_2tuple(patch_size)
self.img_size, self.grid_size, self.num_patches = self._init_img_size(img_size)
if output_fmt is not None:
self.flatten = False
self.output_fmt = Format(output_fmt)
else:
# flatten spatial dim and transpose to channels last, kept for bwd compat
self.flatten = flatten
self.output_fmt = Format.NCHW
self.strict_img_size = strict_img_size
self.dynamic_img_pad = dynamic_img_pad
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def _init_img_size(self, img_size: Union[int, Tuple[int, int]]):
assert self.patch_size
if img_size is None:
return None, None, None
img_size = to_2tuple(img_size)
grid_size = tuple([s // p for s, p in zip(img_size, self.patch_size)])
num_patches = grid_size[0] * grid_size[1]
return img_size, grid_size, num_patches
def set_input_size(
self,
img_size: Optional[Union[int, Tuple[int, int]]] = None,
patch_size: Optional[Union[int, Tuple[int, int]]] = None,
):
new_patch_size = None
if patch_size is not None:
new_patch_size = to_2tuple(patch_size)
if new_patch_size is not None and new_patch_size != self.patch_size:
with torch.no_grad():
new_proj = nn.Conv2d(
self.proj.in_channels,
self.proj.out_channels,
kernel_size=new_patch_size,
stride=new_patch_size,
bias=self.proj.bias is not None,
)
new_proj.weight.copy_(resample_patch_embed(self.proj.weight, new_patch_size, verbose=True))
if self.proj.bias is not None:
new_proj.bias.copy_(self.proj.bias)
self.proj = new_proj
self.patch_size = new_patch_size
img_size = img_size or self.img_size
if img_size != self.img_size or new_patch_size is not None:
self.img_size, self.grid_size, self.num_patches = self._init_img_size(img_size)
def feat_ratio(self, as_scalar=True) -> Union[Tuple[int, int], int]:
if as_scalar:
return max(self.patch_size)
else:
return self.patch_size
def dynamic_feat_size(self, img_size: Tuple[int, int]) -> Tuple[int, int]:
""" Get grid (feature) size for given image size taking account of dynamic padding.
NOTE: must be torchscript compatible so using fixed tuple indexing
"""
if self.dynamic_img_pad:
return math.ceil(img_size[0] / self.patch_size[0]), math.ceil(img_size[1] / self.patch_size[1])
else:
return img_size[0] // self.patch_size[0], img_size[1] // self.patch_size[1]
def forward(self, x):
B, C, H, W = x.shape
if self.img_size is not None:
if self.strict_img_size:
_assert(H == self.img_size[0], f"Input height ({H}) doesn't match model ({self.img_size[0]}).")
_assert(W == self.img_size[1], f"Input width ({W}) doesn't match model ({self.img_size[1]}).")
elif not self.dynamic_img_pad:
_assert(
H % self.patch_size[0] == 0,
f"Input height ({H}) should be divisible by patch size ({self.patch_size[0]})."
)
_assert(
W % self.patch_size[1] == 0,
f"Input width ({W}) should be divisible by patch size ({self.patch_size[1]})."
)
if self.dynamic_img_pad:
pad_h = (self.patch_size[0] - H % self.patch_size[0]) % self.patch_size[0]
pad_w = (self.patch_size[1] - W % self.patch_size[1]) % self.patch_size[1]
x = F.pad(x, (0, pad_w, 0, pad_h))
x = self.proj(x)
if self.flatten:
x = x.flatten(2).transpose(1, 2) # NCHW -> NLC
elif self.output_fmt != Format.NCHW:
x = nchw_to(x, self.output_fmt)
x = self.norm(x)
return x
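# Example (illustrative sketch, not part of the original file): a 224x224 input
# with 16x16 patches yields a 14x14 grid flattened to 196 tokens.
embed = PatchEmbed(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
tokens = embed(torch.randn(2, 3, 224, 224))
assert tokens.shape == (2, 196, 768)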
class PatchEmbedWithSize(PatchEmbed):
""" 2D Image to Patch Embedding
"""
output_fmt: Format
def __init__(
self,
img_size: Optional[int] = 224,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
norm_layer: Optional[Callable] = None,
flatten: bool = True,
output_fmt: Optional[str] = None,
bias: bool = True,
):
super().__init__(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
norm_layer=norm_layer,
flatten=flatten,
output_fmt=output_fmt,
bias=bias,
)
def forward(self, x) -> Tuple[torch.Tensor, List[int]]:
B, C, H, W = x.shape
if self.img_size is not None:
_assert(H % self.patch_size[0] == 0, f"Input image height ({H}) must be divisible by patch size ({self.patch_size[0]}).")
_assert(W % self.patch_size[1] == 0, f"Input image width ({W}) must be divisible by patch size ({self.patch_size[1]}).")
x = self.proj(x)
feat_size = x.shape[-2:]
if self.flatten:
x = x.flatten(2).transpose(1, 2) # NCHW -> NLC
elif self.output_fmt != Format.NCHW:
x = nchw_to(x, self.output_fmt)
x = self.norm(x)
return x, feat_size
# FIXME to remove, keeping for comparison for now
def resample_patch_embed_old(
patch_embed,
new_size: List[int],
interpolation: str = 'bicubic',
antialias: bool = True,
verbose: bool = False,
):
"""Resample the weights of the patch embedding kernel to target resolution.
We resample the patch embedding kernel by approximately inverting the effect
of patch resizing.
Code based on:
https://github.com/google-research/big_vision/blob/b00544b81f8694488d5f36295aeb7972f3755ffe/big_vision/models/proj/flexi/vit.py
With this resizing, we can for example load a B/8 filter into a B/16 model
and, on 2x larger input image, the result will match.
Args:
patch_embed: original parameter to be resized.
        new_size (tuple(int, int)): target shape (height, width) only.
interpolation (str): interpolation for resize
antialias (bool): use anti-aliasing filter in resize
verbose (bool): log operation
Returns:
Resized patch embedding kernel.
"""
import numpy as np
try:
from torch import vmap
except ImportError:
from functorch import vmap
assert len(patch_embed.shape) == 4, "Four dimensions expected"
assert len(new_size) == 2, "New shape should only be hw"
old_size = patch_embed.shape[-2:]
if tuple(old_size) == tuple(new_size):
return patch_embed
if verbose:
_logger.info(f"Resize patch embedding {patch_embed.shape} to {new_size}, w/ {interpolation} interpolation.")
def resize(x_np, _new_size):
x_tf = torch.Tensor(x_np)[None, None, ...]
x_upsampled = F.interpolate(
x_tf, size=_new_size, mode=interpolation, antialias=antialias)[0, 0, ...].numpy()
return x_upsampled
def get_resize_mat(_old_size, _new_size):
mat = []
for i in range(np.prod(_old_size)):
basis_vec = np.zeros(_old_size)
basis_vec[np.unravel_index(i, _old_size)] = 1.
mat.append(resize(basis_vec, _new_size).reshape(-1))
return np.stack(mat).T
resize_mat = get_resize_mat(old_size, new_size)
resize_mat_pinv = torch.tensor(np.linalg.pinv(resize_mat.T), device=patch_embed.device)
def resample_kernel(kernel):
resampled_kernel = resize_mat_pinv @ kernel.reshape(-1)
return resampled_kernel.reshape(new_size)
v_resample_kernel = vmap(vmap(resample_kernel, 0, 0), 1, 1)
orig_dtype = patch_embed.dtype
patch_embed = patch_embed.float()
patch_embed = v_resample_kernel(patch_embed)
patch_embed = patch_embed.to(orig_dtype)
return patch_embed
DTYPE_INTERMEDIATE = torch.float32
def _compute_resize_matrix(
old_size: Tuple[int, int],
new_size: Tuple[int, int],
interpolation: str,
antialias: bool,
device: torch.device,
dtype: torch.dtype = DTYPE_INTERMEDIATE
) -> torch.Tensor:
"""Computes the resize matrix basis vectors and interpolates them to new_size."""
old_h, old_w = old_size
new_h, new_w = new_size
old_total = old_h * old_w
new_total = new_h * new_w
eye_matrix = torch.eye(old_total, device=device, dtype=dtype)
basis_vectors_batch = eye_matrix.reshape(old_total, 1, old_h, old_w)
resized_basis_vectors_batch = F.interpolate(
basis_vectors_batch,
size=new_size,
mode=interpolation,
antialias=antialias,
align_corners=False
) # Output shape: (old_total, 1, new_h, new_w)
resize_matrix = resized_basis_vectors_batch.squeeze(1).permute(1, 2, 0).reshape(new_total, old_total)
return resize_matrix # Shape: (new_total, old_total)
def _apply_resampling(
patch_embed: torch.Tensor,
pinv_matrix: torch.Tensor,
new_size_tuple: Tuple[int, int],
orig_dtype: torch.dtype,
intermediate_dtype: torch.dtype = DTYPE_INTERMEDIATE
) -> torch.Tensor:
""" Simplified resampling w/o vmap use.
As proposed by https://github.com/stas-sl
"""
c_out, c_in, *_ = patch_embed.shape
patch_embed = patch_embed.reshape(c_out, c_in, -1).to(dtype=intermediate_dtype)
pinv_matrix = pinv_matrix.to(dtype=intermediate_dtype)
resampled_patch_embed = patch_embed @ pinv_matrix # (C_out, C_in, P_old * P_old) @ (P_old * P_old, P_new * P_new)
resampled_patch_embed = resampled_patch_embed.reshape(c_out, c_in, *new_size_tuple).to(dtype=orig_dtype)
return resampled_patch_embed
def resample_patch_embed(
patch_embed: torch.Tensor,
new_size: List[int],
interpolation: str = 'bicubic',
antialias: bool = True,
verbose: bool = False,
):
""" Standalone function (computes matrix on each call). """
assert len(patch_embed.shape) == 4, "Input tensor should be 4D (out_ch, in_ch, h, w)"
assert len(new_size) == 2, "New shape should only be hw (height, width)"
old_size_tuple: Tuple[int, int] = tuple(patch_embed.shape[-2:])
new_size_tuple: Tuple[int, int] = tuple(new_size)
if old_size_tuple == new_size_tuple:
return patch_embed
device = patch_embed.device
orig_dtype = patch_embed.dtype
resize_mat = _compute_resize_matrix(
old_size_tuple, new_size_tuple, interpolation, antialias, device, DTYPE_INTERMEDIATE
)
pinv_matrix = torch.linalg.pinv(resize_mat) # Calculates the pseudoinverse matrix used for resampling
resampled_patch_embed = _apply_resampling(
patch_embed, pinv_matrix, new_size_tuple, orig_dtype, DTYPE_INTERMEDIATE
)
return resampled_patch_embed
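# Example (illustrative sketch, not part of the original file): resampling a
# 16x16 projection kernel down to 8x8, e.g. to load patch-16 weights into a
# patch-8 stem.
w16 = torch.randn(768, 3, 16, 16)
w8 = resample_patch_embed(w16, [8, 8])
assert w8.shape == (768, 3, 8, 8)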
class PatchEmbedResamplerFixedOrigSize(nn.Module):
"""
Resample patch embedding weights from a fixed original size,
caching the pseudoinverse matrix based on the target size.
"""
def __init__(
self,
orig_size: Tuple[int, int],
interpolation: str = 'bicubic',
antialias: bool = True
):
"""
Args:
orig_size (Tuple[int, int]): The expected original (height, width) of input patch_embed tensors.
interpolation (str): Interpolation mode.
antialias (bool): Use anti-aliasing filter in resize.
"""
super().__init__()
assert isinstance(orig_size, tuple) and len(orig_size) == 2, \
"`orig_size` must be a tuple of (height, width)"
self.orig_size = orig_size # expected original size
self.interpolation = interpolation
self.antialias = antialias
# Cache map key is the target new_size tuple
self._pinv_cache_map: Dict[Tuple[int, int], str] = {}
def _get_or_create_pinv_matrix(
self,
new_size: Tuple[int, int],
device: torch.device,
dtype: torch.dtype = DTYPE_INTERMEDIATE
) -> torch.Tensor:
"""Retrieves the cached pinv matrix or computes and caches it for the given new_size."""
cache_key = new_size
buffer_name = self._pinv_cache_map.get(cache_key)
if buffer_name and hasattr(self, buffer_name):
pinv_matrix = getattr(self, buffer_name)
if pinv_matrix.device == device and pinv_matrix.dtype == dtype:
return pinv_matrix
# Calculate the matrix if not cached or needs update
resize_mat = _compute_resize_matrix(
self.orig_size, new_size, self.interpolation, self.antialias, device, dtype
)
pinv_matrix = torch.linalg.pinv(resize_mat) # Calculates the pseudoinverse matrix used for resampling
# Cache using register_buffer
buffer_name = f"pinv_{new_size[0]}x{new_size[1]}"
if hasattr(self, buffer_name):
delattr(self, buffer_name)
self.register_buffer(buffer_name, pinv_matrix)
self._pinv_cache_map[cache_key] = buffer_name # Map new_size key to buffer name
return pinv_matrix
def forward(self, patch_embed: torch.Tensor, new_size: List[int]) -> torch.Tensor:
""" Resamples the patch embedding weights to new_size.
Args:
patch_embed (torch.Tensor): Original weights (out_ch, in_ch, H_orig, W_orig).
new_size (List[int]): Target [height, width].
Returns:
torch.Tensor: Resampled weights.
"""
assert len(patch_embed.shape) == 4
assert len(new_size) == 2
# Input Validation
input_size = tuple(patch_embed.shape[-2:])
assert input_size == self.orig_size, \
f"Input patch_embed spatial size {input_size} does not match " \
f"module's expected original size {self.orig_size}"
new_size_tuple: Tuple[int, int] = tuple(new_size)
# Check no-op case against self.orig_size
if self.orig_size == new_size_tuple:
return patch_embed
device = patch_embed.device
orig_dtype = patch_embed.dtype
# Get or compute the required pseudoinverse matrix
pinv_matrix = self._get_or_create_pinv_matrix(new_size_tuple, device)
# Apply the resampling
resampled_patch_embed = _apply_resampling(patch_embed, pinv_matrix, new_size_tuple, orig_dtype)
return resampled_patch_embed
class PatchEmbedInterpolator(nn.Module):
"""Dynamically interpolates patch embedding weights for variable patch sizes.
This module wraps patch embedding weight resampling functionality to support
on-the-fly patch size variation during training. It handles both Conv2d and
Linear patch embeddings.
Args:
base_patch_size: The original patch size the model was initialized with
in_chans: Number of input channels
embed_dim: Embedding dimension
interpolation: Interpolation mode for resampling
antialias: Whether to use antialiasing during interpolation
"""
def __init__(
self,
base_patch_size: Tuple[int, int],
in_chans: int = 3,
embed_dim: int = 768,
interpolation: str = 'bicubic',
antialias: bool = True,
):
super().__init__()
self.base_patch_size = base_patch_size
self.in_chans = in_chans
self.embed_dim = embed_dim
self.interpolation = interpolation
self.antialias = antialias
def resample_linear_weight(
self,
weight: torch.Tensor,
target_patch_size: Tuple[int, int],
) -> torch.Tensor:
"""Resample linear patch embedding weights for a new patch size.
Args:
weight: Linear weight tensor of shape [embed_dim, patch_h * patch_w * in_chans]
target_patch_size: Target (patch_h, patch_w) to resample to
Returns:
Resampled weight tensor
"""
if target_patch_size == self.base_patch_size:
return weight
embed_dim = weight.shape[0]
base_ph, base_pw = self.base_patch_size
target_ph, target_pw = target_patch_size
# Reshape linear weight to conv2d format
# [embed_dim, ph*pw*C] -> [embed_dim, C, ph, pw]
weight_conv = weight.reshape(embed_dim, base_ph, base_pw, self.in_chans)
weight_conv = weight_conv.permute(0, 3, 1, 2)
# Resample using existing function
weight_conv_resampled = resample_patch_embed(
weight_conv,
new_size=[target_ph, target_pw],
interpolation=self.interpolation,
antialias=self.antialias,
verbose=False,
)
# Reshape back to linear format
# [embed_dim, C, ph, pw] -> [embed_dim, ph*pw*C]
weight_resampled = weight_conv_resampled.permute(0, 2, 3, 1)
weight_resampled = weight_resampled.reshape(embed_dim, -1)
return weight_resampled
def resample_conv_weight(
self,
weight: torch.Tensor,
target_patch_size: Tuple[int, int],
) -> torch.Tensor:
"""Resample conv2d patch embedding weights for a new patch size.
Args:
weight: Conv2d weight tensor of shape [embed_dim, in_chans, patch_h, patch_w]
target_patch_size: Target (patch_h, patch_w) to resample to
Returns:
Resampled weight tensor
"""
if target_patch_size == self.base_patch_size:
return weight
# Resample using existing function
weight_resampled = resample_patch_embed(
weight,
new_size=list(target_patch_size),
interpolation=self.interpolation,
antialias=self.antialias,
verbose=False,
)
return weight_resampled
def forward(
self,
patches: torch.Tensor,
proj_weight: torch.Tensor,
proj_bias: Optional[torch.Tensor] = None,
patch_size: Optional[Tuple[int, int]] = None,
is_linear: bool = True,
) -> torch.Tensor:
"""Apply patch embedding with dynamic weight resampling.
Args:
patches: Input patches
- For linear mode with resampling: [B, N, Ph, Pw, C]
- For linear mode without resampling: [B, N, Ph*Pw*C]
- For conv mode: [B, C, H, W]
proj_weight: Original projection weight
proj_bias: Optional projection bias
patch_size: Current patch size (if None, uses base_patch_size)
is_linear: Whether using linear (True) or conv2d (False) projection
Returns:
Embedded patches
"""
if patch_size is None:
patch_size = self.base_patch_size
if is_linear:
if patch_size != self.base_patch_size:
# Need to resample - expects unflattened patches
assert patches.ndim == 5, "Patches must be [B, N, Ph, Pw, C] for resampling"
B, N, Ph, Pw, C = patches.shape
# Resample the weight
weight_resampled = self.resample_linear_weight(proj_weight, patch_size)
# Flatten patches and apply linear projection
patches_flat = patches.reshape(B, N, -1)
output = torch.nn.functional.linear(patches_flat, weight_resampled, proj_bias)
else:
# No resampling needed, patches can be pre-flattened
if patches.ndim == 5:
B, N, Ph, Pw, C = patches.shape
patches = patches.reshape(B, N, -1)
output = torch.nn.functional.linear(patches, proj_weight, proj_bias)
else:
# Conv mode
if patch_size != self.base_patch_size:
weight_resampled = self.resample_conv_weight(proj_weight, patch_size)
output = torch.nn.functional.conv2d(
patches, weight_resampled, proj_bias,
stride=patch_size, padding=0
)
else:
output = torch.nn.functional.conv2d(
patches, proj_weight, proj_bias,
stride=patch_size, padding=0
)
return output
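# Example (illustrative sketch, not part of the original file): applying a conv
# patch stem at a patch size other than the one it was trained with. The 16x16
# kernel is resampled to 14x14 on the fly before the strided convolution.
interp = PatchEmbedInterpolator(base_patch_size=(16, 16), in_chans=3, embed_dim=768)
proj = nn.Conv2d(3, 768, kernel_size=16, stride=16)
out = interp(torch.randn(2, 3, 224, 224), proj.weight, proj.bias, patch_size=(14, 14), is_linear=False)
assert out.shape == (2, 768, 16, 16)  # (224 - 14) // 14 + 1 = 16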
# def divs(n, m=None):
# m = m or n // 2
# if m == 1:
# return [1]
# if n % m == 0:
# return [m] + divs(n, m - 1)
# return divs(n, m - 1)
#
#
# class FlexiPatchEmbed(nn.Module):
# """ 2D Image to Patch Embedding w/ Flexible Patch sizes (FlexiViT)
# FIXME WIP
# """
# def __init__(
# self,
# img_size=240,
# patch_size=16,
# in_chans=3,
# embed_dim=768,
# base_img_size=240,
# base_patch_size=32,
# norm_layer=None,
# flatten=True,
# bias=True,
# ):
# super().__init__()
# self.img_size = to_2tuple(img_size)
# self.patch_size = to_2tuple(patch_size)
# self.num_patches = 0
#
# # full range for 240 = (5, 6, 8, 10, 12, 14, 15, 16, 20, 24, 30, 40, 48)
# self.seqhw = (6, 8, 10, 12, 14, 15, 16, 20, 24, 30)
#
# self.base_img_size = to_2tuple(base_img_size)
# self.base_patch_size = to_2tuple(base_patch_size)
# self.base_grid_size = tuple([i // p for i, p in zip(self.base_img_size, self.base_patch_size)])
# self.base_num_patches = self.base_grid_size[0] * self.base_grid_size[1]
#
# self.flatten = flatten
# self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=bias)
# self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
#
# def forward(self, x):
# B, C, H, W = x.shape
#
# if self.patch_size == self.base_patch_size:
# weight = self.proj.weight
# else:
# weight = resample_patch_embed(self.proj.weight, self.patch_size)
# patch_size = self.patch_size
# x = F.conv2d(x, weight, bias=self.proj.bias, stride=patch_size)
# if self.flatten:
# x = x.flatten(2).transpose(1, 2) # BCHW -> BNC
# x = self.norm(x)
# return x
| pytorch-image-models/timm/layers/patch_embed.py/0 | {
"file_path": "pytorch-image-models/timm/layers/patch_embed.py",
"repo_id": "pytorch-image-models",
"token_count": 11216
} | 242 |
import torch
import math
import warnings
from torch import nn
from torch.nn.init import _calculate_fan_in_and_fan_out
def _trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
NOTE: this impl is similar to the PyTorch trunc_normal_, the bounds [a, b] are
applied while sampling the normal with mean/std applied, therefore a, b args
should be adjusted to match the range of mean, std args.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
with torch.no_grad():
return _trunc_normal_(tensor, mean, std, a, b)
def trunc_normal_tf_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
and the result is subsequently scaled and shifted by the mean and std args.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
        >>> trunc_normal_tf_(w)
"""
with torch.no_grad():
_trunc_normal_(tensor, 0, 1.0, a, b)
tensor.mul_(std).add_(mean)
return tensor
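# Example (illustrative sketch, not part of the original file): the two variants
# differ in where the bounds apply. trunc_normal_ clamps in the final (mean, std)
# space; trunc_normal_tf_ truncates the standard normal first, then scales/shifts.
w = torch.empty(3, 5)
trunc_normal_(w, mean=0., std=0.02, a=-0.04, b=0.04)  # bounds given in output units
trunc_normal_tf_(w, mean=0., std=0.02)                # default [-2, 2] pre-scale -> [-0.04, 0.04]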
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
if mode == 'fan_in':
denom = fan_in
elif mode == 'fan_out':
denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    else:
        raise ValueError(f"invalid mode {mode}")
variance = scale / denom
if distribution == "truncated_normal":
# constant is stddev of standard normal truncated to (-2, 2)
trunc_normal_tf_(tensor, std=math.sqrt(variance) / .87962566103423978)
elif distribution == "normal":
with torch.no_grad():
tensor.normal_(std=math.sqrt(variance))
elif distribution == "uniform":
bound = math.sqrt(3 * variance)
with torch.no_grad():
tensor.uniform_(-bound, bound)
else:
raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
def init_weight_vit(
module: nn.Module,
name: str,
init_bias: float = 0.02,
head_bias: float = 0.,
classifier_name: str = 'head'
):
if isinstance(module, (nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d)):
if name.startswith(classifier_name):
nn.init.zeros_(module.weight)
nn.init.constant_(module.bias, head_bias)
else:
nn.init.trunc_normal_(module.weight, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
nn.init.constant_(module.bias, init_bias)
elif hasattr(module, 'init_weights'):
module.init_weights()
def init_weight_jax(
module: nn.Module,
name: str,
head_bias: float = 0.,
classifier_name: str = 'head',
):
if isinstance(module, nn.Linear):
if name.startswith(classifier_name):
nn.init.zeros_(module.weight)
nn.init.constant_(module.bias, head_bias)
else:
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
nn.init.normal_(module.bias, std=1e-6) if 'mlp' in name else nn.init.zeros_(module.bias)
elif isinstance(module, nn.Conv2d):
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights()
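# Example (illustrative sketch, not part of the original file): applying the
# named-module initializer over a toy network, treating submodule '1' as the head.
model = nn.Sequential(nn.Linear(16, 32), nn.Linear(32, 10))
for name, module in model.named_modules():
    init_weight_vit(module, name, classifier_name='1')
assert torch.all(model[1].weight == 0)  # classifier weights zero-initialized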
| pytorch-image-models/timm/layers/weight_init.py/0 | {
"file_path": "pytorch-image-models/timm/layers/weight_init.py",
"repo_id": "pytorch-image-models",
"token_count": 2577
} | 243 |
import copy
from collections import deque, defaultdict
from dataclasses import dataclass, field, replace, asdict
from typing import Any, Deque, Dict, Tuple, Optional, Union
__all__ = ['PretrainedCfg', 'filter_pretrained_cfg', 'DefaultCfg']
@dataclass
class PretrainedCfg:
"""
"""
# weight source locations
url: Optional[Union[str, Tuple[str, str]]] = None # remote URL
file: Optional[str] = None # local / shared filesystem path
state_dict: Optional[Dict[str, Any]] = None # in-memory state dict
hf_hub_id: Optional[str] = None # Hugging Face Hub model id ('organization/model')
hf_hub_filename: Optional[str] = None # Hugging Face Hub filename (overrides default)
source: Optional[str] = None # source of cfg / weight location used (url, file, hf-hub)
architecture: Optional[str] = None # architecture variant can be set when not implicit
tag: Optional[str] = None # pretrained tag of source
custom_load: bool = False # use custom model specific model.load_pretrained() (ie for npz files)
# input / data config
input_size: Tuple[int, int, int] = (3, 224, 224)
test_input_size: Optional[Tuple[int, int, int]] = None
min_input_size: Optional[Tuple[int, int, int]] = None
fixed_input_size: bool = False
interpolation: str = 'bicubic'
crop_pct: float = 0.875
test_crop_pct: Optional[float] = None
crop_mode: str = 'center'
mean: Tuple[float, ...] = (0.485, 0.456, 0.406)
std: Tuple[float, ...] = (0.229, 0.224, 0.225)
# head / classifier config and meta-data
num_classes: int = 1000
label_offset: Optional[int] = None
label_names: Optional[Tuple[str]] = None
label_descriptions: Optional[Dict[str, str]] = None
# model attributes that vary with above or required for pretrained adaptation
pool_size: Optional[Tuple[int, ...]] = None
test_pool_size: Optional[Tuple[int, ...]] = None
first_conv: Optional[str] = None
classifier: Optional[str] = None
license: Optional[str] = None
description: Optional[str] = None
origin_url: Optional[str] = None
paper_name: Optional[str] = None
paper_ids: Optional[Union[str, Tuple[str]]] = None
notes: Optional[Tuple[str]] = None
@property
def has_weights(self):
return self.url or self.file or self.hf_hub_id
def to_dict(self, remove_source=False, remove_null=True):
return filter_pretrained_cfg(
asdict(self),
remove_source=remove_source,
remove_null=remove_null
)
def filter_pretrained_cfg(cfg, remove_source=False, remove_null=True):
filtered_cfg = {}
keep_null = {'pool_size', 'first_conv', 'classifier'} # always keep these keys, even if none
for k, v in cfg.items():
        if remove_source and k in {'url', 'file', 'hf_hub_id', 'hf_hub_filename', 'source'}:
continue
if remove_null and v is None and k not in keep_null:
continue
filtered_cfg[k] = v
return filtered_cfg
@dataclass
class DefaultCfg:
    tags: Deque[str] = field(default_factory=deque)  # ordered collection of tags, first entry is the default
cfgs: Dict[str, PretrainedCfg] = field(default_factory=dict) # pretrained cfgs by tag
is_pretrained: bool = False # at least one of the configs has a pretrained source set
@property
def default(self):
return self.cfgs[self.tags[0]]
@property
def default_with_tag(self):
tag = self.tags[0]
return tag, self.cfgs[tag]
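# Example (illustrative sketch, not part of the original file): building a cfg
# and stripping weight-source fields before serialization.
cfg = PretrainedCfg(hf_hub_id='org/model', num_classes=10, input_size=(3, 256, 256))
assert cfg.has_weights
d = cfg.to_dict(remove_source=True)  # url / file / hf_hub_* / source keys dropped
assert 'hf_hub_id' not in d and d['num_classes'] == 10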
| pytorch-image-models/timm/models/_pretrained.py/0 | {
"file_path": "pytorch-image-models/timm/models/_pretrained.py",
"repo_id": "pytorch-image-models",
"token_count": 1341
} | 244 |
""" CrossViT Model
@inproceedings{
chen2021crossvit,
title={{CrossViT: Cross-Attention Multi-Scale Vision Transformer for Image Classification}},
author={Chun-Fu (Richard) Chen and Quanfu Fan and Rameswar Panda},
booktitle={International Conference on Computer Vision (ICCV)},
year={2021}
}
Paper link: https://arxiv.org/abs/2103.14899
Original code: https://github.com/IBM/CrossViT/blob/main/models/crossvit.py
NOTE: model names have been renamed from the originals to represent the actual input resolution: all *_224 -> *_240 and *_384 -> *_408
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
Modified from Timm. https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
# Copyright IBM All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from functools import partial
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, to_2tuple, trunc_normal_, _assert
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._registry import register_model, generate_default_cfgs
from .vision_transformer import Block
__all__ = ['CrossVit'] # model_registry will add each entrypoint fn to this
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, multi_conv=False):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
if multi_conv:
if patch_size[0] == 12:
self.proj = nn.Sequential(
nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3),
nn.ReLU(inplace=True),
nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=3, padding=0),
nn.ReLU(inplace=True),
nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=1, padding=1),
)
elif patch_size[0] == 16:
self.proj = nn.Sequential(
nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3),
nn.ReLU(inplace=True),
nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=2, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=2, padding=1),
)
else:
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
_assert(H == self.img_size[0],
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
_assert(W == self.img_size[1],
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class CrossAttention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = head_dim ** -0.5
self.wq = nn.Linear(dim, dim, bias=qkv_bias)
self.wk = nn.Linear(dim, dim, bias=qkv_bias)
self.wv = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
# B1C -> B1H(C/H) -> BH1(C/H)
q = self.wq(x[:, 0:1, ...]).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
# BNC -> BNH(C/H) -> BHN(C/H)
k = self.wk(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
# BNC -> BNH(C/H) -> BHN(C/H)
v = self.wv(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
attn = (q @ k.transpose(-2, -1)) * self.scale # BH1(C/H) @ BH(C/H)N -> BH1N
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, 1, C) # (BH1N @ BHN(C/H)) -> BH1(C/H) -> B1H(C/H) -> B1C
x = self.proj(x)
x = self.proj_drop(x)
return x
class CrossAttentionBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = CrossAttention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
x = x[:, 0:1, ...] + self.drop_path(self.attn(self.norm1(x)))
return x
class MultiScaleBlock(nn.Module):
def __init__(
self,
dim,
patches,
depth,
num_heads,
mlp_ratio,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
num_branches = len(dim)
self.num_branches = num_branches
# different branch could have different embedding size, the first one is the base
self.blocks = nn.ModuleList()
for d in range(num_branches):
tmp = []
for i in range(depth[d]):
tmp.append(Block(
dim=dim[d],
num_heads=num_heads[d],
mlp_ratio=mlp_ratio[d],
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[i],
norm_layer=norm_layer,
))
if len(tmp) != 0:
self.blocks.append(nn.Sequential(*tmp))
if len(self.blocks) == 0:
self.blocks = None
self.projs = nn.ModuleList()
for d in range(num_branches):
            # NOTE: `and False` disables the identity shortcut; kept as-is to match the original implementation
            if dim[d] == dim[(d + 1) % num_branches] and False:
tmp = [nn.Identity()]
else:
tmp = [norm_layer(dim[d]), act_layer(), nn.Linear(dim[d], dim[(d + 1) % num_branches])]
self.projs.append(nn.Sequential(*tmp))
self.fusion = nn.ModuleList()
for d in range(num_branches):
d_ = (d + 1) % num_branches
nh = num_heads[d_]
            if depth[-1] == 0:  # backward compatibility:
self.fusion.append(
CrossAttentionBlock(
dim=dim[d_],
num_heads=nh,
mlp_ratio=mlp_ratio[d],
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[-1],
norm_layer=norm_layer,
))
else:
tmp = []
for _ in range(depth[-1]):
tmp.append(CrossAttentionBlock(
dim=dim[d_],
num_heads=nh,
mlp_ratio=mlp_ratio[d],
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[-1],
norm_layer=norm_layer,
))
self.fusion.append(nn.Sequential(*tmp))
self.revert_projs = nn.ModuleList()
for d in range(num_branches):
if dim[(d + 1) % num_branches] == dim[d] and False:
tmp = [nn.Identity()]
else:
tmp = [norm_layer(dim[(d + 1) % num_branches]), act_layer(),
nn.Linear(dim[(d + 1) % num_branches], dim[d])]
self.revert_projs.append(nn.Sequential(*tmp))
def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
outs_b = []
for i, block in enumerate(self.blocks):
outs_b.append(block(x[i]))
# only take the cls token out
proj_cls_token = torch.jit.annotate(List[torch.Tensor], [])
for i, proj in enumerate(self.projs):
proj_cls_token.append(proj(outs_b[i][:, 0:1, ...]))
# cross attention
outs = []
for i, (fusion, revert_proj) in enumerate(zip(self.fusion, self.revert_projs)):
tmp = torch.cat((proj_cls_token[i], outs_b[(i + 1) % self.num_branches][:, 1:, ...]), dim=1)
tmp = fusion(tmp)
reverted_proj_cls_token = revert_proj(tmp[:, 0:1, ...])
tmp = torch.cat((reverted_proj_cls_token, outs_b[i][:, 1:, ...]), dim=1)
outs.append(tmp)
return outs
def _compute_num_patches(img_size, patches):
return [i[0] // p * i[1] // p for i, p in zip(img_size, patches)]
@register_notrace_function
def scale_image(x, ss: Tuple[int, int], crop_scale: bool = False): # annotations for torchscript
"""
Pulled out of CrossViT.forward_features to bury conditional logic in a leaf node for FX tracing.
Args:
x (Tensor): input image
ss (tuple[int, int]): height and width to scale to
crop_scale (bool): whether to crop instead of interpolate to achieve the desired scale. Defaults to False
Returns:
Tensor: the "scaled" image batch tensor
"""
H, W = x.shape[-2:]
if H != ss[0] or W != ss[1]:
if crop_scale and ss[0] <= H and ss[1] <= W:
cu, cl = int(round((H - ss[0]) / 2.)), int(round((W - ss[1]) / 2.))
x = x[:, :, cu:cu + ss[0], cl:cl + ss[1]]
else:
x = torch.nn.functional.interpolate(x, size=ss, mode='bicubic', align_corners=False)
return x
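# Example (illustrative sketch, not part of the original file): the two scaling
# paths produce the same output size via different means.
img = torch.randn(1, 3, 240, 240)
out_resize = scale_image(img, (224, 224), crop_scale=False)  # bicubic interpolation
out_crop = scale_image(img, (224, 224), crop_scale=True)     # center crop
assert out_resize.shape == out_crop.shape == (1, 3, 224, 224)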
class CrossVit(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(
self,
img_size=224,
img_scale=(1.0, 1.0),
patch_size=(8, 16),
in_chans=3,
num_classes=1000,
embed_dim=(192, 384),
depth=((1, 3, 1), (1, 3, 1), (1, 3, 1)),
num_heads=(6, 12),
mlp_ratio=(2., 2., 4.),
multi_conv=False,
crop_scale=False,
qkv_bias=True,
drop_rate=0.,
pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
global_pool='token',
):
super().__init__()
assert global_pool in ('token', 'avg')
self.num_classes = num_classes
self.global_pool = global_pool
self.img_size = to_2tuple(img_size)
img_scale = to_2tuple(img_scale)
self.img_size_scaled = [tuple([int(sj * si) for sj in self.img_size]) for si in img_scale]
self.crop_scale = crop_scale # crop instead of interpolate for scale
num_patches = _compute_num_patches(self.img_size_scaled, patch_size)
self.num_branches = len(patch_size)
self.embed_dim = embed_dim
self.num_features = self.head_hidden_size = sum(embed_dim)
self.patch_embed = nn.ModuleList()
# hard-coded for torch jit script
for i in range(self.num_branches):
setattr(self, f'pos_embed_{i}', nn.Parameter(torch.zeros(1, 1 + num_patches[i], embed_dim[i])))
setattr(self, f'cls_token_{i}', nn.Parameter(torch.zeros(1, 1, embed_dim[i])))
for im_s, p, d in zip(self.img_size_scaled, patch_size, embed_dim):
self.patch_embed.append(
PatchEmbed(
img_size=im_s,
patch_size=p,
in_chans=in_chans,
embed_dim=d,
multi_conv=multi_conv,
))
self.pos_drop = nn.Dropout(p=pos_drop_rate)
total_depth = sum([sum(x[-2:]) for x in depth])
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, total_depth)] # stochastic depth decay rule
dpr_ptr = 0
self.blocks = nn.ModuleList()
for idx, block_cfg in enumerate(depth):
curr_depth = max(block_cfg[:-1]) + block_cfg[-1]
dpr_ = dpr[dpr_ptr:dpr_ptr + curr_depth]
blk = MultiScaleBlock(
embed_dim,
num_patches,
block_cfg,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr_,
norm_layer=norm_layer,
)
dpr_ptr += curr_depth
self.blocks.append(blk)
self.norm = nn.ModuleList([norm_layer(embed_dim[i]) for i in range(self.num_branches)])
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.ModuleList([
nn.Linear(embed_dim[i], num_classes) if num_classes > 0 else nn.Identity()
for i in range(self.num_branches)])
for i in range(self.num_branches):
trunc_normal_(getattr(self, f'pos_embed_{i}'), std=.02)
trunc_normal_(getattr(self, f'cls_token_{i}'), std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
out = set()
for i in range(self.num_branches):
out.add(f'cls_token_{i}')
pe = getattr(self, f'pos_embed_{i}', None)
if pe is not None and pe.requires_grad:
out.add(f'pos_embed_{i}')
return out
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('token', 'avg')
self.global_pool = global_pool
self.head = nn.ModuleList([
nn.Linear(self.embed_dim[i], num_classes) if num_classes > 0 else nn.Identity()
for i in range(self.num_branches)
])
def forward_features(self, x) -> List[torch.Tensor]:
B = x.shape[0]
xs = []
for i, patch_embed in enumerate(self.patch_embed):
x_ = x
ss = self.img_size_scaled[i]
x_ = scale_image(x_, ss, self.crop_scale)
x_ = patch_embed(x_)
cls_tokens = self.cls_token_0 if i == 0 else self.cls_token_1 # hard-coded for torch jit script
cls_tokens = cls_tokens.expand(B, -1, -1)
x_ = torch.cat((cls_tokens, x_), dim=1)
pos_embed = self.pos_embed_0 if i == 0 else self.pos_embed_1 # hard-coded for torch jit script
x_ = x_ + pos_embed
x_ = self.pos_drop(x_)
xs.append(x_)
for i, blk in enumerate(self.blocks):
xs = blk(xs)
        # NOTE: this was previously applied before the branch-token section; moved here to ensure all branch tokens pass through the final layer norm
xs = [norm(xs[i]) for i, norm in enumerate(self.norm)]
return xs
def forward_head(self, xs: List[torch.Tensor], pre_logits: bool = False) -> torch.Tensor:
xs = [x[:, 1:].mean(dim=1) for x in xs] if self.global_pool == 'avg' else [x[:, 0] for x in xs]
xs = [self.head_drop(x) for x in xs]
if pre_logits or isinstance(self.head[0], nn.Identity):
return torch.cat([x for x in xs], dim=1)
return torch.mean(torch.stack([head(xs[i]) for i, head in enumerate(self.head)], dim=0), dim=0)
def forward(self, x):
xs = self.forward_features(x)
x = self.forward_head(xs)
return x
def _create_crossvit(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
def pretrained_filter_fn(state_dict):
new_state_dict = {}
for key in state_dict.keys():
if 'pos_embed' in key or 'cls_token' in key:
new_key = key.replace(".", "_")
else:
new_key = key
new_state_dict[new_key] = state_dict[key]
return new_state_dict
return build_model_with_cfg(
CrossVit,
variant,
pretrained,
pretrained_filter_fn=pretrained_filter_fn,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 240, 240), 'pool_size': None, 'crop_pct': 0.875,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True,
'first_conv': ('patch_embed.0.proj', 'patch_embed.1.proj'),
'classifier': ('head.0', 'head.1'),
**kwargs
}
default_cfgs = generate_default_cfgs({
'crossvit_15_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_15_dagger_240.in1k': _cfg(
hf_hub_id='timm/',
first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'),
),
'crossvit_15_dagger_408.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0,
),
'crossvit_18_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_18_dagger_240.in1k': _cfg(
hf_hub_id='timm/',
first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'),
),
'crossvit_18_dagger_408.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0,
),
'crossvit_9_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_9_dagger_240.in1k': _cfg(
hf_hub_id='timm/',
first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'),
),
'crossvit_base_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_small_240.in1k': _cfg(hf_hub_id='timm/'),
'crossvit_tiny_240.in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def crossvit_tiny_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[96, 192], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]],
num_heads=[3, 3], mlp_ratio=[4, 4, 1])
model = _create_crossvit(variant='crossvit_tiny_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_small_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]],
num_heads=[6, 6], mlp_ratio=[4, 4, 1])
model = _create_crossvit(variant='crossvit_small_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_base_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[384, 768], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]],
num_heads=[12, 12], mlp_ratio=[4, 4, 1])
model = _create_crossvit(variant='crossvit_base_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_9_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]],
num_heads=[4, 4], mlp_ratio=[3, 3, 1])
model = _create_crossvit(variant='crossvit_9_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_15_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]],
num_heads=[6, 6], mlp_ratio=[3, 3, 1])
model = _create_crossvit(variant='crossvit_15_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_18_240(pretrained=False, **kwargs) -> CrossVit:
    model_args = dict(
        img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]],
        num_heads=[7, 7], mlp_ratio=[3, 3, 1])
model = _create_crossvit(variant='crossvit_18_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_9_dagger_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]],
num_heads=[4, 4], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_9_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_15_dagger_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]],
num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_15_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_15_dagger_408(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]],
num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_15_dagger_408', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_18_dagger_240(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]],
num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_18_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def crossvit_18_dagger_408(pretrained=False, **kwargs) -> CrossVit:
model_args = dict(
img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]],
num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True)
model = _create_crossvit(variant='crossvit_18_dagger_408', pretrained=pretrained, **dict(model_args, **kwargs))
return model
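# Usage sketch (illustrative, not part of the original file): any registered
# variant above can be built directly, e.g.
#   model = crossvit_tiny_240(pretrained=False).eval()
#   x = torch.randn(1, 3, 240, 240)
#   logits = model(x)  # (1, 1000); img_scale=(1.0, 224/240) means the two
#   branches see 240- and 224-scaled views of the input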
| pytorch-image-models/timm/models/crossvit.py/0 | {
"file_path": "pytorch-image-models/timm/models/crossvit.py",
"repo_id": "pytorch-image-models",
"token_count": 12479
} | 245 |
# FastViT for PyTorch
#
# Original implementation and weights from https://github.com/apple/ml-fastvit
#
# For licensing see accompanying LICENSE file at https://github.com/apple/ml-fastvit/tree/main
# Original work is copyright (C) 2023 Apple Inc. All Rights Reserved.
#
import os
from functools import partial
from typing import List, Optional, Tuple, Type, Union
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, trunc_normal_, create_conv2d, ConvNormAct, SqueezeExcite, use_fused_attn, \
ClassifierHead
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['FastVit']
def num_groups(group_size, channels):
if not group_size: # 0 or None
return 1 # normal conv with 1 group
else:
# NOTE group_size == 1 -> depthwise conv
assert channels % group_size == 0
return channels // group_size
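def _num_groups_examples() -> None:
    # Illustrative sketch (not used by the models): how `group_size` maps to
    # conv groups in the blocks below.
    assert num_groups(0, 64) == 1       # dense conv, single group
    assert num_groups(None, 64) == 1    # dense conv, single group
    assert num_groups(1, 64) == 64      # depthwise conv, one channel per group
    assert num_groups(16, 64) == 4      # grouped conv, 16 channels per group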
class MobileOneBlock(nn.Module):
"""MobileOne building block.
This block has a multi-branched architecture at train-time
and plain-CNN style architecture at inference time
For more details, please refer to our paper:
`An Improved One millisecond Mobile Backbone` -
https://arxiv.org/pdf/2206.04040.pdf
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int,
stride: int = 1,
dilation: int = 1,
group_size: int = 0,
inference_mode: bool = False,
use_se: bool = False,
use_act: bool = True,
use_scale_branch: bool = True,
num_conv_branches: int = 1,
act_layer: Type[nn.Module] = nn.GELU,
) -> None:
"""Construct a MobileOneBlock module.
Args:
in_chs: Number of channels in the input.
out_chs: Number of channels produced by the block.
kernel_size: Size of the convolution kernel.
stride: Stride size.
dilation: Kernel dilation factor.
group_size: Convolution group size.
inference_mode: If True, instantiates model in inference mode.
use_se: Whether to use SE-ReLU activations.
use_act: Whether to use activation. Default: ``True``
use_scale_branch: Whether to use scale branch. Default: ``True``
num_conv_branches: Number of linear conv branches.
"""
super(MobileOneBlock, self).__init__()
self.inference_mode = inference_mode
self.groups = num_groups(group_size, in_chs)
self.stride = stride
self.dilation = dilation
self.kernel_size = kernel_size
self.in_chs = in_chs
self.out_chs = out_chs
self.num_conv_branches = num_conv_branches
# Check if SE-ReLU is requested
self.se = SqueezeExcite(out_chs, rd_divisor=1) if use_se else nn.Identity()
if inference_mode:
self.reparam_conv = create_conv2d(
in_chs,
out_chs,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
groups=self.groups,
bias=True,
)
else:
# Re-parameterizable skip connection
self.reparam_conv = None
self.identity = (
nn.BatchNorm2d(num_features=in_chs)
if out_chs == in_chs and stride == 1
else None
)
# Re-parameterizable conv branches
if num_conv_branches > 0:
self.conv_kxk = nn.ModuleList([
ConvNormAct(
self.in_chs,
self.out_chs,
kernel_size=kernel_size,
stride=self.stride,
groups=self.groups,
apply_act=False,
) for _ in range(self.num_conv_branches)
])
else:
self.conv_kxk = None
# Re-parameterizable scale branch
self.conv_scale = None
if kernel_size > 1 and use_scale_branch:
self.conv_scale = ConvNormAct(
self.in_chs,
self.out_chs,
kernel_size=1,
stride=self.stride,
groups=self.groups,
apply_act=False
)
self.act = act_layer() if use_act else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Apply forward pass."""
# Inference mode forward pass.
if self.reparam_conv is not None:
return self.act(self.se(self.reparam_conv(x)))
# Multi-branched train-time forward pass.
# Identity branch output
identity_out = 0
if self.identity is not None:
identity_out = self.identity(x)
# Scale branch output
scale_out = 0
if self.conv_scale is not None:
scale_out = self.conv_scale(x)
# Other kxk conv branches
out = scale_out + identity_out
if self.conv_kxk is not None:
for rc in self.conv_kxk:
out += rc(x)
return self.act(self.se(out))
def reparameterize(self):
"""Following works like `RepVGG: Making VGG-style ConvNets Great Again` -
https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched
architecture used at training time to obtain a plain CNN-like structure
for inference.
"""
if self.reparam_conv is not None:
return
kernel, bias = self._get_kernel_bias()
self.reparam_conv = create_conv2d(
in_channels=self.in_chs,
out_channels=self.out_chs,
kernel_size=self.kernel_size,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
bias=True,
)
self.reparam_conv.weight.data = kernel
self.reparam_conv.bias.data = bias
# Delete un-used branches
for name, para in self.named_parameters():
if 'reparam_conv' in name:
continue
para.detach_()
self.__delattr__("conv_kxk")
self.__delattr__("conv_scale")
if hasattr(self, "identity"):
self.__delattr__("identity")
self.inference_mode = True
def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""Method to obtain re-parameterized kernel and bias.
Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83
Returns:
Tuple of (kernel, bias) after fusing branches.
"""
# get weights and bias of scale branch
kernel_scale = 0
bias_scale = 0
if self.conv_scale is not None:
kernel_scale, bias_scale = self._fuse_bn_tensor(self.conv_scale)
# Pad scale branch kernel to match conv branch kernel size.
pad = self.kernel_size // 2
kernel_scale = torch.nn.functional.pad(kernel_scale, [pad, pad, pad, pad])
# get weights and bias of skip branch
kernel_identity = 0
bias_identity = 0
if self.identity is not None:
kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity)
# get weights and bias of conv branches
kernel_conv = 0
bias_conv = 0
if self.conv_kxk is not None:
for ix in range(self.num_conv_branches):
_kernel, _bias = self._fuse_bn_tensor(self.conv_kxk[ix])
kernel_conv += _kernel
bias_conv += _bias
kernel_final = kernel_conv + kernel_scale + kernel_identity
bias_final = bias_conv + bias_scale + bias_identity
return kernel_final, bias_final
def _fuse_bn_tensor(
self, branch: Union[nn.Sequential, nn.BatchNorm2d]
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Method to fuse batchnorm layer with preceding conv layer.
Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95
Args:
branch: Sequence of ops to be fused.
Returns:
Tuple of (kernel, bias) after fusing batchnorm.
"""
if isinstance(branch, ConvNormAct):
kernel = branch.conv.weight
running_mean = branch.bn.running_mean
running_var = branch.bn.running_var
gamma = branch.bn.weight
beta = branch.bn.bias
eps = branch.bn.eps
else:
assert isinstance(branch, nn.BatchNorm2d)
if not hasattr(self, "id_tensor"):
input_dim = self.in_chs // self.groups
kernel_value = torch.zeros(
(self.in_chs, input_dim, self.kernel_size, self.kernel_size),
dtype=branch.weight.dtype,
device=branch.weight.device,
)
for i in range(self.in_chs):
kernel_value[
i, i % input_dim, self.kernel_size // 2, self.kernel_size // 2
] = 1
self.id_tensor = kernel_value
kernel = self.id_tensor
running_mean = branch.running_mean
running_var = branch.running_var
gamma = branch.weight
beta = branch.bias
eps = branch.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
return kernel * t, beta - running_mean * gamma / std
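def _check_mobileone_reparam() -> None:
    # Equivalence sketch (illustrative, not called by the library): in eval
    # mode, the multi-branch train-time block and its reparameterized
    # single-conv form should match to within float tolerance.
    block = MobileOneBlock(8, 8, kernel_size=3, stride=1, group_size=1).eval()
    x = torch.randn(1, 8, 16, 16)
    with torch.no_grad():
        y_branches = block(x)
        block.reparameterize()
        y_fused = block(x)
    assert torch.allclose(y_branches, y_fused, atol=1e-5)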
class ReparamLargeKernelConv(nn.Module):
"""Building Block of RepLKNet
This class defines overparameterized large kernel conv block
introduced in `RepLKNet <https://arxiv.org/abs/2203.06717>`_
Reference: https://github.com/DingXiaoH/RepLKNet-pytorch
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int,
stride: int,
group_size: int,
small_kernel: Optional[int] = None,
use_se: bool = False,
act_layer: Optional[nn.Module] = None,
inference_mode: bool = False,
) -> None:
"""Construct a ReparamLargeKernelConv module.
Args:
in_chs: Number of input channels.
out_chs: Number of output channels.
kernel_size: Kernel size of the large kernel conv branch.
            stride: Stride size.
            group_size: Group size.
            small_kernel: Kernel size of small kernel conv branch.
            use_se: Whether to apply squeeze-and-excitation to the output. Default: ``False``
            act_layer: Optional activation module applied to the output. Default: ``None``
            inference_mode: If True, instantiates model in inference mode. Default: ``False``
"""
super(ReparamLargeKernelConv, self).__init__()
self.stride = stride
self.groups = num_groups(group_size, in_chs)
self.in_chs = in_chs
self.out_chs = out_chs
self.kernel_size = kernel_size
self.small_kernel = small_kernel
if inference_mode:
self.reparam_conv = create_conv2d(
in_chs,
out_chs,
kernel_size=kernel_size,
stride=stride,
dilation=1,
groups=self.groups,
bias=True,
)
else:
self.reparam_conv = None
self.large_conv = ConvNormAct(
in_chs,
out_chs,
kernel_size=kernel_size,
stride=self.stride,
groups=self.groups,
apply_act=False,
)
if small_kernel is not None:
assert (
small_kernel <= kernel_size
), "The kernel size for re-param cannot be larger than the large kernel!"
self.small_conv = ConvNormAct(
in_chs,
out_chs,
kernel_size=small_kernel,
stride=self.stride,
groups=self.groups,
apply_act=False,
)
self.se = SqueezeExcite(out_chs, rd_ratio=0.25) if use_se else nn.Identity()
# FIXME output of this act was not used in original impl, likely due to bug
self.act = act_layer() if act_layer is not None else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.reparam_conv is not None:
out = self.reparam_conv(x)
else:
out = self.large_conv(x)
if self.small_conv is not None:
out = out + self.small_conv(x)
out = self.se(out)
out = self.act(out)
return out
def get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""Method to obtain re-parameterized kernel and bias.
Reference: https://github.com/DingXiaoH/RepLKNet-pytorch
Returns:
Tuple of (kernel, bias) after fusing branches.
"""
eq_k, eq_b = self._fuse_bn(self.large_conv.conv, self.large_conv.bn)
if hasattr(self, "small_conv"):
small_k, small_b = self._fuse_bn(self.small_conv.conv, self.small_conv.bn)
eq_b += small_b
eq_k += nn.functional.pad(
small_k, [(self.kernel_size - self.small_kernel) // 2] * 4
)
return eq_k, eq_b
def reparameterize(self) -> None:
"""
Following works like `RepVGG: Making VGG-style ConvNets Great Again` -
https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched
architecture used at training time to obtain a plain CNN-like structure
for inference.
"""
eq_k, eq_b = self.get_kernel_bias()
self.reparam_conv = create_conv2d(
self.in_chs,
self.out_chs,
kernel_size=self.kernel_size,
stride=self.stride,
groups=self.groups,
bias=True,
)
self.reparam_conv.weight.data = eq_k
self.reparam_conv.bias.data = eq_b
self.__delattr__("large_conv")
if hasattr(self, "small_conv"):
self.__delattr__("small_conv")
@staticmethod
def _fuse_bn(
conv: nn.Conv2d, bn: nn.BatchNorm2d
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Method to fuse batchnorm layer with conv layer.
Args:
conv: Convolutional kernel weights.
bn: Batchnorm 2d layer.
Returns:
Tuple of (kernel, bias) after fusing batchnorm.
"""
kernel = conv.weight
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
return kernel * t, beta - running_mean * gamma / std
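# Fusion math used by `_fuse_bn` above (illustrative restatement): for a conv
# weight W followed by BatchNorm with running mean mu, running var var, scale
# gamma, shift beta and epsilon eps:
#   W_fused = W * gamma / sqrt(var + eps)
#   b_fused = beta - mu * gamma / sqrt(var + eps)
# so conv + BN collapses into a single conv with bias.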
def convolutional_stem(
in_chs: int,
out_chs: int,
act_layer: Type[nn.Module] = nn.GELU,
inference_mode: bool = False
) -> nn.Sequential:
"""Build convolutional stem with MobileOne blocks.
Args:
in_chs: Number of input channels.
out_chs: Number of output channels.
inference_mode: Flag to instantiate model in inference mode. Default: ``False``
Returns:
nn.Sequential object with stem elements.
"""
return nn.Sequential(
MobileOneBlock(
in_chs=in_chs,
out_chs=out_chs,
kernel_size=3,
stride=2,
act_layer=act_layer,
inference_mode=inference_mode,
),
MobileOneBlock(
in_chs=out_chs,
out_chs=out_chs,
kernel_size=3,
stride=2,
group_size=1,
act_layer=act_layer,
inference_mode=inference_mode,
),
MobileOneBlock(
in_chs=out_chs,
out_chs=out_chs,
kernel_size=1,
stride=1,
act_layer=act_layer,
inference_mode=inference_mode,
),
)
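def _check_stem_shapes() -> None:
    # Shape sketch (illustrative): the two stride-2 MobileOne blocks give an
    # overall 4x spatial reduction, e.g. 256x256 -> 64x64.
    stem = convolutional_stem(3, 48).eval()
    x = torch.randn(1, 3, 256, 256)
    with torch.no_grad():
        y = stem(x)
    assert y.shape == (1, 48, 64, 64)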
class Attention(nn.Module):
"""Multi-headed Self Attention module.
Source modified from:
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim: int,
head_dim: int = 32,
qkv_bias: bool = False,
attn_drop: float = 0.0,
proj_drop: float = 0.0,
) -> None:
"""Build MHSA module that can handle 3D or 4D input tensors.
Args:
dim: Number of embedding dimensions.
head_dim: Number of hidden dimensions per head. Default: ``32``
qkv_bias: Use bias or not. Default: ``False``
attn_drop: Dropout rate for attention tensor.
proj_drop: Dropout rate for projection tensor.
"""
super().__init__()
assert dim % head_dim == 0, "dim should be divisible by head_dim"
self.head_dim = head_dim
self.num_heads = dim // head_dim
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, C, H, W = x.shape
N = H * W
x = x.flatten(2).transpose(-2, -1) # (B, N, C)
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, self.head_dim)
.permute(2, 0, 3, 1, 4)
)
q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
if self.fused_attn:
x = torch.nn.functional.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
x = x.transpose(-2, -1).reshape(B, C, H, W)
return x
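def _check_attention_shapes() -> None:
    # Shape sketch (illustrative): this Attention consumes and returns NCHW
    # maps, flattening HxW to tokens internally, so it can stand in for a
    # conv mixer inside a stage without any layout changes.
    attn = Attention(dim=64, head_dim=32).eval()
    x = torch.randn(2, 64, 8, 8)
    with torch.no_grad():
        y = attn(x)
    assert y.shape == x.shape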
class PatchEmbed(nn.Module):
"""Convolutional patch embedding layer."""
def __init__(
self,
patch_size: int,
stride: int,
in_chs: int,
embed_dim: int,
act_layer: Type[nn.Module] = nn.GELU,
lkc_use_act: bool = False,
use_se: bool = False,
inference_mode: bool = False,
) -> None:
"""Build patch embedding layer.
Args:
patch_size: Patch size for embedding computation.
stride: Stride for convolutional embedding layer.
in_chs: Number of channels of input tensor.
embed_dim: Number of embedding dimensions.
inference_mode: Flag to instantiate model in inference mode. Default: ``False``
"""
super().__init__()
self.proj = nn.Sequential(
ReparamLargeKernelConv(
in_chs=in_chs,
out_chs=embed_dim,
kernel_size=patch_size,
stride=stride,
group_size=1,
small_kernel=3,
use_se=use_se,
act_layer=act_layer if lkc_use_act else None, # NOTE original weights didn't use this act
inference_mode=inference_mode,
),
MobileOneBlock(
in_chs=embed_dim,
out_chs=embed_dim,
kernel_size=1,
stride=1,
use_se=False,
act_layer=act_layer,
inference_mode=inference_mode,
)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.proj(x)
return x
class LayerScale2d(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim, 1, 1))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class RepMixer(nn.Module):
"""Reparameterizable token mixer.
For more details, please refer to our paper:
`FastViT: A Fast Hybrid Vision Transformer using Structural Reparameterization <https://arxiv.org/pdf/2303.14189.pdf>`_
"""
def __init__(
self,
dim,
kernel_size=3,
layer_scale_init_value=1e-5,
inference_mode: bool = False,
):
"""Build RepMixer Module.
Args:
dim: Input feature map dimension. :math:`C_{in}` from an expected input of size :math:`(B, C_{in}, H, W)`.
kernel_size: Kernel size for spatial mixing. Default: 3
layer_scale_init_value: Initial value for layer scale. Default: 1e-5
inference_mode: If True, instantiates model in inference mode. Default: ``False``
"""
super().__init__()
self.dim = dim
self.kernel_size = kernel_size
self.inference_mode = inference_mode
if inference_mode:
self.reparam_conv = nn.Conv2d(
self.dim,
self.dim,
kernel_size=self.kernel_size,
stride=1,
padding=self.kernel_size // 2,
groups=self.dim,
bias=True,
)
else:
self.reparam_conv = None
self.norm = MobileOneBlock(
dim,
dim,
kernel_size,
group_size=1,
use_act=False,
use_scale_branch=False,
num_conv_branches=0,
)
self.mixer = MobileOneBlock(
dim,
dim,
kernel_size,
group_size=1,
use_act=False,
)
if layer_scale_init_value is not None:
self.layer_scale = LayerScale2d(dim, layer_scale_init_value)
else:
self.layer_scale = nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.reparam_conv is not None:
x = self.reparam_conv(x)
else:
x = x + self.layer_scale(self.mixer(x) - self.norm(x))
return x
def reparameterize(self) -> None:
"""Reparameterize mixer and norm into a single
convolutional layer for efficient inference.
"""
if self.inference_mode:
return
self.mixer.reparameterize()
self.norm.reparameterize()
if isinstance(self.layer_scale, LayerScale2d):
w = self.mixer.id_tensor + self.layer_scale.gamma.unsqueeze(-1) * (
self.mixer.reparam_conv.weight - self.norm.reparam_conv.weight
)
b = torch.squeeze(self.layer_scale.gamma) * (
self.mixer.reparam_conv.bias - self.norm.reparam_conv.bias
)
else:
w = (
self.mixer.id_tensor
+ self.mixer.reparam_conv.weight
- self.norm.reparam_conv.weight
)
b = self.mixer.reparam_conv.bias - self.norm.reparam_conv.bias
self.reparam_conv = create_conv2d(
self.dim,
self.dim,
kernel_size=self.kernel_size,
stride=1,
groups=self.dim,
bias=True,
)
self.reparam_conv.weight.data = w
self.reparam_conv.bias.data = b
for name, para in self.named_parameters():
if 'reparam_conv' in name:
continue
para.detach_()
self.__delattr__("mixer")
self.__delattr__("norm")
self.__delattr__("layer_scale")
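def _check_repmixer_reparam() -> None:
    # Equivalence sketch (illustrative): the residual mixing
    # x + gamma * (mixer(x) - norm(x)) collapses into a single depthwise conv
    # after reparameterize().
    mixer = RepMixer(dim=16, kernel_size=3).eval()
    x = torch.randn(1, 16, 14, 14)
    with torch.no_grad():
        y_train = mixer(x)
        mixer.reparameterize()
        y_fused = mixer(x)
    assert torch.allclose(y_train, y_fused, atol=1e-5)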
class ConvMlp(nn.Module):
"""Convolutional FFN Module."""
def __init__(
self,
in_chs: int,
hidden_channels: Optional[int] = None,
out_chs: Optional[int] = None,
act_layer: Type[nn.Module] = nn.GELU,
drop: float = 0.0,
) -> None:
"""Build convolutional FFN module.
Args:
in_chs: Number of input channels.
hidden_channels: Number of channels after expansion. Default: None
out_chs: Number of output channels. Default: None
act_layer: Activation layer. Default: ``GELU``
drop: Dropout rate. Default: ``0.0``.
"""
super().__init__()
out_chs = out_chs or in_chs
hidden_channels = hidden_channels or in_chs
self.conv = ConvNormAct(
in_chs,
out_chs,
kernel_size=7,
groups=in_chs,
apply_act=False,
)
self.fc1 = nn.Conv2d(in_chs, hidden_channels, kernel_size=1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_channels, out_chs, kernel_size=1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m: nn.Module) -> None:
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.conv(x)
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class RepConditionalPosEnc(nn.Module):
"""Implementation of conditional positional encoding.
For more details refer to paper:
`Conditional Positional Encodings for Vision Transformers <https://arxiv.org/pdf/2102.10882.pdf>`_
In our implementation, we can reparameterize this module to eliminate a skip connection.
"""
def __init__(
self,
dim: int,
dim_out: Optional[int] = None,
spatial_shape: Union[int, Tuple[int, int]] = (7, 7),
inference_mode=False,
) -> None:
"""Build reparameterizable conditional positional encoding
Args:
dim: Number of input channels.
            dim_out: Number of output channels. Default: ``None`` (same as ``dim``)
spatial_shape: Spatial shape of kernel for positional encoding. Default: (7, 7)
inference_mode: Flag to instantiate block in inference mode. Default: ``False``
"""
super(RepConditionalPosEnc, self).__init__()
if isinstance(spatial_shape, int):
spatial_shape = tuple([spatial_shape] * 2)
        assert isinstance(spatial_shape, tuple), (
            f'"spatial_shape" must be a sequence or int, '
            f"got {type(spatial_shape)} instead."
        )
assert len(spatial_shape) == 2, (
f'Length of "spatial_shape" should be 2, '
f"got {len(spatial_shape)} instead."
)
self.spatial_shape = spatial_shape
self.dim = dim
self.dim_out = dim_out or dim
self.groups = dim
if inference_mode:
self.reparam_conv = nn.Conv2d(
self.dim,
self.dim_out,
kernel_size=self.spatial_shape,
stride=1,
padding=spatial_shape[0] // 2,
groups=self.groups,
bias=True,
)
else:
self.reparam_conv = None
self.pos_enc = nn.Conv2d(
self.dim,
self.dim_out,
spatial_shape,
1,
int(spatial_shape[0] // 2),
groups=self.groups,
bias=True,
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.reparam_conv is not None:
x = self.reparam_conv(x)
else:
x = self.pos_enc(x) + x
return x
def reparameterize(self) -> None:
# Build equivalent Id tensor
input_dim = self.dim // self.groups
kernel_value = torch.zeros(
(
self.dim,
input_dim,
self.spatial_shape[0],
self.spatial_shape[1],
),
dtype=self.pos_enc.weight.dtype,
device=self.pos_enc.weight.device,
)
for i in range(self.dim):
kernel_value[
i,
i % input_dim,
self.spatial_shape[0] // 2,
self.spatial_shape[1] // 2,
] = 1
id_tensor = kernel_value
# Reparameterize Id tensor and conv
w_final = id_tensor + self.pos_enc.weight
b_final = self.pos_enc.bias
# Introduce reparam conv
self.reparam_conv = nn.Conv2d(
self.dim,
self.dim_out,
kernel_size=self.spatial_shape,
stride=1,
padding=int(self.spatial_shape[0] // 2),
groups=self.groups,
bias=True,
)
self.reparam_conv.weight.data = w_final
self.reparam_conv.bias.data = b_final
for name, para in self.named_parameters():
if 'reparam_conv' in name:
continue
para.detach_()
self.__delattr__("pos_enc")
class RepMixerBlock(nn.Module):
"""Implementation of Metaformer block with RepMixer as token mixer.
For more details on Metaformer structure, please refer to:
`MetaFormer Is Actually What You Need for Vision <https://arxiv.org/pdf/2111.11418.pdf>`_
"""
def __init__(
self,
dim: int,
kernel_size: int = 3,
mlp_ratio: float = 4.0,
act_layer: Type[nn.Module] = nn.GELU,
proj_drop: float = 0.0,
drop_path: float = 0.0,
layer_scale_init_value: float = 1e-5,
inference_mode: bool = False,
):
"""Build RepMixer Block.
Args:
dim: Number of embedding dimensions.
kernel_size: Kernel size for repmixer. Default: 3
mlp_ratio: MLP expansion ratio. Default: 4.0
act_layer: Activation layer. Default: ``nn.GELU``
proj_drop: Dropout rate. Default: 0.0
drop_path: Drop path rate. Default: 0.0
layer_scale_init_value: Layer scale value at initialization. Default: 1e-5
inference_mode: Flag to instantiate block in inference mode. Default: ``False``
"""
super().__init__()
self.token_mixer = RepMixer(
dim,
kernel_size=kernel_size,
layer_scale_init_value=layer_scale_init_value,
inference_mode=inference_mode,
)
self.mlp = ConvMlp(
in_chs=dim,
hidden_channels=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
if layer_scale_init_value is not None:
self.layer_scale = LayerScale2d(dim, layer_scale_init_value)
else:
self.layer_scale = nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, x):
x = self.token_mixer(x)
x = x + self.drop_path(self.layer_scale(self.mlp(x)))
return x
class AttentionBlock(nn.Module):
"""Implementation of metaformer block with MHSA as token mixer.
For more details on Metaformer structure, please refer to:
`MetaFormer Is Actually What You Need for Vision <https://arxiv.org/pdf/2111.11418.pdf>`_
"""
def __init__(
self,
dim: int,
mlp_ratio: float = 4.0,
act_layer: Type[nn.Module] = nn.GELU,
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
proj_drop: float = 0.0,
drop_path: float = 0.0,
layer_scale_init_value: float = 1e-5,
):
"""Build Attention Block.
Args:
dim: Number of embedding dimensions.
mlp_ratio: MLP expansion ratio. Default: 4.0
act_layer: Activation layer. Default: ``nn.GELU``
norm_layer: Normalization layer. Default: ``nn.BatchNorm2d``
proj_drop: Dropout rate. Default: 0.0
drop_path: Drop path rate. Default: 0.0
layer_scale_init_value: Layer scale value at initialization. Default: 1e-5
"""
super().__init__()
self.norm = norm_layer(dim)
self.token_mixer = Attention(dim=dim)
if layer_scale_init_value is not None:
self.layer_scale_1 = LayerScale2d(dim, layer_scale_init_value)
else:
self.layer_scale_1 = nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.mlp = ConvMlp(
in_chs=dim,
hidden_channels=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
if layer_scale_init_value is not None:
self.layer_scale_2 = LayerScale2d(dim, layer_scale_init_value)
else:
self.layer_scale_2 = nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, x):
x = x + self.drop_path1(self.layer_scale_1(self.token_mixer(self.norm(x))))
x = x + self.drop_path2(self.layer_scale_2(self.mlp(x)))
return x
class FastVitStage(nn.Module):
def __init__(
self,
dim: int,
dim_out: int,
depth: int,
token_mixer_type: str,
downsample: bool = True,
se_downsample: bool = False,
down_patch_size: int = 7,
down_stride: int = 2,
pos_emb_layer: Optional[nn.Module] = None,
kernel_size: int = 3,
mlp_ratio: float = 4.0,
act_layer: Type[nn.Module] = nn.GELU,
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
proj_drop_rate: float = 0.0,
drop_path_rate: float = 0.0,
layer_scale_init_value: Optional[float] = 1e-5,
lkc_use_act=False,
inference_mode=False,
):
"""FastViT stage.
Args:
            dim: Number of input embedding dimensions.
            dim_out: Number of output embedding dimensions.
            depth: Number of blocks in stage.
            downsample: If True, prepend a strided PatchEmbed downsample layer.
            token_mixer_type: Token mixer type, one of 'repmixer' or 'attention'.
kernel_size: Kernel size for repmixer.
mlp_ratio: MLP expansion ratio.
act_layer: Activation layer.
norm_layer: Normalization layer.
proj_drop_rate: Dropout rate.
drop_path_rate: Drop path rate.
layer_scale_init_value: Layer scale value at initialization.
inference_mode: Flag to instantiate block in inference mode.
"""
super().__init__()
self.grad_checkpointing = False
if downsample:
self.downsample = PatchEmbed(
patch_size=down_patch_size,
stride=down_stride,
in_chs=dim,
embed_dim=dim_out,
use_se=se_downsample,
act_layer=act_layer,
lkc_use_act=lkc_use_act,
inference_mode=inference_mode,
)
else:
assert dim == dim_out
self.downsample = nn.Identity()
if pos_emb_layer is not None:
self.pos_emb = pos_emb_layer(dim_out, inference_mode=inference_mode)
else:
self.pos_emb = nn.Identity()
blocks = []
for block_idx in range(depth):
if token_mixer_type == "repmixer":
blocks.append(RepMixerBlock(
dim_out,
kernel_size=kernel_size,
mlp_ratio=mlp_ratio,
act_layer=act_layer,
proj_drop=proj_drop_rate,
drop_path=drop_path_rate[block_idx],
layer_scale_init_value=layer_scale_init_value,
inference_mode=inference_mode,
))
elif token_mixer_type == "attention":
blocks.append(AttentionBlock(
dim_out,
mlp_ratio=mlp_ratio,
act_layer=act_layer,
norm_layer=norm_layer,
proj_drop=proj_drop_rate,
drop_path=drop_path_rate[block_idx],
layer_scale_init_value=layer_scale_init_value,
))
else:
raise ValueError(
"Token mixer type: {} not supported".format(token_mixer_type)
)
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
x = self.downsample(x)
x = self.pos_emb(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class FastVit(nn.Module):
    """
    This class implements `FastViT architecture <https://arxiv.org/pdf/2303.14189.pdf>`_
    """
    fork_feat: torch.jit.Final[bool]
def __init__(
self,
in_chans: int = 3,
layers: Tuple[int, ...] = (2, 2, 6, 2),
token_mixers: Tuple[str, ...] = ("repmixer", "repmixer", "repmixer", "repmixer"),
embed_dims: Tuple[int, ...] = (64, 128, 256, 512),
mlp_ratios: Tuple[float, ...] = (4,) * 4,
downsamples: Tuple[bool, ...] = (False, True, True, True),
se_downsamples: Tuple[bool, ...] = (False, False, False, False),
repmixer_kernel_size: int = 3,
num_classes: int = 1000,
pos_embs: Tuple[Optional[nn.Module], ...] = (None,) * 4,
down_patch_size: int = 7,
down_stride: int = 2,
drop_rate: float = 0.0,
proj_drop_rate: float = 0.0,
drop_path_rate: float = 0.0,
layer_scale_init_value: float = 1e-5,
lkc_use_act: bool = False,
fork_feat: bool = False,
cls_ratio: float = 2.0,
global_pool: str = 'avg',
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
act_layer: Type[nn.Module] = nn.GELU,
inference_mode: bool = False,
) -> None:
super().__init__()
self.num_classes = 0 if fork_feat else num_classes
self.fork_feat = fork_feat
self.global_pool = global_pool
self.feature_info = []
# Convolutional stem
self.stem = convolutional_stem(
in_chans,
embed_dims[0],
act_layer,
inference_mode,
)
# Build the main stages of the network architecture
prev_dim = embed_dims[0]
scale = 1
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)]
stages = []
for i in range(len(layers)):
downsample = downsamples[i] or prev_dim != embed_dims[i]
stage = FastVitStage(
dim=prev_dim,
dim_out=embed_dims[i],
depth=layers[i],
downsample=downsample,
se_downsample=se_downsamples[i],
down_patch_size=down_patch_size,
down_stride=down_stride,
pos_emb_layer=pos_embs[i],
token_mixer_type=token_mixers[i],
kernel_size=repmixer_kernel_size,
mlp_ratio=mlp_ratios[i],
act_layer=act_layer,
norm_layer=norm_layer,
proj_drop_rate=proj_drop_rate,
drop_path_rate=dpr[i],
layer_scale_init_value=layer_scale_init_value,
lkc_use_act=lkc_use_act,
inference_mode=inference_mode,
)
stages.append(stage)
prev_dim = embed_dims[i]
if downsample:
scale *= 2
self.feature_info += [dict(num_chs=prev_dim, reduction=4 * scale, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.num_stages = len(self.stages)
self.num_features = self.head_hidden_size = prev_dim
# For segmentation and detection, extract intermediate output
if self.fork_feat:
# Add a norm layer for each output. self.stages is slightly different than self.network
# in the original code, the PatchEmbed layer is part of self.stages in this code where
# it was part of self.network in the original code. So we do not need to skip out indices.
self.out_indices = [0, 1, 2, 3]
for i_emb, i_layer in enumerate(self.out_indices):
if i_emb == 0 and os.environ.get("FORK_LAST3", None):
"""For RetinaNet, `start_level=1`. The first norm layer will not used.
cmd: `FORK_LAST3=1 python -m torch.distributed.launch ...`
"""
layer = nn.Identity()
else:
layer = norm_layer(embed_dims[i_emb])
layer_name = f"norm{i_layer}"
self.add_module(layer_name, layer)
else:
# Classifier head
self.num_features = self.head_hidden_size = final_features = int(embed_dims[-1] * cls_ratio)
self.final_conv = MobileOneBlock(
in_chs=embed_dims[-1],
out_chs=final_features,
kernel_size=3,
stride=1,
group_size=1,
inference_mode=inference_mode,
use_se=True,
act_layer=act_layer,
num_conv_branches=1,
)
self.head = ClassifierHead(
final_features,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
self.apply(self._init_weights)
def _init_weights(self, m: nn.Module) -> None:
"""Init. for classification"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return set()
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem', # stem and embed
blocks=r'^stages\.(\d+)' if coarse else [
                (r'^stages\.(\d+)\.downsample', (0,)),
                (r'^stages\.(\d+)\.pos_emb', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
        Returns:
            List of intermediate features, or a tuple of (final features, intermediates).
        """
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.stages), indices)
# forward pass
x = self.stem(x)
last_idx = self.num_stages - 1
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
stages = self.stages
else:
stages = self.stages[:max_index + 1]
feat_idx = 0
for feat_idx, stage in enumerate(stages):
x = stage(x)
if feat_idx in take_indices:
intermediates.append(x)
if intermediates_only:
return intermediates
if feat_idx == last_idx:
x = self.final_conv(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.stages), indices)
self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
# input embedding
x = self.stem(x)
outs = []
for idx, block in enumerate(self.stages):
x = block(x)
if self.fork_feat:
if idx in self.out_indices:
norm_layer = getattr(self, f"norm{idx}")
x_out = norm_layer(x)
outs.append(x_out)
if self.fork_feat:
# output the features of four stages for dense prediction
return outs
x = self.final_conv(x)
return x
def forward_head(self, x: torch.Tensor, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
if self.fork_feat:
return x
x = self.forward_head(x)
return x
def _cfg(url="", **kwargs):
return {
"url": url,
"num_classes": 1000,
"input_size": (3, 256, 256),
"pool_size": (8, 8),
"crop_pct": 0.9,
"interpolation": "bicubic",
"mean": IMAGENET_DEFAULT_MEAN,
"std": IMAGENET_DEFAULT_STD,
'first_conv': ('stem.0.conv_kxk.0.conv', 'stem.0.conv_scale.conv'),
"classifier": "head.fc",
**kwargs,
}
default_cfgs = generate_default_cfgs({
"fastvit_t8.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_t12.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_s12.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_sa12.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_sa24.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_sa36.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_ma36.apple_in1k": _cfg(
hf_hub_id='timm/',
crop_pct=0.95),
"fastvit_t8.apple_dist_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_t12.apple_dist_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_s12.apple_dist_in1k": _cfg(
hf_hub_id='timm/',),
"fastvit_sa12.apple_dist_in1k": _cfg(
hf_hub_id='timm/',),
"fastvit_sa24.apple_dist_in1k": _cfg(
hf_hub_id='timm/',),
"fastvit_sa36.apple_dist_in1k": _cfg(
hf_hub_id='timm/',),
"fastvit_ma36.apple_dist_in1k": _cfg(
hf_hub_id='timm/',
crop_pct=0.95
),
"fastvit_mci0.apple_mclip": _cfg(
hf_hub_id='apple/mobileclip_s0_timm',
url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_s0.pt',
crop_pct=0.95,
num_classes=512, # CLIP proj dim
mean=(0., 0., 0.), std=(1., 1., 1.)
),
"fastvit_mci1.apple_mclip": _cfg(
hf_hub_id='apple/mobileclip_s1_timm',
url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_s1.pt',
crop_pct=0.95,
num_classes=512, # CLIP proj dim
mean=(0., 0., 0.), std=(1., 1., 1.)
),
"fastvit_mci2.apple_mclip": _cfg(
hf_hub_id='apple/mobileclip_s2_timm',
url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_s2.pt',
crop_pct=0.95,
num_classes=512, # CLIP proj dim
mean=(0., 0., 0.), std=(1., 1., 1.)
),
})
def checkpoint_filter_fn(state_dict, model):
""" Remap original checkpoints -> timm """
if 'stem.0.conv_kxk.0.conv.weight' in state_dict:
return state_dict # non-original checkpoint, no remapping needed
state_dict = state_dict.get('state_dict', state_dict)
if 'image_encoder.model.patch_embed.0.rbr_conv.0.conv.weight' in state_dict:
# remap MobileCLIP checkpoints
prefix = 'image_encoder.model.'
else:
prefix = ''
import re
import bisect
# find stage ends by locating downsample layers
stage_ends = []
for k, v in state_dict.items():
match = re.match(r'^(.*?)network\.(\d+)\.proj.*', k)
if match:
stage_ends.append(int(match.group(2)))
stage_ends = list(sorted(set(stage_ends)))
out_dict = {}
for k, v in state_dict.items():
if prefix:
if prefix not in k:
continue
k = k.replace(prefix, '')
# remap renamed layers
k = k.replace('patch_embed', 'stem')
k = k.replace('rbr_conv', 'conv_kxk')
k = k.replace('rbr_scale', 'conv_scale')
k = k.replace('rbr_skip', 'identity')
k = k.replace('conv_exp', 'final_conv') # to match byobnet, regnet, nfnet
k = k.replace('lkb_origin', 'large_conv')
k = k.replace('convffn', 'mlp')
k = k.replace('se.reduce', 'se.fc1')
k = k.replace('se.expand', 'se.fc2')
k = re.sub(r'layer_scale_([0-9])', r'layer_scale_\1.gamma', k)
if k.endswith('layer_scale'):
k = k.replace('layer_scale', 'layer_scale.gamma')
k = k.replace('dist_head', 'head_dist')
if k.startswith('head.'):
if k == 'head.proj' and hasattr(model.head, 'fc') and isinstance(model.head.fc, nn.Linear):
# if CLIP projection, map to head.fc w/ bias = zeros
k = k.replace('head.proj', 'head.fc.weight')
v = v.T
out_dict['head.fc.bias'] = torch.zeros(v.shape[0])
else:
k = k.replace('head.', 'head.fc.')
# remap flat sequential network to stages
match = re.match(r'^network\.(\d+)', k)
stage_idx, net_idx = None, None
if match:
net_idx = int(match.group(1))
stage_idx = bisect.bisect_right(stage_ends, net_idx)
if stage_idx is not None:
net_prefix = f'network.{net_idx}'
stage_prefix = f'stages.{stage_idx}'
if net_prefix + '.proj' in k:
k = k.replace(net_prefix + '.proj', stage_prefix + '.downsample.proj')
elif net_prefix + '.pe' in k:
k = k.replace(net_prefix + '.pe', stage_prefix + '.pos_emb.pos_enc')
else:
k = k.replace(net_prefix, stage_prefix + '.blocks')
out_dict[k] = v
return out_dict
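# Remap sketch (illustrative): example key translations performed above for an
# original FastViT checkpoint laid out as a flat `network` list:
#   'patch_embed.0.rbr_conv.0.conv.weight' -> 'stem.0.conv_kxk.0.conv.weight'
#   'network.0.0.token_mixer.mixer.rbr_conv.0.conv.weight'
#       -> 'stages.0.blocks.0.token_mixer.mixer.conv_kxk.0.conv.weight'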
def _create_fastvit(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', (0, 1, 2, 3))
model = build_model_with_cfg(
FastVit,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs
)
return model
@register_model
def fastvit_t8(pretrained=False, **kwargs):
"""Instantiate FastViT-T8 model variant."""
model_args = dict(
layers=(2, 2, 4, 2),
embed_dims=(48, 96, 192, 384),
mlp_ratios=(3, 3, 3, 3),
token_mixers=("repmixer", "repmixer", "repmixer", "repmixer")
)
return _create_fastvit('fastvit_t8', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_t12(pretrained=False, **kwargs):
"""Instantiate FastViT-T12 model variant."""
model_args = dict(
layers=(2, 2, 6, 2),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(3, 3, 3, 3),
token_mixers=("repmixer", "repmixer", "repmixer", "repmixer"),
)
return _create_fastvit('fastvit_t12', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_s12(pretrained=False, **kwargs):
"""Instantiate FastViT-S12 model variant."""
model_args = dict(
layers=(2, 2, 6, 2),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(4, 4, 4, 4),
token_mixers=("repmixer", "repmixer", "repmixer", "repmixer"),
)
return _create_fastvit('fastvit_s12', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_sa12(pretrained=False, **kwargs):
"""Instantiate FastViT-SA12 model variant."""
model_args = dict(
layers=(2, 2, 6, 2),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(4, 4, 4, 4),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
)
return _create_fastvit('fastvit_sa12', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_sa24(pretrained=False, **kwargs):
"""Instantiate FastViT-SA24 model variant."""
model_args = dict(
layers=(4, 4, 12, 4),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(4, 4, 4, 4),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
)
return _create_fastvit('fastvit_sa24', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_sa36(pretrained=False, **kwargs):
"""Instantiate FastViT-SA36 model variant."""
model_args = dict(
layers=(6, 6, 18, 6),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(4, 4, 4, 4),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
)
return _create_fastvit('fastvit_sa36', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_ma36(pretrained=False, **kwargs):
"""Instantiate FastViT-MA36 model variant."""
model_args = dict(
layers=(6, 6, 18, 6),
embed_dims=(76, 152, 304, 608),
mlp_ratios=(4, 4, 4, 4),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention")
)
return _create_fastvit('fastvit_ma36', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_mci0(pretrained=False, **kwargs):
"""Instantiate MCi0 model variant."""
model_args = dict(
layers=(2, 6, 10, 2),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(3, 3, 3, 3),
se_downsamples=(False, False, True, True),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
lkc_use_act=True,
)
return _create_fastvit('fastvit_mci0', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_mci1(pretrained=False, **kwargs):
"""Instantiate MCi1 model variant."""
model_args = dict(
layers=(4, 12, 20, 4),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(3, 3, 3, 3),
se_downsamples=(False, False, True, True),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
lkc_use_act=True,
)
return _create_fastvit('fastvit_mci1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_mci2(pretrained=False, **kwargs):
"""Instantiate MCi2 model variant."""
model_args = dict(
layers=(4, 12, 24, 4),
embed_dims=(80, 160, 320, 640),
mlp_ratios=(3, 3, 3, 3),
se_downsamples=(False, False, True, True),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
lkc_use_act=True,
)
return _create_fastvit('fastvit_mci2', pretrained=pretrained, **dict(model_args, **kwargs))
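def _fuse_for_inference(model: nn.Module) -> nn.Module:
    # Deployment sketch (illustrative helper, not part of the timm API): fold
    # every reparameterizable branch in a built model into its single-conv
    # inference form. Call model.eval() first so fused BatchNorm statistics
    # match inference behavior.
    for module in list(model.modules()):  # materialize first; reparameterize mutates children
        if hasattr(module, 'reparameterize'):
            module.reparameterize()
    return model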
| pytorch-image-models/timm/models/fastvit.py/0 | {
"file_path": "pytorch-image-models/timm/models/fastvit.py",
"repo_id": "pytorch-image-models",
"token_count": 29338
} | 246 |
""" Pytorch Inception-V4 implementation
Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is
based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License)
"""
from functools import partial
from typing import List, Optional, Tuple, Union
import torch
import torch.nn as nn
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import create_classifier, ConvNormAct
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._registry import register_model, generate_default_cfgs
__all__ = ['InceptionV4']
class Mixed3a(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(Mixed3a, self).__init__()
self.maxpool = nn.MaxPool2d(3, stride=2)
self.conv = conv_block(64, 96, kernel_size=3, stride=2)
def forward(self, x):
x0 = self.maxpool(x)
x1 = self.conv(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed4a(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(Mixed4a, self).__init__()
self.branch0 = nn.Sequential(
conv_block(160, 64, kernel_size=1, stride=1),
conv_block(64, 96, kernel_size=3, stride=1)
)
self.branch1 = nn.Sequential(
conv_block(160, 64, kernel_size=1, stride=1),
conv_block(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(64, 96, kernel_size=(3, 3), stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed5a(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(Mixed5a, self).__init__()
self.conv = conv_block(192, 192, kernel_size=3, stride=2)
self.maxpool = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.conv(x)
x1 = self.maxpool(x)
out = torch.cat((x0, x1), 1)
return out
class InceptionA(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(InceptionA, self).__init__()
self.branch0 = conv_block(384, 96, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
conv_block(384, 64, kernel_size=1, stride=1),
conv_block(64, 96, kernel_size=3, stride=1, padding=1)
)
self.branch2 = nn.Sequential(
conv_block(384, 64, kernel_size=1, stride=1),
conv_block(64, 96, kernel_size=3, stride=1, padding=1),
conv_block(96, 96, kernel_size=3, stride=1, padding=1)
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
conv_block(384, 96, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class ReductionA(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(ReductionA, self).__init__()
self.branch0 = conv_block(384, 384, kernel_size=3, stride=2)
self.branch1 = nn.Sequential(
conv_block(384, 192, kernel_size=1, stride=1),
conv_block(192, 224, kernel_size=3, stride=1, padding=1),
conv_block(224, 256, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class InceptionB(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(InceptionB, self).__init__()
self.branch0 = conv_block(1024, 384, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
conv_block(1024, 192, kernel_size=1, stride=1),
conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0))
)
self.branch2 = nn.Sequential(
conv_block(1024, 192, kernel_size=1, stride=1),
conv_block(192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3))
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
conv_block(1024, 128, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class ReductionB(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(ReductionB, self).__init__()
self.branch0 = nn.Sequential(
conv_block(1024, 192, kernel_size=1, stride=1),
conv_block(192, 192, kernel_size=3, stride=2)
)
self.branch1 = nn.Sequential(
conv_block(1024, 256, kernel_size=1, stride=1),
conv_block(256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(320, 320, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class InceptionC(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(InceptionC, self).__init__()
self.branch0 = conv_block(1536, 256, kernel_size=1, stride=1)
self.branch1_0 = conv_block(1536, 384, kernel_size=1, stride=1)
self.branch1_1a = conv_block(384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch1_1b = conv_block(384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch2_0 = conv_block(1536, 384, kernel_size=1, stride=1)
self.branch2_1 = conv_block(384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch2_2 = conv_block(448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch2_3a = conv_block(512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch2_3b = conv_block(512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
conv_block(1536, 256, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1_0 = self.branch1_0(x)
x1_1a = self.branch1_1a(x1_0)
x1_1b = self.branch1_1b(x1_0)
x1 = torch.cat((x1_1a, x1_1b), 1)
x2_0 = self.branch2_0(x)
x2_1 = self.branch2_1(x2_0)
x2_2 = self.branch2_2(x2_1)
x2_3a = self.branch2_3a(x2_2)
x2_3b = self.branch2_3b(x2_2)
x2 = torch.cat((x2_3a, x2_3b), 1)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class InceptionV4(nn.Module):
def __init__(
self,
num_classes=1000,
in_chans=3,
output_stride=32,
drop_rate=0.,
global_pool='avg',
norm_layer='batchnorm2d',
norm_eps=1e-3,
act_layer='relu',
):
super(InceptionV4, self).__init__()
assert output_stride == 32
self.num_classes = num_classes
self.num_features = self.head_hidden_size = 1536
conv_block = partial(
ConvNormAct,
padding=0,
norm_layer=norm_layer,
act_layer=act_layer,
norm_kwargs=dict(eps=norm_eps),
act_kwargs=dict(inplace=True),
)
features = [
conv_block(in_chans, 32, kernel_size=3, stride=2),
conv_block(32, 32, kernel_size=3, stride=1),
conv_block(32, 64, kernel_size=3, stride=1, padding=1),
Mixed3a(conv_block),
Mixed4a(conv_block),
Mixed5a(conv_block),
]
features += [InceptionA(conv_block) for _ in range(4)]
features += [ReductionA(conv_block)] # Mixed6a
features += [InceptionB(conv_block) for _ in range(7)]
features += [ReductionB(conv_block)] # Mixed7a
features += [InceptionC(conv_block) for _ in range(3)]
self.features = nn.Sequential(*features)
self.feature_info = [
dict(num_chs=64, reduction=2, module='features.2'),
dict(num_chs=160, reduction=4, module='features.3'),
dict(num_chs=384, reduction=8, module='features.9'),
dict(num_chs=1024, reduction=16, module='features.17'),
dict(num_chs=1536, reduction=32, module='features.21'),
]
self.global_pool, self.head_drop, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^features\.[012]\.',
blocks=r'^features\.(\d+)'
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.last_linear
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
self.global_pool, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
        Returns:
            List of intermediate features, or a tuple of (final features, intermediates).
        """
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
stage_ends = [int(info['module'].split('.')[-1]) for info in self.feature_info]
take_indices, max_index = feature_take_indices(len(stage_ends), indices)
take_indices = [stage_ends[i] for i in take_indices]
max_index = stage_ends[max_index]
# forward pass
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
stages = self.features
else:
stages = self.features[:max_index + 1]
for feat_idx, stage in enumerate(stages):
x = stage(x)
if feat_idx in take_indices:
intermediates.append(x)
if intermediates_only:
return intermediates
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
stage_ends = [int(info['module'].split('.')[-1]) for info in self.feature_info]
take_indices, max_index = feature_take_indices(len(stage_ends), indices)
max_index = stage_ends[max_index]
self.features = self.features[:max_index + 1] # truncate blocks w/ stem as idx 0
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
return self.features(x)
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.last_linear(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_inception_v4(variant, pretrained=False, **kwargs) -> InceptionV4:
return build_model_with_cfg(
InceptionV4,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True),
**kwargs,
)
default_cfgs = generate_default_cfgs({
'inception_v4.tf_in1k': {
'hf_hub_id': 'timm/',
'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'features.0.conv', 'classifier': 'last_linear',
}
})
@register_model
def inception_v4(pretrained=False, **kwargs):
return _create_inception_v4('inception_v4', pretrained, **kwargs)
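def _check_inception_v4_features() -> None:
    # Shape sketch (illustrative): with the default 299x299 input from the
    # pretrained config, the final feature map before pooling is 8x8x1536.
    model = InceptionV4(num_classes=0).eval()
    x = torch.randn(1, 3, 299, 299)
    with torch.no_grad():
        feats = model.forward_features(x)
    assert feats.shape == (1, 1536, 8, 8)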
| pytorch-image-models/timm/models/inception_v4.py/0 | {
"file_path": "pytorch-image-models/timm/models/inception_v4.py",
"repo_id": "pytorch-image-models",
"token_count": 6625
} | 247 |
""" Pooling-based Vision Transformer (PiT) in PyTorch
A PyTorch implementation of Pooling-based Vision Transformers as described in
'Rethinking Spatial Dimensions of Vision Transformers' - https://arxiv.org/abs/2103.16302
This code was adapted from the original version at https://github.com/naver-ai/pit, original copyright below.
Modifications for timm by / Copyright 2020 Ross Wightman
"""
# PiT
# Copyright 2021-present NAVER Corp.
# Apache License v2.0
import math
import re
from functools import partial
from typing import List, Optional, Sequence, Tuple, Union
import torch
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import trunc_normal_, to_2tuple
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._registry import register_model, generate_default_cfgs
from .vision_transformer import Block
__all__ = ['PoolingVisionTransformer'] # model_registry will add each entrypoint fn to this
class SequentialTuple(nn.Sequential):
""" This module exists to work around torchscript typing issues list -> list"""
def __init__(self, *args):
super(SequentialTuple, self).__init__(*args)
def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
for module in self:
x = module(x)
return x
class Transformer(nn.Module):
def __init__(
self,
base_dim,
depth,
heads,
mlp_ratio,
pool=None,
proj_drop=.0,
attn_drop=.0,
drop_path_prob=None,
norm_layer=None,
):
super(Transformer, self).__init__()
embed_dim = base_dim * heads
self.pool = pool
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
self.blocks = nn.Sequential(*[
Block(
dim=embed_dim,
num_heads=heads,
mlp_ratio=mlp_ratio,
qkv_bias=True,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path_prob[i],
norm_layer=partial(nn.LayerNorm, eps=1e-6)
)
for i in range(depth)])
def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
x, cls_tokens = x
token_length = cls_tokens.shape[1]
if self.pool is not None:
x, cls_tokens = self.pool(x, cls_tokens)
B, C, H, W = x.shape
x = x.flatten(2).transpose(1, 2)
x = torch.cat((cls_tokens, x), dim=1)
x = self.norm(x)
x = self.blocks(x)
cls_tokens = x[:, :token_length]
x = x[:, token_length:]
x = x.transpose(1, 2).reshape(B, C, H, W)
return x, cls_tokens
class Pooling(nn.Module):
def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'):
super(Pooling, self).__init__()
self.conv = nn.Conv2d(
in_feature,
out_feature,
kernel_size=stride + 1,
padding=stride // 2,
stride=stride,
padding_mode=padding_mode,
groups=in_feature,
)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]:
x = self.conv(x)
cls_token = self.fc(cls_token)
return x, cls_token
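def _pooling_example():
    # Illustrative sketch, not part of the original model: the depthwise
    # strided conv halves the token grid while a plain Linear keeps the class
    # token dimension in sync. Sizes below are hypothetical pit_xs-like dims.
    pool = Pooling(in_feature=96, out_feature=192, stride=2)
    x, cls = pool(torch.randn(2, 96, 27, 27), torch.randn(2, 1, 96))
    assert x.shape == (2, 192, 14, 14) and cls.shape == (2, 1, 192)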
class ConvEmbedding(nn.Module):
def __init__(
self,
in_channels,
out_channels,
img_size: int = 224,
patch_size: int = 16,
stride: int = 8,
padding: int = 0,
):
super(ConvEmbedding, self).__init__()
self.img_size = to_2tuple(img_size)
self.patch_size = to_2tuple(patch_size)
self.height = math.floor((self.img_size[0] + 2 * padding - self.patch_size[0]) / stride + 1)
self.width = math.floor((self.img_size[1] + 2 * padding - self.patch_size[1]) / stride + 1)
self.grid_size = (self.height, self.width)
self.conv = nn.Conv2d(
in_channels, out_channels, kernel_size=patch_size,
stride=stride, padding=padding, bias=True)
def forward(self, x):
x = self.conv(x)
return x
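def _conv_embedding_example():
    # Illustrative sketch, not part of the original model: the overlapping
    # stem produces a floor((img + 2 * pad - patch) / stride + 1) grid, e.g.
    # floor((224 - 16) / 8 + 1) = 27 for the default PiT configuration.
    embed = ConvEmbedding(3, 96, img_size=224, patch_size=16, stride=8)
    assert embed.grid_size == (27, 27)
    assert embed(torch.randn(1, 3, 224, 224)).shape == (1, 96, 27, 27)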
class PoolingVisionTransformer(nn.Module):
""" Pooling-based Vision Transformer
    A PyTorch implementation of 'Rethinking Spatial Dimensions of Vision Transformers'
- https://arxiv.org/abs/2103.16302
"""
def __init__(
self,
img_size: int = 224,
patch_size: int = 16,
stride: int = 8,
stem_type: str = 'overlap',
base_dims: Sequence[int] = (48, 48, 48),
depth: Sequence[int] = (2, 6, 4),
heads: Sequence[int] = (2, 4, 8),
mlp_ratio: float = 4,
num_classes=1000,
in_chans=3,
global_pool='token',
distilled=False,
drop_rate=0.,
            pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
):
super(PoolingVisionTransformer, self).__init__()
assert global_pool in ('token',)
self.base_dims = base_dims
self.heads = heads
embed_dim = base_dims[0] * heads[0]
self.num_classes = num_classes
self.global_pool = global_pool
self.num_tokens = 2 if distilled else 1
self.feature_info = []
self.patch_embed = ConvEmbedding(in_chans, embed_dim, img_size, patch_size, stride)
self.pos_embed = nn.Parameter(torch.randn(1, embed_dim, self.patch_embed.height, self.patch_embed.width))
self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, embed_dim))
        self.pos_drop = nn.Dropout(p=pos_drop_rate)
transformers = []
# stochastic depth decay rule
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)]
prev_dim = embed_dim
for i in range(len(depth)):
pool = None
embed_dim = base_dims[i] * heads[i]
if i > 0:
pool = Pooling(
prev_dim,
embed_dim,
stride=2,
)
transformers += [Transformer(
base_dims[i],
depth[i],
heads[i],
mlp_ratio,
pool=pool,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path_prob=dpr[i],
)]
prev_dim = embed_dim
self.feature_info += [dict(num_chs=prev_dim, reduction=(stride - 1) * 2**i, module=f'transformers.{i}')]
self.transformers = SequentialTuple(*transformers)
self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6)
self.num_features = self.head_hidden_size = self.embed_dim = embed_dim
# Classifier head
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = None
if distilled:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
self.distilled_training = False # must set this True to train w/ distillation token
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
@torch.jit.ignore
def set_distilled_training(self, enable=True):
self.distilled_training = enable
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
def get_classifier(self) -> nn.Module:
if self.head_dist is not None:
return self.head, self.head_dist
else:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if self.head_dist is not None:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
        Returns:
            Tuple of (class tokens, intermediates), or just the intermediates if intermediates_only is True.
"""
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.transformers), indices)
# forward pass
x = self.patch_embed(x)
x = self.pos_drop(x + self.pos_embed)
cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)
last_idx = len(self.transformers) - 1
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
stages = self.transformers
else:
stages = self.transformers[:max_index + 1]
for feat_idx, stage in enumerate(stages):
x, cls_tokens = stage((x, cls_tokens))
if feat_idx in take_indices:
intermediates.append(x)
if intermediates_only:
return intermediates
if feat_idx == last_idx:
cls_tokens = self.norm(cls_tokens)
return cls_tokens, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.transformers), indices)
self.transformers = self.transformers[:max_index + 1] # truncate blocks w/ stem as idx 0
if prune_norm:
self.norm = nn.Identity()
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x = self.patch_embed(x)
x = self.pos_drop(x + self.pos_embed)
cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)
x, cls_tokens = self.transformers((x, cls_tokens))
cls_tokens = self.norm(cls_tokens)
return cls_tokens
def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor:
if self.head_dist is not None:
assert self.global_pool == 'token'
x, x_dist = x[:, 0], x[:, 1]
x = self.head_drop(x)
            x_dist = self.head_drop(x_dist)
if not pre_logits:
x = self.head(x)
x_dist = self.head_dist(x_dist)
if self.distilled_training and self.training and not torch.jit.is_scripting():
# only return separate classification predictions when training in distilled mode
return x, x_dist
else:
                # during standard train/finetune and inference, average the two classifier predictions
return (x + x_dist) / 2
else:
if self.global_pool == 'token':
x = x[:, 0]
x = self.head_drop(x)
if not pre_logits:
x = self.head(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
""" preprocess checkpoints """
out_dict = {}
p_blocks = re.compile(r'pools\.(\d)\.')
for k, v in state_dict.items():
# FIXME need to update resize for PiT impl
# if k == 'pos_embed' and v.shape != model.pos_embed.shape:
# # To resize pos embedding when using model at different size from pretrained weights
# v = resize_pos_embed(v, model.pos_embed)
k = p_blocks.sub(lambda exp: f'transformers.{int(exp.group(1)) + 1}.pool.', k)
out_dict[k] = v
return out_dict
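def _checkpoint_remap_example():
    # Illustrative sketch, not part of the original module: official PiT
    # checkpoints store pooling layers under 'pools.<i>.'; the filter above
    # moves them into the matching stage, offset by one since stage 0 has no
    # pool. The key below is hypothetical.
    sd = {'pools.0.conv.weight': torch.zeros(1)}
    assert 'transformers.1.pool.conv.weight' in checkpoint_filter_fn(sd, None)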
def _create_pit(variant, pretrained=False, **kwargs):
default_out_indices = tuple(range(3))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
PoolingVisionTransformer,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(feature_cls='hook', out_indices=out_indices),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.conv', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
# deit models (FB weights)
'pit_ti_224.in1k': _cfg(hf_hub_id='timm/'),
'pit_xs_224.in1k': _cfg(hf_hub_id='timm/'),
'pit_s_224.in1k': _cfg(hf_hub_id='timm/'),
'pit_b_224.in1k': _cfg(hf_hub_id='timm/'),
'pit_ti_distilled_224.in1k': _cfg(
hf_hub_id='timm/',
classifier=('head', 'head_dist')),
'pit_xs_distilled_224.in1k': _cfg(
hf_hub_id='timm/',
classifier=('head', 'head_dist')),
'pit_s_distilled_224.in1k': _cfg(
hf_hub_id='timm/',
classifier=('head', 'head_dist')),
'pit_b_distilled_224.in1k': _cfg(
hf_hub_id='timm/',
classifier=('head', 'head_dist')),
})
@register_model
def pit_b_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=14,
stride=7,
base_dims=[64, 64, 64],
depth=[3, 6, 4],
heads=[4, 8, 16],
mlp_ratio=4,
)
return _create_pit('pit_b_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_s_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[3, 6, 12],
mlp_ratio=4,
)
return _create_pit('pit_s_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_xs_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
)
return _create_pit('pit_xs_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_ti_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[32, 32, 32],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
)
return _create_pit('pit_ti_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_b_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=14,
stride=7,
base_dims=[64, 64, 64],
depth=[3, 6, 4],
heads=[4, 8, 16],
mlp_ratio=4,
distilled=True,
)
return _create_pit('pit_b_distilled_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_s_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[3, 6, 12],
mlp_ratio=4,
distilled=True,
)
return _create_pit('pit_s_distilled_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_xs_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
distilled=True,
)
return _create_pit('pit_xs_distilled_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_ti_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[32, 32, 32],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
distilled=True,
)
return _create_pit('pit_ti_distilled_224', pretrained, **dict(model_args, **kwargs))
| pytorch-image-models/timm/models/pit.py/0 | {
"file_path": "pytorch-image-models/timm/models/pit.py",
"repo_id": "pytorch-image-models",
"token_count": 8538
} | 248 |
"""SHViT
SHViT: Single-Head Vision Transformer with Memory Efficient Macro Design
Code: https://github.com/ysj9909/SHViT
Paper: https://arxiv.org/abs/2401.16456
@inproceedings{yun2024shvit,
author={Yun, Seokju and Ro, Youngmin},
title={SHViT: Single-Head Vision Transformer with Memory Efficient Macro Design},
booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
pages={5756--5767},
year={2024}
}
"""
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import GroupNorm1, SqueezeExcite, SelectAdaptivePool2d, LayerType, trunc_normal_
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['SHViT']
class Residual(nn.Module):
def __init__(self, m: nn.Module):
super().__init__()
self.m = m
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x + self.m(x)
@torch.no_grad()
def fuse(self) -> nn.Module:
if isinstance(self.m, Conv2dNorm):
m = self.m.fuse()
assert(m.groups == m.in_channels)
identity = torch.ones(m.weight.shape[0], m.weight.shape[1], 1, 1)
identity = F.pad(identity, [1,1,1,1])
m.weight += identity.to(m.weight.device)
return m
else:
return self
class Conv2dNorm(nn.Sequential):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int = 1,
stride: int = 1,
padding: int = 0,
bn_weight_init: int = 1,
**kwargs,
):
super().__init__()
self.add_module('c', nn.Conv2d(
in_channels, out_channels, kernel_size, stride, padding, bias=False, **kwargs))
self.add_module('bn', nn.BatchNorm2d(out_channels))
nn.init.constant_(self.bn.weight, bn_weight_init)
nn.init.constant_(self.bn.bias, 0)
@torch.no_grad()
def fuse(self) -> nn.Conv2d:
c, bn = self._modules.values()
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
w = c.weight * w[:, None, None, None]
b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5
m = nn.Conv2d(
in_channels=w.size(1) * self.c.groups,
out_channels=w.size(0),
kernel_size=w.shape[2:],
stride=self.c.stride,
padding=self.c.padding,
dilation=self.c.dilation,
groups=self.c.groups,
device=c.weight.device,
dtype=c.weight.dtype,
)
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
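def _conv2d_norm_fuse_example():
    # Illustrative sketch, not part of the original module: folding the
    # BatchNorm statistics into the conv weights should reproduce the
    # train-time conv + BN pair in eval mode up to float tolerance.
    m = Conv2dNorm(8, 16, 3, 1, 1).eval()
    x = torch.randn(2, 8, 14, 14)
    assert torch.allclose(m(x), m.fuse()(x), atol=1e-5)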
class NormLinear(nn.Sequential):
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
std: float = 0.02,
):
super().__init__()
self.add_module('bn', nn.BatchNorm1d(in_features))
self.add_module('l', nn.Linear(in_features, out_features, bias=bias))
trunc_normal_(self.l.weight, std=std)
if bias:
nn.init.constant_(self.l.bias, 0)
@torch.no_grad()
def fuse(self) -> nn.Linear:
bn, l = self._modules.values()
        w = bn.weight / (bn.running_var + bn.eps) ** 0.5
        b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5
        w = l.weight * w[None, :]
        if l.bias is None:
            b = b @ l.weight.T
        else:
            b = (l.weight @ b[:, None]).view(-1) + l.bias
m = nn.Linear(w.size(1), w.size(0), device=l.weight.device, dtype=l.weight.dtype)
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
class PatchMerging(nn.Module):
def __init__(self, dim: int, out_dim: int, act_layer: LayerType = nn.ReLU):
super().__init__()
hid_dim = int(dim * 4)
self.conv1 = Conv2dNorm(dim, hid_dim)
self.act1 = act_layer()
self.conv2 = Conv2dNorm(hid_dim, hid_dim, 3, 2, 1, groups=hid_dim)
self.act2 = act_layer()
self.se = SqueezeExcite(hid_dim, 0.25)
self.conv3 = Conv2dNorm(hid_dim, out_dim)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.conv1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.act2(x)
x = self.se(x)
x = self.conv3(x)
return x
class FFN(nn.Module):
def __init__(self, dim: int, embed_dim: int, act_layer: LayerType = nn.ReLU):
super().__init__()
self.pw1 = Conv2dNorm(dim, embed_dim)
self.act = act_layer()
self.pw2 = Conv2dNorm(embed_dim, dim, bn_weight_init=0)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.pw1(x)
x = self.act(x)
x = self.pw2(x)
return x
class SHSA(nn.Module):
"""Single-Head Self-Attention"""
def __init__(
self,
dim: int,
qk_dim: int,
pdim: int,
norm_layer: LayerType = GroupNorm1,
act_layer: LayerType = nn.ReLU,
):
super().__init__()
self.scale = qk_dim ** -0.5
self.qk_dim = qk_dim
self.dim = dim
self.pdim = pdim
self.pre_norm = norm_layer(pdim)
self.qkv = Conv2dNorm(pdim, qk_dim * 2 + pdim)
self.proj = nn.Sequential(act_layer(), Conv2dNorm(dim, dim, bn_weight_init=0))
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, _, H, W = x.shape
        x1, x2 = torch.split(x, [self.pdim, self.dim - self.pdim], dim=1)
x1 = self.pre_norm(x1)
qkv = self.qkv(x1)
q, k, v = torch.split(qkv, [self.qk_dim, self.qk_dim, self.pdim], dim=1)
q, k, v = q.flatten(2), k.flatten(2), v.flatten(2)
attn = (q.transpose(-2, -1) @ k) * self.scale
attn = attn.softmax(dim=-1)
x1 = (v @ attn.transpose(-2, -1)).reshape(B, self.pdim, H, W)
        x = self.proj(torch.cat([x1, x2], dim=1))
return x
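def _shsa_example():
    # Illustrative sketch, not part of the original module: attention runs
    # only on the first pdim channels; the remaining dim - pdim channels skip
    # attention entirely and are mixed back in by the output projection.
    attn = SHSA(dim=128, qk_dim=16, pdim=32)
    assert attn(torch.randn(2, 128, 14, 14)).shape == (2, 128, 14, 14)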
class BasicBlock(nn.Module):
def __init__(
self,
dim: int,
qk_dim: int,
pdim: int,
type: str,
norm_layer: LayerType = GroupNorm1,
act_layer: LayerType = nn.ReLU,
):
super().__init__()
self.conv = Residual(Conv2dNorm(dim, dim, 3, 1, 1, groups=dim, bn_weight_init=0))
if type == "s":
self.mixer = Residual(SHSA(dim, qk_dim, pdim, norm_layer, act_layer))
else:
self.mixer = nn.Identity()
        self.ffn = Residual(FFN(dim, int(dim * 2), act_layer))
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.conv(x)
x = self.mixer(x)
x = self.ffn(x)
return x
class StageBlock(nn.Module):
def __init__(
self,
prev_dim: int,
dim: int,
qk_dim: int,
pdim: int,
type: str,
depth: int,
norm_layer: LayerType = GroupNorm1,
act_layer: LayerType = nn.ReLU,
):
super().__init__()
self.grad_checkpointing = False
self.downsample = nn.Sequential(
Residual(Conv2dNorm(prev_dim, prev_dim, 3, 1, 1, groups=prev_dim)),
Residual(FFN(prev_dim, int(prev_dim * 2), act_layer)),
PatchMerging(prev_dim, dim, act_layer),
Residual(Conv2dNorm(dim, dim, 3, 1, 1, groups=dim)),
Residual(FFN(dim, int(dim * 2), act_layer)),
) if prev_dim != dim else nn.Identity()
self.blocks = nn.Sequential(*[
BasicBlock(dim, qk_dim, pdim, type, norm_layer, act_layer) for _ in range(depth)
])
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class SHViT(nn.Module):
def __init__(
self,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
embed_dim: Tuple[int, int, int] = (128, 256, 384),
partial_dim: Tuple[int, int, int] = (32, 64, 96),
qk_dim: Tuple[int, int, int] = (16, 16, 16),
depth: Tuple[int, int, int] = (1, 2, 3),
types: Tuple[str, str, str] = ("s", "s", "s"),
drop_rate: float = 0.,
norm_layer: LayerType = GroupNorm1,
act_layer: LayerType = nn.ReLU,
):
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
self.feature_info = []
# Patch embedding
stem_chs = embed_dim[0]
self.patch_embed = nn.Sequential(
Conv2dNorm(in_chans, stem_chs // 8, 3, 2, 1),
act_layer(),
Conv2dNorm(stem_chs // 8, stem_chs // 4, 3, 2, 1),
act_layer(),
Conv2dNorm(stem_chs // 4, stem_chs // 2, 3, 2, 1),
act_layer(),
Conv2dNorm(stem_chs // 2, stem_chs, 3, 2, 1)
)
# Build SHViT blocks
stages = []
prev_chs = stem_chs
for i in range(len(embed_dim)):
stages.append(StageBlock(
prev_dim=prev_chs,
dim=embed_dim[i],
qk_dim=qk_dim[i],
pdim=partial_dim[i],
type=types[i],
depth=depth[i],
norm_layer=norm_layer,
act_layer=act_layer,
))
prev_chs = embed_dim[i]
self.feature_info.append(dict(num_chs=prev_chs, reduction=2**(i+4), module=f'stages.{i}'))
self.stages = nn.Sequential(*stages)
# Classifier head
self.num_features = self.head_hidden_size = embed_dim[-1]
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.head = NormLinear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity()
@torch.jit.ignore
def no_weight_decay(self) -> Set:
return set()
@torch.jit.ignore
def group_matcher(self, coarse: bool = False) -> Dict[str, Any]:
matcher = dict(
stem=r'^patch_embed', # stem and embed
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.l
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
# cannot meaningfully change pooling of efficient head after creation
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.head = NormLinear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity()
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
        Returns:
            Tuple of (final features, intermediates), or just the intermediates if intermediates_only is True.
"""
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.stages), indices)
# forward pass
x = self.patch_embed(x)
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
stages = self.stages
else:
stages = self.stages[:max_index + 1]
for feat_idx, stage in enumerate(stages):
x = stage(x)
if feat_idx in take_indices:
intermediates.append(x)
if intermediates_only:
return intermediates
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.stages), indices)
self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.patch_embed(x)
x = self.stages(x)
return x
def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor:
x = self.global_pool(x)
x = self.flatten(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
return x if pre_logits else self.head(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
x = self.forward_head(x)
return x
@torch.no_grad()
def fuse(self):
def fuse_children(net):
for child_name, child in net.named_children():
if hasattr(child, 'fuse'):
fused = child.fuse()
setattr(net, child_name, fused)
fuse_children(fused)
else:
fuse_children(child)
fuse_children(self)
def checkpoint_filter_fn(state_dict: Dict[str, torch.Tensor], model: nn.Module) -> Dict[str, torch.Tensor]:
state_dict = state_dict.get('model', state_dict)
# out_dict = {}
# import re
# replace_rules = [
# (re.compile(r'^blocks1\.'), 'stages.0.blocks.'),
# (re.compile(r'^blocks2\.'), 'stages.1.blocks.'),
# (re.compile(r'^blocks3\.'), 'stages.2.blocks.'),
# ]
# downsample_mapping = {}
# for i in range(1, 3):
# downsample_mapping[f'^stages\\.{i}\\.blocks\\.0\\.0\\.'] = f'stages.{i}.downsample.0.'
# downsample_mapping[f'^stages\\.{i}\\.blocks\\.0\\.1\\.'] = f'stages.{i}.downsample.1.'
# downsample_mapping[f'^stages\\.{i}\\.blocks\\.1\\.'] = f'stages.{i}.downsample.2.'
# downsample_mapping[f'^stages\\.{i}\\.blocks\\.2\\.0\\.'] = f'stages.{i}.downsample.3.'
# downsample_mapping[f'^stages\\.{i}\\.blocks\\.2\\.1\\.'] = f'stages.{i}.downsample.4.'
# for j in range(3, 10):
# downsample_mapping[f'^stages\\.{i}\\.blocks\\.{j}\\.'] = f'stages.{i}.blocks.{j - 3}.'
#
# downsample_patterns = [
# (re.compile(pattern), replacement) for pattern, replacement in downsample_mapping.items()]
#
# for k, v in state_dict.items():
# for pattern, replacement in replace_rules:
# k = pattern.sub(replacement, k)
# for pattern, replacement in downsample_patterns:
# k = pattern.sub(replacement, k)
# out_dict[k] = v
return state_dict
def _cfg(url: str = '', **kwargs: Any) -> Dict[str, Any]:
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (4, 4),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.0.c', 'classifier': 'head.l',
'paper_ids': 'arXiv:2401.16456',
'paper_name': 'SHViT: Single-Head Vision Transformer with Memory Efficient Macro Design',
'origin_url': 'https://github.com/ysj9909/SHViT',
**kwargs
}
default_cfgs = generate_default_cfgs({
'shvit_s1.in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/ysj9909/SHViT/releases/download/v1.0/shvit_s1.pth',
),
'shvit_s2.in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/ysj9909/SHViT/releases/download/v1.0/shvit_s2.pth',
),
'shvit_s3.in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/ysj9909/SHViT/releases/download/v1.0/shvit_s3.pth',
),
'shvit_s4.in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/ysj9909/SHViT/releases/download/v1.0/shvit_s4.pth',
input_size=(3, 256, 256),
),
})
def _create_shvit(variant: str, pretrained: bool = False, **kwargs: Any) -> SHViT:
model = build_model_with_cfg(
SHViT, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=(0, 1, 2), flatten_sequential=True),
**kwargs,
)
return model
@register_model
def shvit_s1(pretrained: bool = False, **kwargs: Any) -> SHViT:
model_args = dict(
embed_dim=(128, 224, 320), depth=(2, 4, 5), partial_dim=(32, 48, 68), types=("i", "s", "s"))
return _create_shvit('shvit_s1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def shvit_s2(pretrained: bool = False, **kwargs: Any) -> SHViT:
model_args = dict(
embed_dim=(128, 308, 448), depth=(2, 4, 5), partial_dim=(32, 66, 96), types=("i", "s", "s"))
return _create_shvit('shvit_s2', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def shvit_s3(pretrained: bool = False, **kwargs: Any) -> SHViT:
model_args = dict(
embed_dim=(192, 352, 448), depth=(3, 5, 5), partial_dim=(48, 75, 96), types=("i", "s", "s"))
return _create_shvit('shvit_s3', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def shvit_s4(pretrained: bool = False, **kwargs: Any) -> SHViT:
model_args = dict(
embed_dim=(224, 336, 448), depth=(4, 7, 6), partial_dim=(48, 72, 96), types=("i", "s", "s"))
return _create_shvit('shvit_s4', pretrained=pretrained, **dict(model_args, **kwargs))
| pytorch-image-models/timm/models/shvit.py/0 | {
"file_path": "pytorch-image-models/timm/models/shvit.py",
"repo_id": "pytorch-image-models",
"token_count": 9449
} | 249 |
""" Vision Transformer (ViT) in PyTorch
A PyTorch implementation of Vision Transformers as described in:
'Exploring Plain Vision Transformer Backbones for Object Detection'
- https://arxiv.org/abs/2203.16527
'Segment Anything Model (SAM)'
- https://github.com/facebookresearch/segment-anything/
"""
import logging
from functools import partial
from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import PatchEmbed, Mlp, DropPath, PatchDropout, LayerNorm2d, ClassifierHead, NormMlpClassifierHead, \
Format, resample_abs_pos_embed_nhwc, RotaryEmbeddingCat, apply_rot_embed_cat, to_2tuple, use_fused_attn
from torch.jit import Final
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._features_fx import register_notrace_function
from ._manipulate import checkpoint, checkpoint_seq
from ._registry import generate_default_cfgs, register_model
# model_registry will add each entrypoint fn to this
__all__ = ['VisionTransformerSAM']
_logger = logging.getLogger(__name__)
def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
"""
Get relative positional embeddings according to the relative positions of
query and key sizes.
Args:
q_size (int): size of query q.
k_size (int): size of key k.
rel_pos (Tensor): relative position embeddings (L, C).
Returns:
Extracted positional embeddings according to relative positions.
"""
max_rel_dist = int(2 * max(q_size, k_size) - 1)
# Interpolate rel pos if needed.
if rel_pos.shape[0] != max_rel_dist:
# Interpolate rel pos.
rel_pos_resized = F.interpolate(
rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
size=max_rel_dist,
mode="linear",
)
rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
else:
rel_pos_resized = rel_pos
# Scale the coords with short length if shapes for q and k are different.
q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
return rel_pos_resized[relative_coords.long()]
register_notrace_function(get_rel_pos)
def get_decomposed_rel_pos_bias(
q: torch.Tensor,
rel_pos_h: torch.Tensor,
rel_pos_w: torch.Tensor,
q_size: Tuple[int, int],
k_size: Tuple[int, int],
) -> torch.Tensor:
"""
Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py
Args:
q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
Returns:
bias (Tensor): attention bias to add to attention map
"""
q_h, q_w = q_size
k_h, k_w = k_size
Rh = get_rel_pos(q_h, k_h, rel_pos_h)
Rw = get_rel_pos(q_w, k_w, rel_pos_w)
B, _, dim = q.shape
r_q = q.reshape(B, q_h, q_w, dim)
rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
attn_bias = rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
return attn_bias.reshape(-1, q_h * q_w, k_h * k_w)
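def _decomposed_rel_pos_example():
    # Illustrative sketch, not part of the original module: for a 14x14
    # query/key grid with head_dim 64, each axis table has 2 * 14 - 1 = 27
    # rows and the resulting additive bias covers all 196 x 196 token pairs.
    q = torch.randn(6, 14 * 14, 64)  # (B * num_heads, q_h * q_w, head_dim)
    rel = torch.randn(27, 64)
    bias = get_decomposed_rel_pos_bias(q, rel, rel, (14, 14), (14, 14))
    assert bias.shape == (6, 196, 196)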
class Attention(nn.Module):
fused_attn: Final[bool]
def __init__(
self,
dim,
num_heads=8,
qkv_bias=True,
qk_norm=False,
attn_drop=0.,
proj_drop=0.,
norm_layer=nn.LayerNorm,
use_rel_pos: bool = False,
input_size: Optional[Tuple[int, int]] = None,
rope: Optional[nn.Module] = None,
):
super().__init__()
assert dim % num_heads == 0, 'dim should be divisible by num_heads'
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.use_rel_pos = use_rel_pos
if self.use_rel_pos:
assert rope is None
assert (
input_size is not None
), "Input size must be provided if using relative positional encoding."
# initialize relative positional embeddings
self.rel_pos_h = nn.Parameter(torch.zeros(
2 * input_size[0] - 1, self.head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(
2 * input_size[1] - 1, self.head_dim))
self.rope = rope
def forward(self, x):
B, H, W, _ = x.shape
N = H * W
x = x.reshape(B, N, -1)
qkv = self.qkv(x).view(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
# qkv with shape (3, B, nHead, H * W, C)
q, k, v = qkv.reshape(3, B * self.num_heads, N, -1).unbind(0)
# q, k, v with shape (B * nHead, H * W, C)
q, k = self.q_norm(q), self.k_norm(k)
if self.use_rel_pos:
attn_bias = get_decomposed_rel_pos_bias(q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
else:
attn_bias = None
if self.rope is not None:
rope = self.rope.get_embed()
q = apply_rot_embed_cat(q, rope).type_as(v)
k = apply_rot_embed_cat(k, rope).type_as(v)
if self.fused_attn:
x = torch.nn.functional.scaled_dot_product_attention(
q, k, v,
attn_mask=attn_bias,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
if attn_bias is not None:
attn = attn + attn_bias
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.view(B, self.num_heads, N, -1).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
x = x.view(B, H, W, -1)
return x
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=True,
qk_norm=False,
proj_drop=0.,
attn_drop=0.,
init_values=None,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
mlp_layer=Mlp,
use_rel_pos=False,
window_size=0,
input_size=None,
rope=None,
):
super().__init__()
self.window_size = window_size
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
attn_drop=attn_drop,
proj_drop=proj_drop,
norm_layer=norm_layer,
use_rel_pos=use_rel_pos,
input_size=input_size if window_size == 0 else (window_size, window_size),
rope=rope,
)
self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = mlp_layer(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
B, H, W, _ = x.shape
shortcut = x
x = self.norm1(x)
# Window partition
pad_hw: Optional[Tuple[int, int]] = None
if self.window_size > 0:
x, pad_hw = window_partition(x, self.window_size)
x = self.drop_path1(self.ls1(self.attn(x)))
# Reverse window partition
if self.window_size > 0:
x = window_unpartition(x, self.window_size, (H, W), pad_hw)
x = shortcut + x
x = x.reshape(B, H * W, -1) # MLP is faster for N, L, C tensor
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
x = x.reshape(B, H, W, -1)
return x
def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
"""
Partition into non-overlapping windows with padding if needed.
Args:
x (tensor): input tokens with [B, H, W, C].
window_size (int): window size.
Returns:
windows: windows after partition with [B * num_windows, window_size, window_size, C].
(Hp, Wp): padded height and width before partition
"""
B, H, W, C = x.shape
pad_h = (window_size - H % window_size) % window_size
pad_w = (window_size - W % window_size) % window_size
x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
Hp, Wp = H + pad_h, W + pad_w
x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows, (Hp, Wp)
def window_unpartition(
windows: torch.Tensor, window_size: int, hw: Tuple[int, int], pad_hw: Optional[Tuple[int, int]] = None,
) -> torch.Tensor:
"""
Window unpartition into original sequences and removing padding.
Args:
windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
window_size (int): window size.
pad_hw (Tuple): padded height and width (Hp, Wp).
hw (Tuple): original height and width (H, W) before padding.
Returns:
x: unpartitioned sequences with [B, H, W, C].
"""
Hp, Wp = pad_hw if pad_hw is not None else hw
H, W = hw
B = windows.shape[0] // (Hp * Wp // window_size // window_size)
x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
x = x[:, :H, :W, :].contiguous()
return x
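def _window_roundtrip_example():
    # Illustrative sketch, not part of the original module: partition pads
    # H/W up to a multiple of the window size, so unpartition needs both the
    # padded and original sizes to crop back exactly.
    x = torch.randn(1, 30, 30, 8)
    windows, pad_hw = window_partition(x, 14)
    assert windows.shape == (9, 14, 14, 8) and pad_hw == (42, 42)
    assert torch.equal(window_unpartition(windows, 14, (30, 30), pad_hw), x)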
class VisionTransformerSAM(nn.Module):
""" Vision Transformer for Segment-Anything Model(SAM)
A PyTorch impl of : `Exploring Plain Vision Transformer Backbones for Object Detection` or `Segment Anything Model (SAM)`
- https://arxiv.org/abs/2010.11929
"""
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
num_classes: int = 768,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
qk_norm: bool = False,
init_values: Optional[float] = None,
pre_norm: bool = False,
drop_rate: float = 0.,
pos_drop_rate: float = 0.,
patch_drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.,
weight_init: str = '',
embed_layer: Callable = partial(PatchEmbed, output_fmt=Format.NHWC, strict_img_size=False),
norm_layer: Optional[Callable] = nn.LayerNorm,
act_layer: Optional[Callable] = nn.GELU,
block_fn: Callable = Block,
mlp_layer: Callable = Mlp,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
use_rope: bool = False,
window_size: int = 14,
global_attn_indexes: Tuple[int, ...] = (),
neck_chans: int = 256,
global_pool: str = 'avg',
head_hidden_size: Optional[int] = None,
ref_feat_shape: Optional[Tuple[Tuple[int, int], Tuple[int, int]]] = None
):
"""
Args:
img_size: Input image size.
patch_size: Patch size.
in_chans: Number of image input channels.
num_classes: Number of classes for classification head.
embed_dim: Transformer embedding dimension.
depth: Depth of transformer.
num_heads: Number of attention heads.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
qkv_bias: Enable bias for qkv projections if True.
init_values: Layer-scale init values (layer-scale enabled if not None).
drop_rate: Head dropout rate.
pos_drop_rate: Position embedding dropout rate.
attn_drop_rate: Attention dropout rate.
drop_path_rate: Stochastic depth rate.
weight_init: Weight initialization scheme.
embed_layer: Patch embedding layer.
norm_layer: Normalization layer.
act_layer: MLP activation layer.
block_fn: Transformer block layer.
use_abs_pos: If True, use absolute positional embeddings.
use_rel_pos: If True, add relative positional embeddings to the attention map.
use_rope: If True, add rotary position embeddings to q/k in attention block.
window_size: Window size for window attention blocks. If 0, not use window attention.
global_attn_indexes: Indexes for blocks using global attention. Used when window_size > 0.
global_pool: Global pooling type.
head_hidden_size: If set, use NormMlpHead
ref_feat_shape: Tuple of reference feature shapes for ROPE, (global, local)
"""
super().__init__()
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models
self.grad_checkpointing = False
self.patch_embed = embed_layer(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
bias=not pre_norm, # disable bias if pre-norm is used
)
grid_size = self.patch_embed.grid_size
r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(torch.zeros(1, grid_size[0], grid_size[1], embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=pos_drop_rate)
if patch_drop_rate > 0:
self.patch_drop = PatchDropout(
patch_drop_rate,
num_prefix_tokens=0,
)
else:
self.patch_drop = nn.Identity()
self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity()
if use_rope:
assert not use_rel_pos, "ROPE and relative pos embeddings should not be enabled at same time"
if ref_feat_shape is not None:
assert len(ref_feat_shape) == 2
ref_feat_shape_global = to_2tuple(ref_feat_shape[0])
ref_feat_shape_window = to_2tuple(ref_feat_shape[1])
else:
ref_feat_shape_global = ref_feat_shape_window = None
self.rope_global = RotaryEmbeddingCat(
embed_dim // num_heads,
in_pixels=False,
feat_shape=grid_size,
ref_feat_shape=ref_feat_shape_global,
)
self.rope_window = RotaryEmbeddingCat(
embed_dim // num_heads,
in_pixels=False,
feat_shape=to_2tuple(window_size),
ref_feat_shape=ref_feat_shape_window,
)
else:
self.rope_global = None
self.rope_window = None
# stochastic depth decay rule
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
self.blocks = nn.Sequential(*[
block_fn(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
init_values=init_values,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
mlp_layer=mlp_layer,
use_rel_pos=use_rel_pos,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=grid_size,
rope=self.rope_window if i not in global_attn_indexes else self.rope_global,
)
for i in range(depth)])
self.feature_info = [
dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)]
if neck_chans:
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
neck_chans,
kernel_size=1,
bias=False,
),
LayerNorm2d(neck_chans),
nn.Conv2d(
neck_chans,
neck_chans,
kernel_size=3,
padding=1,
bias=False,
),
LayerNorm2d(neck_chans),
)
self.num_features = neck_chans
else:
if head_hidden_size:
self.neck = nn.Identity()
else:
# should have a final norm with standard ClassifierHead
self.neck = LayerNorm2d(embed_dim)
neck_chans = embed_dim
# Classifier Head
if head_hidden_size:
self.head = NormMlpClassifierHead(
neck_chans,
num_classes,
hidden_size=head_hidden_size,
pool_type=global_pool,
drop_rate=drop_rate,
)
else:
self.head = ClassifierHead(
neck_chans,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'dist_token'}
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^pos_embed|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to all intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
        Returns:
            Tuple of (neck output, intermediates), or just the intermediates if intermediates_only is True.
"""
assert output_fmt == 'NCHW', 'Output shape for ViT-SAM must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.blocks), indices)
# forward pass, collect intermediates
x = self.patch_embed(x)
if self.pos_embed is not None:
# dynamically resize abs pos embedding if needed
x = x + resample_abs_pos_embed_nhwc(self.pos_embed, x.shape[1:3])
x = self.pos_drop(x)
x = self.patch_drop(x)
x = self.norm_pre(x)
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
blocks = self.blocks
else:
blocks = self.blocks[:max_index + 1]
for i, blk in enumerate(blocks):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(blk, x)
else:
x = blk(x)
if i in take_indices:
# make output BCHW
if norm:
# norm is intertwined with neck convs so apply both, changes the dim
# FIXME only apply to final? Need experiments
intermediates.append(self.neck(x.permute(0, 3, 1, 2)))
else:
intermediates.append(x.permute(0, 3, 1, 2))
if intermediates_only:
return intermediates
x = self.neck(x.permute(0, 3, 1, 2))
return x, intermediates
def prune_intermediate_layers(
self,
indices: Optional[Union[int, List[int]]] = None,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.blocks), indices)
self.blocks = self.blocks[:max_index + 1] # truncate blocks
if prune_norm:
# neck is being treated as equivalent to final norm here
self.neck = nn.Identity()
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x = self.patch_embed(x)
if self.pos_embed is not None:
# dynamically resize abs pos embedding if needed
x = x + resample_abs_pos_embed_nhwc(self.pos_embed, x.shape[1:3])
x = self.pos_drop(x)
x = self.patch_drop(x)
x = self.norm_pre(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
x = self.neck(x.permute(0, 3, 1, 2))
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(
state_dict,
model,
):
""" Remap SAM checkpoints -> timm """
sam_checkpoint = 'image_encoder.patch_embed.proj.weight' in state_dict
out_dict = {}
for k, v in state_dict.items():
if k.startswith('image_encoder.'):
k = k[14:]
k = k.replace('mlp.lin', 'mlp.fc')
else:
if sam_checkpoint:
continue
out_dict[k] = v
return out_dict
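def _sam_remap_example():
    # Illustrative sketch, not part of the original module: SAM checkpoints
    # prefix encoder keys with 'image_encoder.' and name MLP layers 'lin';
    # non-encoder keys (prompt/mask decoder) are dropped. Keys below are
    # hypothetical.
    sd = {
        'image_encoder.patch_embed.proj.weight': torch.zeros(1),
        'image_encoder.blocks.0.mlp.lin1.weight': torch.zeros(1),
        'mask_decoder.iou_token.weight': torch.zeros(1),
    }
    out = checkpoint_filter_fn(sd, None)
    assert 'blocks.0.mlp.fc1.weight' in out
    assert 'mask_decoder.iou_token.weight' not in out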
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 1024, 1024), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
# Segment-Anything Model (SAM) pretrained - https://github.com/facebookresearch/segment-anything (no classifier head, for fine-tune/features only)
'samvit_base_patch16.sa1b': _cfg(
url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth',
hf_hub_id='timm/',
license='apache-2.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 1024, 1024), crop_pct=1.0),
'samvit_large_patch16.sa1b': _cfg(
url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth',
hf_hub_id='timm/',
license='apache-2.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 1024, 1024), crop_pct=1.0),
'samvit_huge_patch16.sa1b': _cfg(
url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth',
hf_hub_id='timm/',
license='apache-2.0',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0,
input_size=(3, 1024, 1024), crop_pct=1.0),
'samvit_base_patch16_224': _cfg(
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=1000,
input_size=(3, 224, 224), crop_pct=0.9),
})
def _create_vision_transformer(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', 3)
return build_model_with_cfg(
VisionTransformerSAM,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
**kwargs,
)
@register_model
def samvit_base_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM:
""" ViT-B/16 for Segment-Anything
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, global_attn_indexes=[2, 5, 8, 11],
window_size=14, use_rel_pos=True, img_size=1024,
)
model = _create_vision_transformer(
'samvit_base_patch16', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def samvit_large_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM:
""" ViT-L/16 for Segment-Anything
"""
model_args = dict(
patch_size=16, embed_dim=1024, depth=24, num_heads=16, global_attn_indexes=[5, 11, 17, 23],
window_size=14, use_rel_pos=True, img_size=1024,
)
model = _create_vision_transformer(
'samvit_large_patch16', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def samvit_huge_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM:
""" ViT-H/16 for Segment-Anything
"""
model_args = dict(
patch_size=16, embed_dim=1280, depth=32, num_heads=16, global_attn_indexes=[7, 15, 23, 31],
window_size=14, use_rel_pos=True, img_size=1024,
)
model = _create_vision_transformer(
'samvit_huge_patch16', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def samvit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformerSAM:
""" ViT-B/16 based on samvit arch
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, global_attn_indexes=[2, 5, 8, 11],
window_size=14, use_rel_pos=True, use_abs_pos=False, img_size=224, neck_chans=None,
)
model = _create_vision_transformer(
'samvit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
| pytorch-image-models/timm/models/vision_transformer_sam.py/0 | {
"file_path": "pytorch-image-models/timm/models/vision_transformer_sam.py",
"repo_id": "pytorch-image-models",
"token_count": 13996
} | 250 |
""" AdamW Optimizer
Impl copied from PyTorch master
References for added functionality:
Cautious Optimizers: https://arxiv.org/abs/2411.16085
Why Gradients Rapidly Increase Near the End of Training: https://arxiv.org/abs/2506.02285
NOTE: This impl has been deprecated in favour of torch.optim.AdamW and remains as a reference
"""
import math
from typing import List, Optional, Tuple
import torch
from torch import Tensor
from torch.optim.optimizer import Optimizer
from ._types import ParamsT
class AdamWLegacy(Optimizer):
r"""Implements AdamW algorithm.
NOTE: This impl has been deprecated in favour of torch.optim.AdamW and remains as a reference
References:
- Adam: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980
- Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101
- On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ
Args:
params: iterable of parameters to optimize or dicts defining parameter groups
lr: learning rate
betas: coefficients used for computing running averages of gradient and its square
eps: term added to the denominator to improve numerical stability
weight_decay: weight decay coefficient
amsgrad: whether to use the AMSGrad variant of this algorithm
from the paper `On the Convergence of Adam and Beyond`
caution: apply caution when using AdamW
corrected_weight_decay: apply corrected weight decay (lr**2 / max_lr)
maximize: maximize the params based on the objective, instead of minimizing
foreach: whether foreach implementation of optimizer is used.
If unspecified by the user (so foreach is None), we will try to use
foreach over for-loop implementation on CUDA, since it is faster in general.
capturable: whether this instance is safe to capture in a CUDA graph.
Passing True can impair ungraphed performance, so if you don't intend to
graph capture this instance, leave it False
"""
def __init__(
self,
params: ParamsT,
lr: float = 1e-3,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-8,
weight_decay: float = 1e-2,
amsgrad: bool = False,
caution: bool = False,
corrected_weight_decay: bool = False,
maximize: bool = False,
foreach: Optional[bool] = None,
capturable: bool = False,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
amsgrad=amsgrad,
caution=caution,
corrected_weight_decay=corrected_weight_decay,
foreach=foreach,
maximize=maximize,
capturable=capturable,
)
super(AdamWLegacy, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamWLegacy, self).__setstate__(state)
state_values = list(self.state.values())
step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step'])
if not step_is_tensor:
for s in state_values:
s['step'] = torch.tensor(float(s['step']))
for group in self.param_groups:
group.setdefault('amsgrad', False)
group.setdefault('caution', False)
group.setdefault('corrected_weight_decay', False)
group.setdefault('foreach', None)
group.setdefault('maximize', False)
group.setdefault('capturable', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
self._cuda_graph_capture_health_check()
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
max_exp_avg_sqs = []
state_steps = []
beta1, beta2 = group['betas']
amsgrad = group['amsgrad']
for p in group['params']:
if p.grad is None:
continue
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError('AdamW does not support sparse gradients')
grads.append(p.grad)
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = torch.tensor(0.)
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
if amsgrad:
max_exp_avg_sqs.append(state.get('max_exp_avg_sq', None))
state_steps.append(state['step'])
adamw(
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
foreach=group['foreach'],
amsgrad=amsgrad,
beta1=beta1,
beta2=beta2,
lr=group['lr'],
weight_decay=group['weight_decay'],
eps=group['eps'],
caution=group['caution'],
maximize=group['maximize'],
capturable=group['capturable'],
max_lr=self.defaults['lr'] if group['corrected_weight_decay'] else None,
)
return loss
def adamw(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
max_exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
foreach: Optional[bool] = None,
capturable: bool = False,
*,
amsgrad: bool,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float,
caution: bool,
maximize: bool,
max_lr: Optional[float],
) -> None:
r"""Functional API that performs AdamW algorithm computation.
See AdamWLegacy class for details.
"""
if not all(isinstance(t, torch.Tensor) for t in state_steps):
raise RuntimeError(
'API has changed, `state_steps` argument must contain a list of' +
' singleton tensors')
if foreach is None:
try:
# cannot do foreach if this overload doesn't exist when caution enabled
foreach = not caution or 'Scalar' in torch.ops.aten._foreach_maximum_.overloads()
        except Exception:
foreach = False
if foreach and not torch.jit.is_scripting():
func = _multi_tensor_adamw
else:
func = _single_tensor_adamw
func(
params,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=amsgrad,
beta1=beta1,
beta2=beta2,
lr=lr,
weight_decay=weight_decay,
eps=eps,
caution=caution,
maximize=maximize,
capturable=capturable,
max_lr=max_lr,
)
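def _corrected_weight_decay_example():
    # Illustrative sketch, not part of the optimizer: with
    # corrected_weight_decay the decay multiplier becomes lr**2 / max_lr
    # rather than lr (https://arxiv.org/abs/2506.02285), so decay shrinks
    # quadratically as a schedule lowers lr below its peak value.
    max_lr, wd = 1e-3, 0.05
    for lr in (1e-3, 5e-4, 1e-4):
        assert (lr ** 2 / max_lr) * wd <= lr * wd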
def _single_tensor_adamw(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
max_exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
*,
amsgrad: bool,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float,
caution: bool,
maximize: bool,
capturable: bool,
max_lr: Optional[float],
):
for i, param in enumerate(params):
grad = grads[i] if not maximize else -grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step_t = state_steps[i]
# Update step.
step_t += 1
# Perform stepweight decay.
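# With corrected_weight_decay, max_lr is set and the decay scales by lr**2 / max_lr (https://arxiv.org/abs/2506.02285)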
wd_scale = lr if max_lr is None else lr ** 2 / max_lr
param.mul_(1. - wd_scale * weight_decay)
# Decay the first and second moment running average coefficient.
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
max_exp_avg_sq = max_exp_avg_sqs[i]
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
denom_base = max_exp_avg_sq
else:
denom_base = exp_avg_sq
if capturable:
step = step_t
# 1 - beta1 ** step can't be captured in a CUDA graph, even if step is a CUDA tensor
# (incurs "RuntimeError: CUDA error: operation not permitted when stream is capturing")
bias_correction1 = 1 - torch.pow(beta1, step)
bias_correction2 = 1 - torch.pow(beta2, step)
step_size = lr / bias_correction1
step_size_neg = step_size.neg()
bias_correction2_sqrt = bias_correction2.sqrt()
denom = (denom_base.sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg)
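# the (negative) step size is folded into denom here, so the addcdiv below needs no explicit value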
if caution:
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
# FIXME not 100% sure if this remains capturable?
mask = (exp_avg * grad > 0).to(grad.dtype)
mask.div_(mask.mean().clamp_(min=1e-3))
exp_avg = exp_avg * mask
param.addcdiv_(exp_avg, denom)
else:
step = step_t.item()
bias_correction1 = 1 - beta1 ** step
bias_correction2 = 1 - beta2 ** step
step_size = lr / bias_correction1
bias_correction2_sqrt = math.sqrt(bias_correction2)
denom = (denom_base.sqrt() / bias_correction2_sqrt).add_(eps)
if caution:
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
mask = (exp_avg * grad > 0).to(grad.dtype)
mask.div_(mask.mean().clamp_(min=1e-3))
exp_avg = exp_avg * mask
param.addcdiv_(exp_avg, denom, value=-step_size)
def _multi_tensor_adamw(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
max_exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
*,
amsgrad: bool,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float,
caution: bool,
maximize: bool,
capturable: bool,
max_lr: Optional[float],
):
if len(params) == 0:
return
if capturable:
assert all(
p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)
), "If capturable=True, params and state_steps must be CUDA tensors."
if maximize:
grads = torch._foreach_neg(tuple(grads)) # type: ignore[assignment]
grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads]
exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avgs]
exp_avg_sqs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avg_sqs]
params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in params]
# update steps
torch._foreach_add_(state_steps, 1)
# Perform stepweight decay
wd_scale = lr if max_lr is None else lr ** 2 / max_lr
torch._foreach_mul_(params, 1 - wd_scale * weight_decay)
# Decay the first and second moment running average coefficient
#torch._foreach_lerp_(exp_avgs, grads, 1 - beta1)
torch._foreach_mul_(exp_avgs, beta1)
torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)
torch._foreach_mul_(exp_avg_sqs, beta2)
torch._foreach_addcmul_(exp_avg_sqs, grads, grads, 1 - beta2)
if capturable:
# TODO: use foreach_pow if/when foreach_pow is added
bias_correction1 = [torch.pow(beta1, step) for step in state_steps]
bias_correction2 = [torch.pow(beta2, step) for step in state_steps]
# foreach_sub doesn't allow a scalar as the first arg
torch._foreach_sub_(bias_correction1, 1)
torch._foreach_sub_(bias_correction2, 1)
torch._foreach_neg_(bias_correction1)
torch._foreach_neg_(bias_correction2)
# foreach_div doesn't allow a scalar as the first arg
step_size = torch._foreach_div(bias_correction1, lr)
torch._foreach_reciprocal_(step_size)
torch._foreach_neg_(step_size)
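# step_size now holds -lr / bias_correction1, one tensor per parameter's step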
bias_correction2_sqrt = torch._foreach_sqrt(bias_correction2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
max_exp_avg_sqs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in max_exp_avg_sqs]
torch._foreach_maximum_(max_exp_avg_sqs, exp_avg_sqs)
denom_base = torch._foreach_sqrt(max_exp_avg_sqs)
else:
denom_base = torch._foreach_sqrt(exp_avg_sqs)
torch._foreach_div_(
denom_base,
torch._foreach_mul(bias_correction2_sqrt, step_size)
)
eps_over_step_size = torch._foreach_div(step_size, eps)
torch._foreach_reciprocal_(eps_over_step_size)
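# after the reciprocal, eps_over_step_size == eps / step_size, mirroring the single-tensor denom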
denom = torch._foreach_add(denom_base, eps_over_step_size)
if caution:
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
masks = torch._foreach_mul(exp_avgs, grads)
masks = [(m > 0).to(g.dtype) for m, g in zip(masks, grads)] # capturable?
mask_scale = [m.mean() for m in masks]
torch._foreach_maximum_(mask_scale, 1e-3)
#torch._foreach_clamp_min_(mask_scale, 1e-3)
torch._foreach_div_(masks, mask_scale)
exp_avgs = torch._foreach_mul(exp_avgs, masks)
torch._foreach_addcdiv_(params, exp_avgs, denom)
else:
bias_correction1 = [1 - beta1 ** step.item() for step in state_steps]
bias_correction2 = [1 - beta2 ** step.item() for step in state_steps]
step_size = [(lr / bc) * -1 for bc in bias_correction1]
bias_correction2_sqrt = [math.sqrt(bc) for bc in bias_correction2]
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
max_exp_avg_sqs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in max_exp_avg_sqs]
torch._foreach_maximum_(max_exp_avg_sqs, exp_avg_sqs)
denom = torch._foreach_sqrt(max_exp_avg_sqs)
else:
denom = torch._foreach_sqrt(exp_avg_sqs)
torch._foreach_div_(denom, bias_correction2_sqrt)
torch._foreach_add_(denom, eps)
if caution:
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
masks = torch._foreach_mul(exp_avgs, grads)
masks = [(m > 0).to(g.dtype) for m, g in zip(masks, grads)]
mask_scale = [m.mean() for m in masks]
torch._foreach_maximum_(mask_scale, 1e-3)
#torch._foreach_clamp_min_(mask_scale, 1e-3)
torch._foreach_div_(masks, mask_scale)
exp_avgs = torch._foreach_mul(exp_avgs, masks)
torch._foreach_addcdiv_(params, exp_avgs, denom, step_size)
| pytorch-image-models/timm/optim/adamw.py/0 | {
"file_path": "pytorch-image-models/timm/optim/adamw.py",
"repo_id": "pytorch-image-models",
"token_count": 8144
} | 251 |
""" RMSProp modified to behave like Tensorflow impl
Originally cut & paste from PyTorch RMSProp
https://github.com/pytorch/pytorch/blob/063946d2b3f3f1e953a2a3b54e0b34f1393de295/torch/optim/rmsprop.py
Licensed under BSD-Clause 3 (ish), https://github.com/pytorch/pytorch/blob/master/LICENSE
References for added functionality:
Cautious Optimizers: https://arxiv.org/abs/2411.16085
Why Gradients Rapidly Increase Near the End of Training: https://arxiv.org/abs/2506.02285
Modifications Copyright 2021 Ross Wightman
"""
import torch
from torch.optim import Optimizer
from ._types import ParamsT
class RMSpropTF(Optimizer):
"""Implements RMSprop algorithm (TensorFlow style epsilon)
NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt
and a few other modifications to closer match Tensorflow for matching hyper-params.
Noteworthy changes include:
1. Epsilon applied inside square-root
2. square_avg initialized to ones
3. LR scaling of update accumulated in momentum buffer
Proposed by G. Hinton in his
`course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.
The centered version first appears in `Generating Sequences
With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.
Args:
params: iterable of parameters to optimize or dicts defining parameter groups
lr: learning rate
momentum: momentum factor
alpha: smoothing (decay) constant
eps: term added to the denominator to improve numerical stability
centered: if ``True``, compute the centered RMSProp, the gradient is normalized by an estimation of its variance
weight_decay: weight decay (L2 penalty) (default: 0)
decoupled_decay: decoupled weight decay as per https://arxiv.org/abs/1711.05101
corrected_weight_decay: apply corrected weight decay (lr**2 / max_lr) when decoupled_decay is True
lr_in_momentum: learning rate scaling is included in the momentum buffer update as per defaults in Tensorflow
caution: apply caution
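Example (a minimal usage sketch; the hyper-parameter values here are illustrative, not tuned):
optimizer = RMSpropTF(model.parameters(), lr=0.048, alpha=0.9, eps=1e-3, momentum=0.9)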
"""
def __init__(
self,
params: ParamsT,
lr: float = 1e-2,
alpha: float = 0.9,
eps: float = 1e-10,
weight_decay: float = 0,
momentum: float = 0.,
centered: bool = False,
decoupled_decay: bool = False,
corrected_weight_decay: bool = False,
lr_in_momentum: bool = True,
caution: bool = False,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= momentum:
raise ValueError("Invalid momentum value: {}".format(momentum))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if not 0.0 <= alpha:
raise ValueError("Invalid alpha value: {}".format(alpha))
defaults = dict(
lr=lr,
momentum=momentum,
alpha=alpha,
eps=eps,
centered=centered,
weight_decay=weight_decay,
decoupled_decay=decoupled_decay,
corrected_weight_decay=corrected_weight_decay,
lr_in_momentum=lr_in_momentum,
caution=caution,
)
super(RMSpropTF, self).__init__(params, defaults)
def __setstate__(self, state):
super(RMSpropTF, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('momentum', 0)
group.setdefault('centered', False)
group.setdefault('caution', False)
group.setdefault('corrected_weight_decay', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise RuntimeError('RMSprop does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['square_avg'] = torch.ones_like(p) # PyTorch inits to zero
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p)
square_avg = state['square_avg']
one_minus_alpha = 1. - group['alpha']
state['step'] += 1
if group['weight_decay'] != 0:
if group['decoupled_decay']:
if group['corrected_weight_decay']:
wd_scale = group['lr'] ** 2 / self.defaults['lr']
else:
wd_scale = group['lr']
p.mul_(1. - wd_scale * group['weight_decay'])
else:
grad = grad.add(p, alpha=group['weight_decay'])
# Tensorflow order of ops for updating squared avg
square_avg.add_(grad.pow(2) - square_avg, alpha=one_minus_alpha)
# square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha) # PyTorch original
if group['centered']:
grad_avg = state['grad_avg']
grad_avg.add_(grad - grad_avg, alpha=one_minus_alpha)
avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).add(group['eps']).sqrt_() # eps in sqrt
# grad_avg.mul_(alpha).add_(grad, alpha=1 - alpha) # PyTorch original
else:
avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt
if group['momentum'] > 0:
buf = state['momentum_buffer']
buf.mul_(group['momentum'])
def _apply_caution(_m, _g):
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
mask = (_m * _g > 0).to(_g.dtype)
mask.div_(mask.mean().clamp_(min=1e-3))
return _m * mask
if group['lr_in_momentum']:
# Tensorflow accumulates the LR scaling in the momentum buffer
buf.addcdiv_(grad, avg, value=group['lr'])
if group['caution']:
buf = _apply_caution(buf, grad)
p.add_(-buf)
else:
# PyTorch scales the param update by LR
buf.addcdiv_(grad, avg)
if group['caution']:
buf = _apply_caution(buf, grad)
p.add_(buf, alpha=-group['lr'])
else:
p.addcdiv_(grad, avg, value=-group['lr'])
return loss
| pytorch-image-models/timm/optim/rmsprop_tf.py/0 | {
"file_path": "pytorch-image-models/timm/optim/rmsprop_tf.py",
"repo_id": "pytorch-image-models",
"token_count": 3755
} | 252 |
""" Checkpoint Saver
Track top-n training checkpoints and maintain recovery checkpoints on specified intervals.
Hacked together by / Copyright 2020 Ross Wightman
"""
import glob
import logging
import operator
import os
import shutil
import torch
from .model import unwrap_model, get_state_dict
_logger = logging.getLogger(__name__)
class CheckpointSaver:
def __init__(
self,
model,
optimizer,
args=None,
model_ema=None,
amp_scaler=None,
checkpoint_prefix='checkpoint',
recovery_prefix='recovery',
checkpoint_dir='',
recovery_dir='',
decreasing=False,
max_history=10,
unwrap_fn=unwrap_model
):
# objects to save state_dicts of
self.model = model
self.optimizer = optimizer
self.args = args
self.model_ema = model_ema
self.amp_scaler = amp_scaler
# state
self.checkpoint_files = [] # (filename, metric) tuples, kept sorted from best to worst
self.best_epoch = None
self.best_metric = None
self.curr_recovery_file = ''
self.prev_recovery_file = ''
self.can_hardlink = True
# config
self.checkpoint_dir = checkpoint_dir
self.recovery_dir = recovery_dir
self.save_prefix = checkpoint_prefix
self.recovery_prefix = recovery_prefix
self.extension = '.pth.tar'
self.decreasing = decreasing # a lower metric is better if True
self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs
self.max_history = max_history
self.unwrap_fn = unwrap_fn
assert self.max_history >= 1
def _replace(self, src, dst):
if self.can_hardlink:
try:
if os.path.exists(dst):
os.unlink(dst) # required for Windows support.
except (OSError, NotImplementedError) as e:
self.can_hardlink = False
os.replace(src, dst)
def _duplicate(self, src, dst):
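# hard-link when possible to avoid duplicating checkpoint bytes; fall back to a real copy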
if self.can_hardlink:
try:
if os.path.exists(dst):
# for Windows
os.unlink(dst)
os.link(src, dst)
return
except (OSError, NotImplementedError) as e:
self.can_hardlink = False
shutil.copy2(src, dst)
def _save(self, save_path, epoch, metric=None):
save_state = {
'epoch': epoch,
'arch': type(self.model).__name__.lower(),
'state_dict': get_state_dict(self.model, self.unwrap_fn),
'optimizer': self.optimizer.state_dict(),
'version': 2, # version < 2 increments epoch before save
}
if self.args is not None:
save_state['arch'] = self.args.model
save_state['args'] = self.args
if self.amp_scaler is not None:
save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict()
if self.model_ema is not None:
save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn)
if metric is not None:
save_state['metric'] = metric
torch.save(save_state, save_path)
def _cleanup_checkpoints(self, trim=0):
trim = min(len(self.checkpoint_files), trim)
delete_index = self.max_history - trim
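# checkpoint_files is kept sorted best-first, so everything past delete_index is the worst of the set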
if delete_index < 0 or len(self.checkpoint_files) <= delete_index:
return
to_delete = self.checkpoint_files[delete_index:]
for d in to_delete:
try:
_logger.debug("Cleaning checkpoint: {}".format(d))
os.remove(d[0])
except Exception as e:
_logger.error("Exception '{}' while deleting checkpoint".format(e))
self.checkpoint_files = self.checkpoint_files[:delete_index]
def save_checkpoint(self, epoch, metric=None):
assert epoch >= 0
tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension)
last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension)
self._save(tmp_save_path, epoch, metric)
self._replace(tmp_save_path, last_save_path)
worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None
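# keep as a top-n checkpoint if there is room, no metric was given, or it beats the current worst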
if (
len(self.checkpoint_files) < self.max_history
or metric is None
or self.cmp(metric, worst_file[1])
):
if len(self.checkpoint_files) >= self.max_history:
self._cleanup_checkpoints(1)
filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension
save_path = os.path.join(self.checkpoint_dir, filename)
self._duplicate(last_save_path, save_path)
self.checkpoint_files.append((save_path, metric))
self.checkpoint_files = sorted(
self.checkpoint_files,
key=lambda x: x[1],
reverse=not self.decreasing # sort in descending order if a lower metric is not better
)
checkpoints_str = "Current checkpoints:\n"
for c in self.checkpoint_files:
checkpoints_str += ' {}\n'.format(c)
_logger.info(checkpoints_str)
if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)):
self.best_epoch = epoch
self.best_metric = metric
best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension)
self._duplicate(last_save_path, best_save_path)
return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch)
def save_recovery(self, epoch, batch_idx=0):
assert epoch >= 0
tmp_save_path = os.path.join(self.recovery_dir, 'recovery_tmp' + self.extension)
self._save(tmp_save_path, epoch)
filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension
save_path = os.path.join(self.recovery_dir, filename)
self._replace(tmp_save_path, save_path)
if os.path.exists(self.prev_recovery_file):
try:
_logger.debug("Cleaning recovery: {}".format(self.prev_recovery_file))
os.remove(self.prev_recovery_file)
except Exception as e:
_logger.error("Exception '{}' while removing {}".format(e, self.prev_recovery_file))
self.prev_recovery_file = self.curr_recovery_file
self.curr_recovery_file = save_path
def find_recovery(self):
recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix)
files = glob.glob(recovery_path + '*' + self.extension)
files = sorted(files)
return files[0] if len(files) else ''
| pytorch-image-models/timm/utils/checkpoint_saver.py/0 | {
"file_path": "pytorch-image-models/timm/utils/checkpoint_saver.py",
"repo_id": "pytorch-image-models",
"token_count": 3258
} | 253 |
#!/usr/bin/env python3
""" ImageNet Validation Script
This is intended to be a lean and easily modifiable ImageNet validation script for evaluating pretrained
models or training checkpoints against ImageNet or similarly organized image datasets. It prioritizes
canonical PyTorch, standard Python style, and good performance. Repurpose as you see fit.
Hacked together by Ross Wightman (https://github.com/rwightman)
"""
import argparse
import csv
import glob
import json
import logging
import os
import time
from collections import OrderedDict
from contextlib import suppress
from functools import partial
import torch
import torch.nn as nn
import torch.nn.parallel
from timm.data import create_dataset, create_loader, resolve_data_config, RealLabelsImagenet
from timm.layers import apply_test_time_pool, set_fast_norm
from timm.models import create_model, load_checkpoint, is_model, list_models
from timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging, set_jit_fuser, \
decay_batch_step, check_batch_size_retry, ParseKwargs, reparameterize_model
try:
from apex import amp
has_apex = True
except ImportError:
has_apex = False
try:
from functorch.compile import memory_efficient_fusion
has_functorch = True
except ImportError as e:
has_functorch = False
has_compile = hasattr(torch, 'compile')
_logger = logging.getLogger('validate')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
parser.add_argument('data', nargs='?', metavar='DIR', const=None,
help='path to dataset (*deprecated*, use --data-dir)')
parser.add_argument('--data-dir', metavar='DIR',
help='path to dataset (root dir)')
parser.add_argument('--dataset', metavar='NAME', default='',
help='dataset type + name ("<type>/<name>") (default: ImageFolder or ImageTar if empty)')
parser.add_argument('--split', metavar='NAME', default='validation',
help='dataset split (default: validation)')
parser.add_argument('--num-samples', default=None, type=int,
metavar='N', help='Manually specify num samples in dataset split, for IterableDatasets.')
parser.add_argument('--dataset-download', action='store_true', default=False,
help='Allow download of dataset for torch/ and tfds/ datasets that support it.')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
parser.add_argument('--input-key', default=None, type=str,
help='Dataset key for input images.')
parser.add_argument('--input-img-mode', default=None, type=str,
help='Dataset image conversion mode for input images.')
parser.add_argument('--target-key', default=None, type=str,
help='Dataset key for target labels.')
parser.add_argument('--dataset-trust-remote-code', action='store_true', default=False,
help='Allow huggingface dataset import to execute code downloaded from the dataset\'s repo.')
parser.add_argument('--model', '-m', metavar='NAME', default='dpn92',
help='model architecture (default: dpn92)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--in-chans', type=int, default=None, metavar='N',
help='Image input channels (default: None => 3)')
parser.add_argument('--input-size', default=None, nargs=3, type=int, metavar='N',
help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--use-train-size', action='store_true', default=False,
help='force use of train input size, even when test size is specified in pretrained cfg')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop pct')
parser.add_argument('--crop-mode', default=None, type=str,
metavar='N', help='Input image crop mode (squash, border, center). Model default if None.')
parser.add_argument('--crop-border-pixels', type=int, default=None,
help='Crop pixels from image border.')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=None,
help='Number classes in dataset')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--log-freq', default=10, type=int,
metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--test-pool', dest='test_pool', action='store_true',
help='enable test time pool')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--device', default='cuda', type=str,
help="Device (accelerator) to use.")
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--amp-dtype', default='float16', type=str,
help='lower precision AMP dtype (default: float16)')
parser.add_argument('--amp-impl', default='native', type=str,
help='AMP impl to use, "native" or "apex" (default: native)')
parser.add_argument('--model-dtype', default=None, type=str,
help='Model dtype override (non-AMP) (default: float32)')
parser.add_argument('--tf-preprocessing', action='store_true', default=False,
help='Use Tensorflow preprocessing pipeline (requires CPU TF installed)')
parser.add_argument('--use-ema', dest='use_ema', action='store_true',
help='use ema version of weights if present')
parser.add_argument('--fuser', default='', type=str,
help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
parser.add_argument('--fast-norm', default=False, action='store_true',
help='enable experimental fast-norm')
parser.add_argument('--reparam', default=False, action='store_true',
help='Reparameterize model')
parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs)
parser.add_argument('--torchcompile-mode', type=str, default=None,
help="torch.compile mode (default: None).")
scripting_group = parser.add_mutually_exclusive_group()
scripting_group.add_argument('--torchscript', default=False, action='store_true',
help='torch.jit.script the full model')
scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor',
help="Enable compilation w/ specified backend (default: inductor).")
scripting_group.add_argument('--aot-autograd', default=False, action='store_true',
help="Enable AOT Autograd support.")
parser.add_argument('--results-file', default='', type=str, metavar='FILENAME',
help='Output csv file for validation results (summary)')
parser.add_argument('--results-format', default='csv', type=str,
help='Format for results file one of (csv, json) (default: csv).')
parser.add_argument('--real-labels', default='', type=str, metavar='FILENAME',
help='Real labels JSON file for imagenet evaluation')
parser.add_argument('--valid-labels', default='', type=str, metavar='FILENAME',
help='Valid label indices txt file for validation of partial label space')
parser.add_argument('--retry', default=False, action='store_true',
help='Enable batch size decay & retry for single model validation')
# NaFlex loader arguments
parser.add_argument('--naflex-loader', action='store_true', default=False,
help='Use NaFlex loader (Requires NaFlex compatible model)')
parser.add_argument('--naflex-max-seq-len', type=int, default=576,
help='Fixed maximum sequence length for NaFlex loader (validation)')
def validate(args):
# might as well try to validate something
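# default to pretrained weights when no checkpoint path is given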
args.pretrained = args.pretrained or not args.checkpoint
args.prefetcher = not args.no_prefetcher
if torch.cuda.is_available():
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True
device = torch.device(args.device)
model_dtype = None
if args.model_dtype:
assert args.model_dtype in ('float32', 'float16', 'bfloat16')
model_dtype = getattr(torch, args.model_dtype)
# resolve AMP arguments based on PyTorch / Apex availability
use_amp = None
amp_autocast = suppress
if args.amp:
assert model_dtype is None or model_dtype == torch.float32, 'float32 model dtype must be used with AMP'
if args.amp_impl == 'apex':
assert has_apex, 'AMP impl specified as APEX but APEX is not installed.'
assert args.amp_dtype == 'float16'
use_amp = 'apex'
_logger.info('Validating in mixed precision with NVIDIA APEX AMP.')
else:
assert args.amp_dtype in ('float16', 'bfloat16')
use_amp = 'native'
amp_dtype = torch.bfloat16 if args.amp_dtype == 'bfloat16' else torch.float16
amp_autocast = partial(torch.autocast, device_type=device.type, dtype=amp_dtype)
_logger.info('Validating in mixed precision with native PyTorch AMP.')
else:
_logger.info(f'Validating in {model_dtype or torch.float32}. AMP not enabled.')
if args.fuser:
set_jit_fuser(args.fuser)
if args.fast_norm:
set_fast_norm()
# create model
in_chans = 3
if args.in_chans is not None:
in_chans = args.in_chans
elif args.input_size is not None:
in_chans = args.input_size[0]
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
in_chans=in_chans,
global_pool=args.gp,
scriptable=args.torchscript,
**args.model_kwargs,
)
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes
if args.checkpoint:
load_checkpoint(model, args.checkpoint, args.use_ema)
if args.reparam:
model = reparameterize_model(model)
param_count = sum([m.numel() for m in model.parameters()])
_logger.info('Model %s created, param count: %d' % (args.model, param_count))
data_config = resolve_data_config(
vars(args),
model=model,
use_test_size=not args.use_train_size,
verbose=True,
)
test_time_pool = False
if args.test_pool:
model, test_time_pool = apply_test_time_pool(model, data_config)
model = model.to(device=device, dtype=model_dtype) # FIXME move model device & dtype into create_model
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
if args.torchscript:
assert use_amp != 'apex', 'Cannot use APEX AMP with torchscripted model'
model = torch.jit.script(model)
elif args.torchcompile:
assert has_compile, 'A version of torch w/ torch.compile() is required for --compile, possibly a nightly.'
torch._dynamo.reset()
model = torch.compile(model, backend=args.torchcompile, mode=args.torchcompile_mode)
elif args.aot_autograd:
assert has_functorch, "functorch is needed for --aot-autograd"
model = memory_efficient_fusion(model)
if use_amp == 'apex':
model = amp.initialize(model, opt_level='O1')
if args.num_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))
criterion = nn.CrossEntropyLoss().to(device)
root_dir = args.data or args.data_dir
if args.input_img_mode is None:
input_img_mode = 'RGB' if data_config['input_size'][0] == 3 else 'L'
else:
input_img_mode = args.input_img_mode
dataset = create_dataset(
root=root_dir,
name=args.dataset,
split=args.split,
download=args.dataset_download,
load_bytes=args.tf_preprocessing,
class_map=args.class_map,
num_samples=args.num_samples,
input_key=args.input_key,
input_img_mode=input_img_mode,
target_key=args.target_key,
trust_remote_code=args.dataset_trust_remote_code,
)
if args.valid_labels:
with open(args.valid_labels, 'r') as f:
valid_labels = [int(line.rstrip()) for line in f]
else:
valid_labels = None
if args.real_labels:
real_labels = RealLabelsImagenet(dataset.filenames(basename=True), real_json=args.real_labels)
else:
real_labels = None
crop_pct = 1.0 if test_time_pool else data_config['crop_pct']
if args.naflex_loader:
model_patch_size = None
if hasattr(model, 'embeds') and hasattr(model.embeds, 'patch_size'):
# NaFlexVit models have embeds.patch_size
model_patch_size = model.embeds.patch_size
from timm.data import create_naflex_loader
loader = create_naflex_loader(
dataset,
batch_size=args.batch_size,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
crop_pct=crop_pct,
crop_mode=data_config['crop_mode'],
crop_border_pixels=args.crop_border_pixels,
pin_memory=args.pin_mem,
device=device,
img_dtype=model_dtype or torch.float32,
patch_size=model_patch_size or (16, 16),
max_seq_len=args.naflex_max_seq_len,
)
else:
loader = create_loader(
dataset,
input_size=data_config['input_size'],
batch_size=args.batch_size,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
crop_pct=crop_pct,
crop_mode=data_config['crop_mode'],
crop_border_pixels=args.crop_border_pixels,
pin_memory=args.pin_mem,
device=device,
img_dtype=model_dtype or torch.float32,
tf_preprocessing=args.tf_preprocessing,
)
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
with torch.inference_mode():
# warmup, reduce variability of first batch time, especially for comparing torchscript vs non
if not args.naflex_loader:
input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).to(device=device, dtype=model_dtype)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
model(input)
end = time.time()
for batch_idx, (input, target) in enumerate(loader):
if args.no_prefetcher:
target = target.to(device=device)
input = input.to(device=device, dtype=model_dtype)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
# compute output
with amp_autocast():
output = model(input)
if valid_labels is not None:
output = output[:, valid_labels]
loss = criterion(output, target)
if real_labels is not None:
real_labels.add_result(output)
# measure accuracy and record loss
batch_size = output.shape[0]
acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5))
losses.update(loss.item(), batch_size)
top1.update(acc1.item(), batch_size)
top5.update(acc5.item(), batch_size)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.log_freq == 0:
_logger.info(
'Test: [{0:>4d}/{1}] '
'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) '
'Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})'.format(
batch_idx,
len(loader),
batch_time=batch_time,
rate_avg=batch_size / batch_time.avg,
loss=losses,
top1=top1,
top5=top5
)
)
if real_labels is not None:
# real labels mode replaces topk values at the end
top1a, top5a = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5)
else:
top1a, top5a = top1.avg, top5.avg
results = OrderedDict(
model=args.model,
top1=round(top1a, 4), top1_err=round(100 - top1a, 4),
top5=round(top5a, 4), top5_err=round(100 - top5a, 4),
param_count=round(param_count / 1e6, 2),
img_size=data_config['input_size'][-1],
crop_pct=crop_pct,
interpolation=data_config['interpolation'],
)
_logger.info(' * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f})'.format(
results['top1'], results['top1_err'], results['top5'], results['top5_err']))
return results
def _try_run(args, initial_batch_size):
batch_size = initial_batch_size
results = OrderedDict()
error_str = 'Unknown'
while batch_size:
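# on a recoverable failure, progressively decay the batch size and retry until it reaches zero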
args.batch_size = batch_size * args.num_gpu # multiply by num-gpu for DataParallel case
try:
if 'cuda' in args.device and torch.cuda.is_available():
torch.cuda.empty_cache()
elif "npu" in args.device and torch.npu.is_available():
torch.npu.empty_cache()
results = validate(args)
return results
except RuntimeError as e:
error_str = str(e)
_logger.error(f'"{error_str}" while running validation.')
if not check_batch_size_retry(error_str):
break
batch_size = decay_batch_step(batch_size)
_logger.warning(f'Reducing batch size to {batch_size} for retry.')
results['model'] = args.model
results['error'] = error_str
_logger.error(f'{args.model} failed to validate ({error_str}).')
return results
_NON_IN1K_FILTERS = ['*_in21k', '*_in22k', '*in12k', '*_dino', '*fcmae', '*seer']
def main():
setup_default_logging()
args = parser.parse_args()
model_cfgs = []
model_names = []
if os.path.isdir(args.checkpoint):
# validate all checkpoints in a path with same model
checkpoints = glob.glob(args.checkpoint + '/*.pth.tar')
checkpoints += glob.glob(args.checkpoint + '/*.pth')
model_names = list_models(args.model)
model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)]
else:
if args.model == 'all':
# validate all models in a list of names with pretrained checkpoints
args.pretrained = True
model_names = list_models(
pretrained=True,
exclude_filters=_NON_IN1K_FILTERS,
)
model_cfgs = [(n, '') for n in model_names]
elif not is_model(args.model):
# model name doesn't exist, try as wildcard filter
model_names = list_models(
args.model,
pretrained=True,
)
model_cfgs = [(n, '') for n in model_names]
if not model_cfgs and os.path.isfile(args.model):
with open(args.model) as f:
model_names = [line.rstrip() for line in f]
model_cfgs = [(n, None) for n in model_names if n]
if len(model_cfgs):
_logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))
results = []
try:
initial_batch_size = args.batch_size
for m, c in model_cfgs:
args.model = m
args.checkpoint = c
r = _try_run(args, initial_batch_size)
if 'error' in r:
continue
if args.checkpoint:
r['checkpoint'] = args.checkpoint
results.append(r)
except KeyboardInterrupt:
pass
results = sorted(results, key=lambda x: x['top1'], reverse=True)
else:
if args.retry:
results = _try_run(args, args.batch_size)
else:
results = validate(args)
if args.results_file:
write_results(args.results_file, results, format=args.results_format)
# output results in JSON to stdout w/ delimiter for runner script
print(f'--result\n{json.dumps(results, indent=4)}')
def write_results(results_file, results, format='csv'):
with open(results_file, mode='w') as cf:
if format == 'json':
json.dump(results, cf, indent=4)
else:
if not isinstance(results, (list, tuple)):
results = [results]
if not results:
return
dw = csv.DictWriter(cf, fieldnames=results[0].keys())
dw.writeheader()
for r in results:
dw.writerow(r)
cf.flush()
if __name__ == '__main__':
main()
| pytorch-image-models/validate.py/0 | {
"file_path": "pytorch-image-models/validate.py",
"repo_id": "pytorch-image-models",
"token_count": 10446
} | 254 |
# Models
<Tip warning={true}>
Smolagents is an experimental API which is subject to change at any time. Results returned by the agents
can vary as the APIs or underlying models are prone to change.
</Tip>
To learn more about agents and tools make sure to read the [introductory guide](../index). This page
contains the API docs for the underlying classes.
## Models
All model classes in smolagents support passing additional keyword arguments (like `temperature`, `max_tokens`, `top_p`, etc.) directly at instantiation time.
These parameters are automatically forwarded to the underlying model's completion calls, allowing you to configure model behavior such as creativity, response length, and sampling strategies.
### Base Model
The `Model` class serves as the foundation for all model implementations, providing the core interface that custom models must implement to work with agents.
[[autodoc]] Model
### API Model
The `ApiModel` class serves as the foundation for all API-based model implementations, providing common functionality for external API interactions, rate limiting, and client management that API-specific models inherit.
[[autodoc]] ApiModel
### TransformersModel
For convenience, we have added a `TransformersModel` that builds a local `transformers` pipeline for the `model_id` given at initialization.
```python
from smolagents import TransformersModel
model = TransformersModel(model_id="HuggingFaceTB/SmolLM-135M-Instruct")
print(model([{"role": "user", "content": [{"type": "text", "text": "Ok!"}]}], stop_sequences=["great"]))
```
```text
>>> What a
```
You can pass any keyword arguments supported by the underlying model (such as `temperature`, `max_new_tokens`, `top_p`, etc.) directly at instantiation time. These are forwarded to the model completion call:
```python
model = TransformersModel(
model_id="HuggingFaceTB/SmolLM-135M-Instruct",
temperature=0.7,
max_new_tokens=1000
)
```
> [!TIP]
> You must have `transformers` and `torch` installed on your machine. Please run `pip install smolagents[transformers]` if it's not the case.
[[autodoc]] TransformersModel
### InferenceClientModel
The `InferenceClientModel` wraps huggingface_hub's [InferenceClient](https://huggingface.co/docs/huggingface_hub/main/en/guides/inference) for the execution of the LLM. It supports all [Inference Providers](https://huggingface.co/docs/inference-providers/index) available on the Hub: Cerebras, Cohere, Fal, Fireworks, HF-Inference, Hyperbolic, Nebius, Novita, Replicate, SambaNova, Together, and more.
You can also set a rate limit in requests per minute by using the `requests_per_minute` argument:
```python
from smolagents import InferenceClientModel
messages = [
{"role": "user", "content": [{"type": "text", "text": "Hello, how are you?"}]}
]
model = InferenceClientModel(provider="novita", requests_per_minute=60)
print(model(messages))
```
```text
>>> Of course! If you change your mind, feel free to reach out. Take care!
```
You can pass any keyword arguments supported by the underlying model (such as `temperature`, `max_tokens`, `top_p`, etc.) directly at instantiation time. These are forwarded to the model completion call:
```python
model = InferenceClientModel(
provider="novita",
requests_per_minute=60,
temperature=0.8,
max_tokens=500
)
```
[[autodoc]] InferenceClientModel
### LiteLLMModel
The `LiteLLMModel` leverages [LiteLLM](https://www.litellm.ai/) to support 100+ LLMs from various providers.
You can pass kwargs upon model initialization that will then be used whenever using the model, for instance below we pass `temperature`. You can also set a rate limit in requests per minute by using the `requests_per_minute` argument.
```python
from smolagents import LiteLLMModel
messages = [
{"role": "user", "content": [{"type": "text", "text": "Hello, how are you?"}]}
]
model = LiteLLMModel(model_id="anthropic/claude-3-5-sonnet-latest", temperature=0.2, max_tokens=10, requests_per_minute=60)
print(model(messages))
```
[[autodoc]] LiteLLMModel
### LiteLLMRouterModel
The `LiteLLMRouterModel` is a wrapper around the [LiteLLM Router](https://docs.litellm.ai/docs/routing) that leverages
advanced routing strategies: load-balancing across multiple deployments, prioritizing critical requests via queueing,
and implementing basic reliability measures such as cooldowns, fallbacks, and exponential backoff retries.
```python
from smolagents import LiteLLMRouterModel
messages = [
{"role": "user", "content": [{"type": "text", "text": "Hello, how are you?"}]}
]
model = LiteLLMRouterModel(
model_id="llama-3.3-70b",
model_list=[
{
"model_name": "llama-3.3-70b",
"litellm_params": {"model": "groq/llama-3.3-70b", "api_key": os.getenv("GROQ_API_KEY")},
},
{
"model_name": "llama-3.3-70b",
"litellm_params": {"model": "cerebras/llama-3.3-70b", "api_key": os.getenv("CEREBRAS_API_KEY")},
},
],
client_kwargs={
"routing_strategy": "simple-shuffle",
},
)
print(model(messages))
```
[[autodoc]] LiteLLMRouterModel
### OpenAIServerModel
This class lets you call any model served through an OpenAI-compatible API.
Here's how you can set it up (you can customise the `api_base` url to point to another server):
```py
import os
from smolagents import OpenAIServerModel
model = OpenAIServerModel(
model_id="gpt-4o",
api_base="https://api.openai.com/v1",
api_key=os.environ["OPENAI_API_KEY"],
)
```
You can pass any keyword arguments supported by the underlying model (such as `temperature`, `max_tokens`, `top_p`, etc.) directly at instantiation time. These are forwarded to the model completion call:
```py
model = OpenAIServerModel(
model_id="gpt-4o",
api_base="https://api.openai.com/v1",
api_key=os.environ["OPENAI_API_KEY"],
temperature=0.7,
max_tokens=1000,
top_p=0.9,
)
```
[[autodoc]] OpenAIServerModel
### AzureOpenAIServerModel
`AzureOpenAIServerModel` allows you to connect to any Azure OpenAI deployment.
Below you can find an example of how to set it up, note that you can omit the `azure_endpoint`, `api_key`, and `api_version` arguments, provided you've set the corresponding environment variables -- `AZURE_OPENAI_ENDPOINT`, `AZURE_OPENAI_API_KEY`, and `OPENAI_API_VERSION`.
Pay attention to the lack of an `AZURE_` prefix for `OPENAI_API_VERSION`, this is due to the way the underlying [openai](https://github.com/openai/openai-python) package is designed.
```py
import os
from smolagents import AzureOpenAIServerModel
model = AzureOpenAIServerModel(
model_id = os.environ.get("AZURE_OPENAI_MODEL"),
azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
api_version=os.environ.get("OPENAI_API_VERSION")
)
```
[[autodoc]] AzureOpenAIServerModel
### AmazonBedrockServerModel
`AmazonBedrockServerModel` helps you connect to Amazon Bedrock and run your agent with any available models.
Below is an example setup. This class also offers additional options for customization.
```py
import os
from smolagents import AmazonBedrockServerModel
model = AmazonBedrockServerModel(
model_id = os.environ.get("AMAZON_BEDROCK_MODEL_ID"),
)
```
[[autodoc]] AmazonBedrockServerModel
### MLXModel
```python
from smolagents import MLXModel
model = MLXModel(model_id="HuggingFaceTB/SmolLM-135M-Instruct")
print(model([{"role": "user", "content": "Ok!"}], stop_sequences=["great"]))
```
```text
>>> What a
```
> [!TIP]
> You must have `mlx-lm` installed on your machine. Please run `pip install smolagents[mlx-lm]` if it's not the case.
[[autodoc]] MLXModel
### VLLMModel
Model to use [vLLM](https://docs.vllm.ai/) for fast LLM inference and serving.
```python
from smolagents import VLLMModel
model = VLLMModel(model_id="HuggingFaceTB/SmolLM-135M-Instruct")
print(model([{"role": "user", "content": "Ok!"}], stop_sequences=["great"]))
```
> [!TIP]
> You must have `vllm` installed on your machine. Please run `pip install smolagents[vllm]` if it's not the case.
[[autodoc]] VLLMModel
### Custom Model
You're free to create and use your own models to power your agent.
You could subclass the base `Model` class to create a model for your agent.
The main requirement is to override the `generate` method, which must satisfy these two criteria:
1. It follows the [messages format](./chat_templating) (`List[Dict[str, str]]`) for its input `messages`, and it returns an object with a `.content` attribute.
2. It stops generating outputs at the sequences passed in the argument `stop_sequences`.
For defining your LLM, you can make a `CustomModel` class that inherits from the base `Model` class.
It should have a generate method that takes a list of [messages](./chat_templating) and returns an object with a .content attribute containing the text. The `generate` method also needs to accept a `stop_sequences` argument that indicates when to stop generating.
```python
from huggingface_hub import login, InferenceClient
from smolagents import Model
login("<YOUR_HUGGINGFACEHUB_API_TOKEN>")
model_id = "meta-llama/Llama-3.3-70B-Instruct"
client = InferenceClient(model=model_id)
class CustomModel(Model):
def generate(self, messages, stop_sequences=["Task"]):
response = client.chat_completion(messages, stop=stop_sequences, max_tokens=1024)
answer = response.choices[0].message
return answer
custom_model = CustomModel()
```
Additionally, `generate` can also take a `grammar` argument to allow [constrained generation](https://huggingface.co/docs/text-generation-inference/conceptual/guidance) in order to force properly-formatted agent outputs.
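For instance, here is a minimal sketch of a `generate` override that forwards an optional `grammar` argument, reusing the `client` from the snippet above. How the grammar is encoded for the backend is an assumption here (shown as a `response_format` payload); adapt it to your serving stack:
```python
class ConstrainedModel(Model):
    def generate(self, messages, stop_sequences=None, grammar=None):
        kwargs = {"stop": stop_sequences, "max_tokens": 1024}
        if grammar is not None:
            # Hypothetical mapping: pass the grammar through as a response format
            kwargs["response_format"] = grammar
        response = client.chat_completion(messages, **kwargs)
        return response.choices[0].message
```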
| smolagents/docs/source/en/reference/models.md/0 | {
"file_path": "smolagents/docs/source/en/reference/models.md",
"repo_id": "smolagents",
"token_count": 3183
} | 255 |
# Agents
<Tip warning={true}>
Smolagents is an experimental API which is subject to change at any time. Results returned by the agents can vary as the APIs or underlying models are prone to change.
</Tip>
To learn more about agents and tools, make sure to read the [introductory guide](../index).
This page contains the API docs for the underlying classes.
## Agents
Our agents inherit from [`MultiStepAgent`], which means they can act in multiple steps, each step consisting of one thought, then one tool call and execution. Read more in [this conceptual guide](../conceptual_guides/react).
We provide two types of agents, based on the main [`Agent`] class.
- [`CodeAgent`] is the default agent; it writes its tool calls in Python code.
- [`ToolCallingAgent`] writes its tool calls in JSON.
Both require the arguments `model` and a list of tools `tools` at initialization.
### Classes of agents
[[autodoc]] MultiStepAgent
[[autodoc]] CodeAgent
[[autodoc]] ToolCallingAgent
### stream_to_gradio
[[autodoc]] stream_to_gradio
### GradioUI
[[autodoc]] GradioUI
## Models
You're free to create and use your own models to power your agent.
You can use any `model` callable for your agent, as long as:
1. It follows the [messages format](./chat_templating) (`List[Dict[str, str]]`) for its input `messages`, and it returns a `str`.
2. It stops generating outputs *before* the sequences passed in the argument `stop_sequences`.
To define your LLM, you can make a `custom_model` method which accepts a list of [messages](./chat_templating) and returns an object with a `.content` attribute containing the text. This callable also needs to accept a `stop_sequences` argument that indicates when to stop generating.
```python
from huggingface_hub import login, InferenceClient
login("<YOUR_HUGGINGFACEHUB_API_TOKEN>")
model_id = "meta-llama/Llama-3.3-70B-Instruct"
client = InferenceClient(model=model_id)
def custom_model(messages, stop_sequences=["Task"]):
response = client.chat_completion(messages, stop=stop_sequences, max_tokens=1000)
answer = response.choices[0].message
return answer
```
Additionally, `custom_model` can also take a `grammar` argument. In the case where you specify a `grammar` upon agent initialization, this argument will be passed to the calls to the model with the `grammar` you defined upon initialization, to allow [constrained generation](https://huggingface.co/docs/text-generation-inference/conceptual/guidance) in order to force properly-formatted agent outputs.
### TransformersModel
For convenience, we have added a `TransformersModel` that builds a local `transformers` pipeline for the `model_id` given at initialization.
```python
from smolagents import TransformersModel
model = TransformersModel(model_id="HuggingFaceTB/SmolLM-135M-Instruct")
print(model([{"role": "user", "content": "Ok!"}], stop_sequences=["great"]))
```
```text
>>> What a
```
[[autodoc]] TransformersModel
### InferenceClientModel
The `InferenceClientModel` wraps an [HF Inference API](https://huggingface.co/docs/api-inference/index) client for the execution of the LLM.
```python
from smolagents import InferenceClientModel
messages = [
{"role": "user", "content": "Hello, how are you?"},
{"role": "assistant", "content": "I'm doing great. How can I help you today?"},
{"role": "user", "content": "No need to help, take it easy."},
]
model = InferenceClientModel()
print(model(messages))
```
```text
>>> Of course! If you change your mind, feel free to reach out. Take care!
```
[[autodoc]] InferenceClientModel
### LiteLLMModel
The `LiteLLMModel` leverages [LiteLLM](https://www.litellm.ai/) to support 100+ LLMs from various providers.
You can pass kwargs upon model initialization that will then be used whenever the model is used; for instance, below we pass `temperature`.
```python
from smolagents import LiteLLMModel
messages = [
{"role": "user", "content": "Hello, how are you?"},
{"role": "assistant", "content": "I'm doing great. How can I help you today?"},
{"role": "user", "content": "No need to help, take it easy."},
]
model = LiteLLMModel(model_id="anthropic/claude-3-5-sonnet-latest", temperature=0.2, max_tokens=10)
print(model(messages))
```
[[autodoc]] LiteLLMModel
### OpenAIServerModel
This class lets you call any model served through an OpenAI-compatible API.
Here's how you can set it up (you can customise the `api_base` url to point to another server):
```py
import os
from smolagents import OpenAIServerModel
model = OpenAIServerModel(
model_id="gpt-4o",
api_base="https://api.openai.com/v1",
api_key=os.environ["OPENAI_API_KEY"],
)
```
## Prompts
[[autodoc]] smolagents.agents.PromptTemplates
[[autodoc]] smolagents.agents.PlanningPromptTemplate
[[autodoc]] smolagents.agents.ManagedAgentPromptTemplate
[[autodoc]] smolagents.agents.FinalAnswerPromptTemplate
| smolagents/docs/source/hi/reference/agents.md/0 | {
"file_path": "smolagents/docs/source/hi/reference/agents.md",
"repo_id": "smolagents",
"token_count": 4209
} | 256 |
# Agentic RAG
[[open-in-colab]]
Retrieval-Augmented Generation (RAG) is "using an LLM to answer a user query, but grounding the answer on information retrieved from a knowledge base". It has many advantages over using a vanilla or fine-tuned LLM: to name a few, it allows grounding the answer on true facts and reducing confabulations; it allows providing the LLM with domain-specific knowledge; and it allows fine-grained control over access to the information in the knowledge base.
However, vanilla RAG has some limitations, and these two are especially acute:
- It performs only one retrieval step: if the results are bad, the generation will be bad as well.
- Semantic similarity is computed with the user query as a reference, which may be suboptimal: for instance, the user query is often a question while the document containing the true answer is usually phrased affirmatively, so its similarity score is lowered compared to other source documents phrased as questions, which risks missing the relevant information.
We can alleviate these problems by making a RAG agent: very simply, an agent armed with a retriever tool! This agent
will: ✅ formulate the query itself and retrieve, ✅ re-retrieve if needed.
So it will be smarter than vanilla RAG, since it can formulate the query itself instead of using the user query directly as a reference. This way, it can get
closer to the target documents and improve retrieval accuracy, as in [HyDE](https://huggingface.co/papers/2212.10496). The agent can also
use the generated snippets and re-retrieve when needed, as in [Self-Query](https://docs.llamaindex.ai/en/stable/examples/evaluation/RetryQuery/).
Let's build this system. 🛠️
Run the line below to install the required dependencies:
```bash
!pip install smolagents pandas langchain langchain-community sentence-transformers rank_bm25 --upgrade -q
```
You need a valid token as the environment variable `HF_TOKEN` to call Inference Providers. We use python-dotenv to load it.
```py
from dotenv import load_dotenv
load_dotenv()
```
We first load a knowledge base on which we want to perform RAG: this dataset is a compilation of the documentation pages for many Hugging Face libraries, stored as markdown. We will keep only the documentation for the `transformers` library. Then we prepare the knowledge base for the retriever by processing the dataset and storing it into a vector database. We use [LangChain](https://python.langchain.com/docs/introduction/) for its excellent vector database utilities.
```py
import datasets
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.retrievers import BM25Retriever
knowledge_base = datasets.load_dataset("m-ric/huggingface_doc", split="train")
knowledge_base = knowledge_base.filter(lambda row: row["source"].startswith("huggingface/transformers"))
source_docs = [
Document(page_content=doc["text"], metadata={"source": doc["source"].split("/")[1]})
for doc in knowledge_base
]
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=500,
chunk_overlap=50,
add_start_index=True,
strip_whitespace=True,
separators=["\n\n", "\n", ".", " ", ""],
)
docs_processed = text_splitter.split_documents(source_docs)
```
Now the documents are ready. Let's build our agentic RAG system!
👉 We only need a RetrieverTool that our agent can leverage to retrieve information from the knowledge base.
Since we need to add the vectordb as an attribute of the tool, we cannot simply use the simple tool constructor with the `@tool` decorator: so we will follow the advanced setup highlighted in the [tools tutorial](../tutorials/tools).
```py
from smolagents import Tool
class RetrieverTool(Tool):
name = "retriever"
description = "Uses semantic search to retrieve the parts of transformers documentation that could be most relevant to answer your query."
inputs = {
"query": {
"type": "string",
"description": "The query to perform. This should be semantically close to your target documents. Use the affirmative form rather than a question.",
}
}
output_type = "string"
def __init__(self, docs, **kwargs):
super().__init__(**kwargs)
self.retriever = BM25Retriever.from_documents(
docs, k=10
)
def forward(self, query: str) -> str:
assert isinstance(query, str), "Your search query must be a string"
docs = self.retriever.invoke(
query,
)
return "\nRetrieved documents:\n" + "".join(
[
f"\n\n===== Document {str(i)} =====\n" + doc.page_content
for i, doc in enumerate(docs)
]
)
retriever_tool = RetrieverTool(docs_processed)
```
BM25 is a classic retrieval method, used here because it is lightning fast to set up. To improve retrieval accuracy, you could replace BM25 with semantic search using vector representations of the documents: head over to the [MTEB Leaderboard](https://huggingface.co/spaces/mteb/leaderboard) to pick a good embedding model.
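To give an idea, here is a minimal sketch of an embedding-based alternative. The embedding model name is only an illustration, and this assumes `faiss-cpu` is installed in addition to the packages installed above:
```py
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

# Embed the processed chunks and index them in a FAISS vector store
embeddings = HuggingFaceEmbeddings(model_name="thenlper/gte-small")  # illustrative choice
vectordb = FAISS.from_documents(docs_processed, embeddings)

# The resulting retriever exposes the same `.invoke(query)` interface used by RetrieverTool
semantic_retriever = vectordb.as_retriever(search_kwargs={"k": 10})
docs = semantic_retriever.invoke("How to train a transformers model")
```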
Now that we have created a tool that can retrieve information from the knowledge base, it is easy to create an agent that leverages this
`retriever_tool`! The agent is initialized with the following arguments:
- `tools`: the list of tools the agent will be able to call.
- `model`: the LLM that powers the agent.
Our `model` must be a callable that takes a list of messages as input and returns text. It also needs to accept a `stop_sequences` argument that indicates when to stop generating. For convenience, we directly use the `HfEngine` class provided in the package to get an LLM engine that calls Hugging Face's Inference API.
Next, we will use [meta-llama/Llama-3.3-70B-Instruct](https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct) as the LLM engine
because:
- It has a long 128k context, which is helpful for processing long source documents.
- It is always available for free on HF's Inference API!
_Note:_ The Inference API hosts models based on various criteria, and deployed models may be updated or replaced without prior notice. Learn more about it [here](https://huggingface.co/docs/api-inference/supported-models).
```py
from smolagents import InferenceClientModel, CodeAgent
agent = CodeAgent(
tools=[retriever_tool], model=InferenceClientModel(model_id="meta-llama/Llama-3.3-70B-Instruct"), max_steps=4, verbose=True
)
```
Upon initializing the CodeAgent, it has automatically been given a default system prompt that tells the LLM engine to process step by step and generate tool calls as code snippets, but you can replace this prompt template with your own as needed. Then, when its `.run()` method is launched, the agent takes care of calling the LLM engine and executing the tool calls in a loop, until the tool `final_answer` is called with the final answer as its argument.
```py
agent_output = agent.run("For a transformers model training, which is slower, the forward or the backward pass?")
print("Final output:")
print(agent_output)
```
| smolagents/docs/source/zh/examples/rag.md/0 | {
"file_path": "smolagents/docs/source/zh/examples/rag.md",
"repo_id": "smolagents",
"token_count": 3826
} | 257 |
"""
Async CodeAgent Example with Starlette
This example demonstrates how to use a CodeAgent in an async Starlette app,
running the agent in a background thread using anyio.to_thread.run_sync.
"""
import anyio.to_thread
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.routing import Route
from smolagents import CodeAgent, InferenceClientModel
# Create a simple agent instance (customize as needed)
def get_agent():
# You can set a custom model or tools as needed
return CodeAgent(
model=InferenceClientModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct"),
tools=[],
)
async def run_agent_in_thread(task: str):
agent = get_agent()
# The agent's run method is synchronous
result = await anyio.to_thread.run_sync(agent.run, task)
return result
async def run_agent_endpoint(request: Request):
data = await request.json()
task = data.get("task")
if not task:
return JSONResponse({"error": 'Missing "task" in request body.'}, status_code=400)
try:
result = await run_agent_in_thread(task)
return JSONResponse({"result": result})
except Exception as e:
return JSONResponse({"error": str(e)}, status_code=500)
routes = [
Route("/run-agent", run_agent_endpoint, methods=["POST"]),
]
app = Starlette(debug=True, routes=routes)
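# Example usage (a sketch, assuming uvicorn is installed and this file is saved as main.py):
#   uvicorn main:app --port 8000
#   curl -X POST http://localhost:8000/run-agent \
#        -H "Content-Type: application/json" -d '{"task": "What is 2+2?"}'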
| smolagents/examples/async_agent/main.py/0 | {
"file_path": "smolagents/examples/async_agent/main.py",
"repo_id": "smolagents",
"token_count": 484
} | 258 |
import json
import os
import shutil
import textwrap
from pathlib import Path
# import tqdm.asyncio
from smolagents.utils import AgentError
def serialize_agent_error(obj):
if isinstance(obj, AgentError):
return {"error_type": obj.__class__.__name__, "message": obj.message}
else:
return str(obj)
def get_image_description(file_name: str, question: str, visual_inspection_tool) -> str:
prompt = f"""Write a caption of 5 sentences for this image. Pay special attention to any details that might be useful for someone answering the following question:
{question}. But do not try to answer the question directly!
Do not add any information that is not present in the image."""
return visual_inspection_tool(image_path=file_name, question=prompt)
def get_document_description(file_path: str, question: str, document_inspection_tool) -> str:
prompt = f"""Write a caption of 5 sentences for this document. Pay special attention to any details that might be useful for someone answering the following question:
{question}. But do not try to answer the question directly!
Do not add any information that is not present in the document."""
return document_inspection_tool.forward_initial_exam_mode(file_path=file_path, question=prompt)
def get_single_file_description(file_path: str, question: str, visual_inspection_tool, document_inspection_tool):
file_extension = file_path.split(".")[-1]
if file_extension in ["png", "jpg", "jpeg"]:
file_description = f" - Attached image: {file_path}"
file_description += (
f"\n -> Image description: {get_image_description(file_path, question, visual_inspection_tool)}"
)
return file_description
elif file_extension in ["pdf", "xls", "xlsx", "docx", "doc", "xml"]:
image_path = file_path.split(".")[0] + ".png"
if os.path.exists(image_path):
description = get_image_description(image_path, question, visual_inspection_tool)
file_path = image_path
else:
description = get_document_description(file_path, question, document_inspection_tool)
file_description = f" - Attached document: {file_path}"
file_description += f"\n -> File description: {description}"
return file_description
elif file_extension in ["mp3", "m4a", "wav"]:
return f" - Attached audio: {file_path}"
else:
return f" - Attached file: {file_path}"
def get_zip_description(file_path: str, question: str, visual_inspection_tool, document_inspection_tool):
folder_path = file_path.replace(".zip", "")
os.makedirs(folder_path, exist_ok=True)
shutil.unpack_archive(file_path, folder_path)
prompt_use_files = ""
for root, dirs, files in os.walk(folder_path):
for file in files:
file_path = os.path.join(root, file)
prompt_use_files += "\n" + textwrap.indent(
get_single_file_description(file_path, question, visual_inspection_tool, document_inspection_tool),
prefix=" ",
)
return prompt_use_files
def get_tasks_to_run(data, total: int, base_filename: Path, tasks_ids: list[int]):
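"""Return the tasks that still need answering: skip any task id already recorded in the
sibling `*_answers.jsonl` file and, when `tasks_ids` is provided, keep only those ids."""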
f = base_filename.parent / f"{base_filename.stem}_answers.jsonl"
done = set()
if f.exists():
with open(f, encoding="utf-8") as fh:
done = {json.loads(line)["task_id"] for line in fh if line.strip()}
tasks = []
for i in range(total):
task_id = int(data[i]["task_id"])
if task_id not in done and (tasks_ids is None or task_id in tasks_ids):
tasks.append(data[i])
return tasks
| smolagents/examples/open_deep_research/scripts/run_agents.py/0 | {
"file_path": "smolagents/examples/open_deep_research/scripts/run_agents.py",
"repo_id": "smolagents",
"token_count": 1444
} | 259 |
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[project]
name = "smolagents"
version = "1.22.0.dev0"
description = "🤗 smolagents: a barebones library for agents. Agents write python code to call tools or orchestrate other agents."
authors = [
{ name="Aymeric Roucher", email="aymeric@hf.co" },
]
readme = "README.md"
requires-python = ">=3.10"
dependencies = [
"huggingface-hub>=0.31.2",
"requests>=2.32.3",
"rich>=13.9.4",
"jinja2>=3.1.4",
"pillow>=10.0.1", # Security fix for CVE-2023-4863: https://pillow.readthedocs.io/en/stable/releasenotes/10.0.1.html
"python-dotenv"
]
[project.optional-dependencies]
bedrock = [
"boto3>=1.36.18"
]
torch = [
"torch",
"torchvision",
"numpy>=1.21.2",
]
audio = [
"soundfile",
"smolagents[torch]",
]
docker = [
"docker>=7.1.0",
"websocket-client",
]
e2b = [
"e2b-code-interpreter>=1.0.3",
"python-dotenv>=1.0.1",
]
gradio = [
"gradio>=5.14.0", # Sidebar component GH-797
]
litellm = [
"litellm>=1.60.2",
]
mcp = [
"mcpadapt>=0.1.13", # Support structured output
"mcp",
]
mlx-lm = [
"mlx-lm",
]
openai = [
"openai>=1.58.1"
]
telemetry = [
"arize-phoenix",
"opentelemetry-sdk",
"opentelemetry-exporter-otlp",
"openinference-instrumentation-smolagents>=0.1.15" # Use new TokenUsage structure
]
toolkit = [
"ddgs>=9.0.0", # DuckDuckGoSearchTool
"markdownify>=0.14.1", # VisitWebpageTool
]
transformers = [
"accelerate",
"transformers>=4.0.0",
"smolagents[torch]",
]
vision = [
"helium",
"selenium",
]
vllm = [
"vllm",
"torch"
]
all = [
"smolagents[audio,docker,e2b,gradio,litellm,mcp,mlx-lm,openai,telemetry,toolkit,transformers,vision,bedrock]",
]
quality = [
"ruff>=0.9.0",
]
test = [
"ipython>=8.31.0", # for interactive environment tests
"pandas>=2.2.3",
"pytest>=8.1.0",
"pytest-datadir",
"pytest-timeout", # For test_all_docs: @pytest.mark.timeout
"python-dotenv>=1.0.1", # For test_all_docs
"smolagents[all]",
"rank-bm25", # For test_all_docs
"Wikipedia-API>=0.8.1",
"mlx[cpu]", # GH-1588
]
dev = [
"smolagents[quality,test]",
"sqlalchemy", # for ./examples
]
[tool.pytest.ini_options]
# Add the specified `OPTS` to the set of command line arguments as if they had been specified by the user.
addopts = "-sv --durations=0"
[tool.ruff]
line-length = 119
lint.ignore = [
"F403", # undefined-local-with-import-star
"E501", # line-too-long
]
lint.select = ["E", "F", "I", "W"]
[tool.ruff.lint.per-file-ignores]
"examples/*" = [
"E402", # module-import-not-at-top-of-file
]
[tool.ruff.lint.isort]
known-first-party = ["smolagents"]
lines-after-imports = 2
[tool.setuptools.package-data]
"smolagents.prompts" = ["*.yaml"]
[project.scripts]
smolagent = "smolagents.cli:main"
webagent = "smolagents.vision_web_browser:main"
| smolagents/pyproject.toml/0 | {
"file_path": "smolagents/pyproject.toml",
"repo_id": "smolagents",
"token_count": 1266
} | 260 |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import inspect
import json
import os
import pickle
import subprocess
import tempfile
import time
from io import BytesIO
from pathlib import Path
from textwrap import dedent
from typing import Any
import PIL.Image
import requests
from .default_tools import FinalAnswerTool
from .local_python_executor import CodeOutput, PythonExecutor
from .monitoring import LogLevel
from .tools import Tool, get_tools_definition_code
from .utils import AgentError
__all__ = ["E2BExecutor", "DockerExecutor", "WasmExecutor"]
try:
from dotenv import load_dotenv
load_dotenv()
except ModuleNotFoundError:
pass
class RemotePythonExecutor(PythonExecutor):
FINAL_ANSWER_EXCEPTION = "FinalAnswerException"
def __init__(self, additional_imports: list[str], logger):
self.additional_imports = additional_imports
self.logger = logger
self.logger.log("Initializing executor, hold on...")
self.installed_packages = []
def run_code_raise_errors(self, code: str) -> CodeOutput:
"""
Execute code, return the result and output, also determining if
the result is the final answer.
"""
raise NotImplementedError
def send_tools(self, tools: dict[str, Tool]):
if "final_answer" in tools:
self._patch_final_answer_with_exception(tools["final_answer"])
# Install tool packages
packages_to_install = {
pkg
for tool in tools.values()
for pkg in tool.to_dict()["requirements"]
if pkg not in self.installed_packages + ["smolagents"]
}
if packages_to_install:
self.installed_packages += self.install_packages(list(packages_to_install))
# Get tool definitions
code = get_tools_definition_code(tools)
if code:
code_output = self.run_code_raise_errors(code)
self.logger.log(code_output.logs)
def send_variables(self, variables: dict[str, Any]):
"""
Send variables to the kernel namespace using pickle.
"""
if not variables:
return
pickled_vars = base64.b64encode(pickle.dumps(variables)).decode()
code = f"""
import pickle, base64
vars_dict = pickle.loads(base64.b64decode('{pickled_vars}'))
locals().update(vars_dict)
"""
self.run_code_raise_errors(code)
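# Example: executor.send_variables({"df": my_dataframe}) makes `df` available to later code runs.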
def __call__(self, code_action: str) -> CodeOutput:
"""Run the code and determine if it is the final answer."""
return self.run_code_raise_errors(code_action)
def install_packages(self, additional_imports: list[str]):
if additional_imports:
code_output = self.run_code_raise_errors(f"!pip install {' '.join(additional_imports)}")
self.logger.log(code_output.logs)
return additional_imports
def _patch_final_answer_with_exception(self, final_answer_tool: FinalAnswerTool):
"""Patch the FinalAnswerTool to raise an exception.
This is necessary because the remote executors
rely on the FinalAnswerTool to detect the final answer.
It modifies the `forward` method of the FinalAnswerTool to raise
a `FinalAnswerException` with the final answer as a pickled value.
This allows the executor to catch this exception and return the final answer.
Args:
final_answer_tool (`FinalAnswerTool`): FinalAnswerTool instance to patch.
"""
# Create a new class that inherits from the original FinalAnswerTool
class _FinalAnswerTool(final_answer_tool.__class__):
pass
# Add a new forward method that raises the FinalAnswerException
# - Define the new forward method function
def forward(self, *args, **kwargs) -> Any:
import base64
import pickle
class FinalAnswerException(Exception):
def __init__(self, value):
self.value = value
raise FinalAnswerException(base64.b64encode(pickle.dumps(self._forward(*args, **kwargs))).decode())
# - Set the new forward method function to the _FinalAnswerTool class
_FinalAnswerTool.forward = forward
# Rename the original forward method to _forward
# - Get the original forward method function from the final_answer_tool instance
original_forward_function = final_answer_tool.forward.__func__
# - Set the new _forward method function to the _FinalAnswerTool class
_FinalAnswerTool._forward = original_forward_function
# - Update the source code of the new forward method to match the original but with the new name
_FinalAnswerTool._forward.__source__ = inspect.getsource(original_forward_function).replace(
"def forward(", "def _forward("
)
# Set the new class as the class of the final_answer_tool instance
final_answer_tool.__class__ = _FinalAnswerTool
class E2BExecutor(RemotePythonExecutor):
"""
Executes Python code using E2B.
Args:
additional_imports (`list[str]`): Additional imports to install.
logger (`Logger`): Logger to use.
**kwargs: Additional arguments to pass to the E2B Sandbox.
"""
def __init__(self, additional_imports: list[str], logger, **kwargs):
super().__init__(additional_imports, logger)
try:
from e2b_code_interpreter import Sandbox
except ModuleNotFoundError:
raise ModuleNotFoundError(
"""Please install 'e2b' extra to use E2BExecutor: `pip install 'smolagents[e2b]'`"""
)
self.sandbox = Sandbox(**kwargs)
self.installed_packages = self.install_packages(additional_imports)
self.logger.log("E2B is running", level=LogLevel.INFO)
def run_code_raise_errors(self, code: str) -> CodeOutput:
execution = self.sandbox.run_code(code)
execution_logs = "\n".join([str(log) for log in execution.logs.stdout])
# Handle errors
if execution.error:
# Check if the error is a FinalAnswerException
if execution.error.name == RemotePythonExecutor.FINAL_ANSWER_EXCEPTION:
final_answer = pickle.loads(base64.b64decode(execution.error.value))
return CodeOutput(output=final_answer, logs=execution_logs, is_final_answer=True)
# Construct error message
error_message = (
f"{execution_logs}\n"
f"Executing code yielded an error:\n"
f"{execution.error.name}\n"
f"{execution.error.value}\n"
f"{execution.error.traceback}"
)
raise AgentError(error_message, self.logger)
# Handle results
if not execution.results:
return CodeOutput(output=None, logs=execution_logs, is_final_answer=False)
for result in execution.results:
if not result.is_main_result:
continue
# Handle image outputs
for attribute_name in ["jpeg", "png"]:
img_data = getattr(result, attribute_name, None)
if img_data is not None:
decoded_bytes = base64.b64decode(img_data.encode("utf-8"))
return CodeOutput(
output=PIL.Image.open(BytesIO(decoded_bytes)), logs=execution_logs, is_final_answer=False
)
# Handle other data formats
for attribute_name in [
"chart",
"data",
"html",
"javascript",
"json",
"latex",
"markdown",
"pdf",
"svg",
"text",
]:
data = getattr(result, attribute_name, None)
if data is not None:
return CodeOutput(output=data, logs=execution_logs, is_final_answer=False)
# If no main result found, return None
return CodeOutput(output=None, logs=execution_logs, is_final_answer=False)
def cleanup(self):
"""Clean up the E2B sandbox and resources."""
try:
if hasattr(self, "sandbox"):
self.logger.log("Shutting down sandbox...", level=LogLevel.INFO)
self.sandbox.kill()
self.logger.log("Sandbox cleanup completed", level=LogLevel.INFO)
del self.sandbox
except Exception as e:
self.logger.log_error(f"Error during cleanup: {e}")
class DockerExecutor(RemotePythonExecutor):
"""
Executes Python code using Jupyter Kernel Gateway in a Docker container.
"""
def __init__(
self,
additional_imports: list[str],
logger,
host: str = "127.0.0.1",
port: int = 8888,
image_name: str = "jupyter-kernel",
build_new_image: bool = True,
container_run_kwargs: dict[str, Any] | None = None,
dockerfile_content: str | None = None,
):
"""
Initialize the Docker-based Jupyter Kernel Gateway executor.
Args:
additional_imports: Additional imports to install.
logger: Logger to use.
host: Host to bind to.
port: Port to bind to.
image_name: Name of the Docker image to use. If the image doesn't exist, it will be built.
build_new_image: If True, the image will be rebuilt even if it already exists.
container_run_kwargs: Additional keyword arguments to pass to the Docker container run command.
dockerfile_content: Custom Dockerfile content. If None, uses default.
"""
super().__init__(additional_imports, logger)
try:
import docker
from websocket import create_connection
except ModuleNotFoundError:
raise ModuleNotFoundError(
"Please install 'docker' extra to use DockerExecutor: `pip install 'smolagents[docker]'`"
)
self.host = host
self.port = port
self.image_name = image_name
self.dockerfile_content = dockerfile_content or dedent(
"""\
FROM python:3.12-bullseye
RUN pip install jupyter_kernel_gateway jupyter_client
EXPOSE 8888
CMD ["jupyter", "kernelgateway", "--KernelGatewayApp.ip='0.0.0.0'", "--KernelGatewayApp.port=8888", "--KernelGatewayApp.allow_origin='*'"]
"""
)
# Initialize Docker
try:
self.client = docker.from_env()
except docker.errors.DockerException as e:
raise RuntimeError("Could not connect to Docker daemon: make sure Docker is running.") from e
# Build and start container
try:
# Check if image exists, unless forced to rebuild
if not build_new_image:
try:
self.client.images.get(self.image_name)
self.logger.log(f"Using existing Docker image: {self.image_name}", level=LogLevel.INFO)
except docker.errors.ImageNotFound:
self.logger.log(f"Image {self.image_name} not found, building...", level=LogLevel.INFO)
build_new_image = True
if build_new_image:
self.logger.log(f"Building Docker image {self.image_name}...", level=LogLevel.INFO)
dockerfile_path = Path(__file__).parent / "Dockerfile"
if not dockerfile_path.exists():
with open(dockerfile_path, "w") as f:
f.write(self.dockerfile_content)
_, build_logs = self.client.images.build(
path=str(dockerfile_path.parent), dockerfile=str(dockerfile_path), tag=self.image_name
)
for log_chunk in build_logs:
# Only log non-empty messages
if log_message := log_chunk.get("stream", "").rstrip():
self.logger.log(log_message, level=LogLevel.DEBUG)
self.logger.log(f"Starting container on {host}:{port}...", level=LogLevel.INFO)
# Create base container parameters
container_kwargs = {}
if container_run_kwargs:
container_kwargs.update(container_run_kwargs)
# Ensure required port mapping and background running
if not isinstance(container_kwargs.get("ports"), dict):
container_kwargs["ports"] = {}
container_kwargs["ports"]["8888/tcp"] = (host, port)
container_kwargs["detach"] = True
self.container = self.client.containers.run(self.image_name, **container_kwargs)
retries = 0
while self.container.status != "running" and retries < 5:
self.logger.log(f"Container status: {self.container.status}, waiting...", level=LogLevel.INFO)
time.sleep(1)
self.container.reload()
retries += 1
self.base_url = f"http://{host}:{port}"
# Create new kernel via HTTP
r = requests.post(f"{self.base_url}/api/kernels")
if r.status_code != 201:
error_details = {
"status_code": r.status_code,
"headers": dict(r.headers),
"url": r.url,
"body": r.text,
"request_method": r.request.method,
"request_headers": dict(r.request.headers),
"request_body": r.request.body,
}
self.logger.log_error(f"Failed to create kernel. Details: {json.dumps(error_details, indent=2)}")
raise RuntimeError(f"Failed to create kernel: Status {r.status_code}\nResponse: {r.text}") from None
self.kernel_id = r.json()["id"]
ws_url = f"ws://{host}:{port}/api/kernels/{self.kernel_id}/channels"
self.ws = create_connection(ws_url)
self.installed_packages = self.install_packages(additional_imports)
self.logger.log(
f"Container {self.container.short_id} is running with kernel {self.kernel_id}", level=LogLevel.INFO
)
except Exception as e:
self.cleanup()
raise RuntimeError(f"Failed to initialize Jupyter kernel: {e}") from e
def run_code_raise_errors(self, code_action: str) -> CodeOutput:
try:
# Send execute request
msg_id = self._send_execute_request(code_action)
# Collect output and results
outputs = []
result = None
is_final_answer = False
while True:
msg = json.loads(self.ws.recv())
parent_msg_id = msg.get("parent_header", {}).get("msg_id")
# Skip unrelated messages
if parent_msg_id != msg_id:
continue
msg_type = msg.get("msg_type", "")
msg_content = msg.get("content", {})
if msg_type == "stream":
outputs.append(msg_content["text"])
elif msg_type == "execute_result":
result = msg_content["data"].get("text/plain", None)
elif msg_type == "error":
if msg_content.get("ename", "") == RemotePythonExecutor.FINAL_ANSWER_EXCEPTION:
result = pickle.loads(base64.b64decode(msg_content.get("evalue", "")))
is_final_answer = True
else:
raise AgentError("\n".join(msg_content.get("traceback", [])), self.logger)
elif msg_type == "status" and msg_content["execution_state"] == "idle":
break
return CodeOutput(output=result, logs="".join(outputs), is_final_answer=is_final_answer)
except Exception as e:
self.logger.log_error(f"Code execution failed: {e}")
raise
def _send_execute_request(self, code: str) -> str:
"""Send code execution request to kernel."""
import uuid
# Generate a unique message ID
msg_id = str(uuid.uuid4())
# Create execute request
execute_request = {
"header": {
"msg_id": msg_id,
"username": "anonymous",
"session": str(uuid.uuid4()),
"msg_type": "execute_request",
"version": "5.0",
},
"parent_header": {},
"metadata": {},
"content": {
"code": code,
"silent": False,
"store_history": True,
"user_expressions": {},
"allow_stdin": False,
},
}
self.ws.send(json.dumps(execute_request))
return msg_id
def cleanup(self):
"""Clean up the Docker container and resources."""
try:
if hasattr(self, "container"):
self.logger.log(f"Stopping and removing container {self.container.short_id}...", level=LogLevel.INFO)
self.container.stop()
self.container.remove()
self.logger.log("Container cleanup completed", level=LogLevel.INFO)
del self.container
except Exception as e:
self.logger.log_error(f"Error during cleanup: {e}")
def delete(self):
"""Ensure cleanup on deletion."""
self.cleanup()
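# Example usage (a sketch, assuming a local Docker daemon is running and `agent_logger`
# is a smolagents logger instance):
#   executor = DockerExecutor(additional_imports=[], logger=agent_logger, port=8888)
#   code_output = executor.run_code_raise_errors("print('hello from the container')")
#   executor.cleanup()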
class WasmExecutor(RemotePythonExecutor):
"""
Remote Python code executor in a sandboxed WebAssembly environment powered by Pyodide and Deno.
This executor combines Deno's secure runtime with Pyodide's WebAssembly-compiled Python interpreter to deliver strong isolation guarantees while enabling full Python execution.
Args:
additional_imports (`list[str]`): Additional Python packages to install in the Pyodide environment.
logger (`Logger`): Logger to use for output and errors.
deno_path (`str`, optional): Path to the Deno executable. If not provided, will use "deno" from PATH.
deno_permissions (`list[str]`, optional): List of permissions to grant to the Deno runtime.
Default is minimal permissions needed for execution.
timeout (`int`, optional): Timeout in seconds for code execution. Default is 60 seconds.
"""
def __init__(
self,
additional_imports: list[str],
logger,
deno_path: str = "deno",
deno_permissions: list[str] | None = None,
timeout: int = 60,
):
super().__init__(additional_imports, logger)
# Check if Deno is installed
try:
subprocess.run([deno_path, "--version"], capture_output=True, check=True)
except (subprocess.SubprocessError, FileNotFoundError):
raise RuntimeError(
"Deno is not installed or not found in PATH. Please install Deno from https://deno.land/"
)
self.deno_path = deno_path
self.timeout = timeout
# Default minimal permissions needed
if deno_permissions is None:
# Use minimal permissions for Deno execution
home_dir = os.getenv("HOME")
deno_permissions = [
"allow-net="
+ ",".join(
[
"0.0.0.0:8000", # allow requests to the local server
"cdn.jsdelivr.net:443", # allow loading pyodide packages
"pypi.org:443,files.pythonhosted.org:443", # allow pyodide install packages from PyPI
]
),
f"allow-read={home_dir}/.cache/deno",
f"allow-write={home_dir}/.cache/deno",
]
self.deno_permissions = [f"--{perm}" for perm in deno_permissions]
# Create the Deno JavaScript runner file
self._create_deno_runner()
# Install additional packages
self.installed_packages = self.install_packages(additional_imports)
self.logger.log("WasmExecutor is running", level=LogLevel.INFO)
def _create_deno_runner(self):
"""Create the Deno JavaScript file that will run Pyodide and execute Python code."""
self.runner_dir = tempfile.mkdtemp(prefix="pyodide_deno_")
self.runner_path = os.path.join(self.runner_dir, "pyodide_runner.js")
# Create the JavaScript runner file
with open(self.runner_path, "w") as f:
f.write(self.JS_CODE)
# Start the Deno server
self._start_deno_server()
def _start_deno_server(self):
"""Start the Deno server that will run our JavaScript code."""
cmd = [self.deno_path, "run"] + self.deno_permissions + [self.runner_path]
# Start the server process
self.server_process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
)
# Wait for the server to start
time.sleep(2) # Give the server time to start
# Check if the server started successfully
if self.server_process.poll() is not None:
stderr = self.server_process.stderr.read()
raise RuntimeError(f"Failed to start Deno server: {stderr}")
self.server_url = "http://localhost:8000" # TODO: Another port?
# Test the connection
try:
response = requests.get(self.server_url)
if response.status_code != 200:
raise RuntimeError(f"Server responded with status code {response.status_code}: {response.text}")
except requests.RequestException as e:
raise RuntimeError(f"Failed to connect to Deno server: {e}")
def run_code_raise_errors(self, code: str) -> CodeOutput:
"""
Execute Python code in the Pyodide environment and return the result.
Args:
code (`str`): Python code to execute.
Returns:
`CodeOutput`: Code output containing the result, logs, and whether it is the final answer.
"""
try:
# Prepare the request payload
payload = {
"code": code,
"packages": self.installed_packages,
}
# Send the request to the Deno server
response = requests.post(self.server_url, json=payload, timeout=self.timeout)
if response.status_code != 200:
raise AgentError(f"Server error: {response.text}", self.logger)
result = None
is_final_answer = False
# Parse the response
result_data = response.json()
# Process the result
if result_data.get("result"):
result = result_data.get("result")
# Check for execution errors
elif result_data.get("error"):
error = result_data["error"]
if (
error.get("pythonExceptionType") == RemotePythonExecutor.FINAL_ANSWER_EXCEPTION
and "pythonExceptionValue" in error
):
result = pickle.loads(base64.b64decode(error["pythonExceptionValue"]))
is_final_answer = True
else:
error_message = f"{error.get('name', 'Error')}: {error.get('message', 'Unknown error')}"
if "stack" in error:
error_message += f"\n{error['stack']}"
raise AgentError(error_message, self.logger)
# Get the execution logs
execution_logs = result_data.get("stdout", "")
# Handle image results
if isinstance(result, dict) and result.get("type") == "image":
image_data = result.get("data", "")
decoded_bytes = base64.b64decode(image_data.encode("utf-8"))
return CodeOutput(
output=PIL.Image.open(BytesIO(decoded_bytes)), logs=execution_logs, is_final_answer=is_final_answer
)
return CodeOutput(output=result, logs=execution_logs, is_final_answer=is_final_answer)
except requests.RequestException as e:
raise AgentError(f"Failed to communicate with Deno server: {e}", self.logger)
def install_packages(self, additional_imports: list[str]) -> list[str]:
"""
Install additional Python packages in the Pyodide environment.
Args:
additional_imports (`list[str]`): Package names to install.
Returns:
list[str]: Installed packages.
"""
# In Pyodide, we don't actually install packages here, but we keep track of them
# to load them when executing code
# TODO: Install here instead?
self.logger.log(f"Adding packages to load: {', '.join(additional_imports)}", level=LogLevel.INFO)
return additional_imports
def cleanup(self):
"""Clean up resources used by the executor."""
if hasattr(self, "server_process") and self.server_process:
self.logger.log("Stopping Deno server...", level=LogLevel.INFO)
self.server_process.terminate()
try:
self.server_process.wait(timeout=5)
except subprocess.TimeoutExpired:
self.server_process.kill()
# Remove the temporary directory
if hasattr(self, "runner_dir") and os.path.exists(self.runner_dir):
import shutil
shutil.rmtree(self.runner_dir)
def delete(self):
"""Ensure cleanup on deletion."""
self.cleanup()
JS_CODE = dedent("""\
// pyodide_runner.js - Runs Python code in Pyodide within Deno
import { serve } from "https://deno.land/std/http/server.ts";
import { loadPyodide } from "npm:pyodide";
// Initialize Pyodide instance
const pyodidePromise = loadPyodide();
// Function to execute Python code and return the result
async function executePythonCode(code) {
const pyodide = await pyodidePromise;
// Create a capture for stdout
pyodide.runPython(`
import sys
import io
sys.stdout = io.StringIO()
`);
// Execute the code and capture any errors
let result = null;
let error = null;
let stdout = "";
try {
// Execute the code
result = await pyodide.runPythonAsync(code);
// Get captured stdout
stdout = pyodide.runPython("sys.stdout.getvalue()");
} catch (e) {
error = {
name: e.constructor.name,
message: e.message,
stack: e.stack
};
// Extract Python exception details
if (e.constructor.name === "PythonError") {
// Get the Python exception type from the error message: at the end of the traceback
const errorMatch = e.message.match(/\\n([^:]+Exception): /);
if (errorMatch) {
error.pythonExceptionType = errorMatch[1].split(".").pop();
}
// If the error is a FinalAnswerException, extract its encoded value
if (error.pythonExceptionType === "FinalAnswerException") {
// Extract the base64 encoded value from the error message
const valueMatch = e.message.match(/FinalAnswerException: (.*?)(?:\\n|$)/);
if (valueMatch) {
error.pythonExceptionValue = valueMatch[1];
}
}
}
}
return {
result,
stdout,
error
};
}
// Start a simple HTTP server to receive code execution requests
//const port = 8765;
//console.log(`Starting Pyodide server on port ${port}`);
serve(async (req) => {
if (req.method === "POST") {
try {
const body = await req.json();
const { code, packages = [] } = body;
// Load any requested packages
if (packages && packages.length > 0) {
const pyodide = await pyodidePromise;
//await pyodide.loadPackagesFromImports(code);
await pyodide.loadPackage("micropip");
const micropip = pyodide.pyimport("micropip");
try {
await micropip.install(packages);
} catch (e) {
console.error(`Failed to install packages ${packages}: ${e.message}`);
}
}
const result = await executePythonCode(code);
return new Response(JSON.stringify(result), {
headers: { "Content-Type": "application/json" }
});
} catch (e) {
return new Response(JSON.stringify({ error: e.message }), {
status: 500,
headers: { "Content-Type": "application/json" }
});
}
}
return new Response("Pyodide-Deno Executor is running. Send POST requests with code to execute.", {
headers: { "Content-Type": "text/plain" }
});
});
""")
| smolagents/src/smolagents/remote_executors.py/0 | {
"file_path": "smolagents/src/smolagents/remote_executors.py",
"repo_id": "smolagents",
"token_count": 13653
} | 261 |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest
from unittest.mock import Mock, patch
import pytest
from smolagents.agent_types import AgentAudio, AgentImage, AgentText
from smolagents.gradio_ui import GradioUI, pull_messages_from_step, stream_to_gradio
from smolagents.memory import ActionStep, FinalAnswerStep, PlanningStep, ToolCall
from smolagents.models import ChatMessageStreamDelta
from smolagents.monitoring import Timing, TokenUsage
class GradioUITester(unittest.TestCase):
def setUp(self):
"""Initialize test environment"""
self.temp_dir = tempfile.mkdtemp()
self.mock_agent = Mock()
self.ui = GradioUI(agent=self.mock_agent, file_upload_folder=self.temp_dir)
self.allowed_types = [".pdf", ".docx", ".txt"]
def tearDown(self):
"""Clean up test environment"""
shutil.rmtree(self.temp_dir)
def test_upload_file_default_types(self):
"""Test default allowed file types"""
default_types = [".pdf", ".docx", ".txt"]
for file_type in default_types:
with tempfile.NamedTemporaryFile(suffix=file_type) as temp_file:
mock_file = Mock()
mock_file.name = temp_file.name
textbox, uploads_log = self.ui.upload_file(mock_file, [])
self.assertIn("File uploaded:", textbox.value)
self.assertEqual(len(uploads_log), 1)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, os.path.basename(temp_file.name))))
def test_upload_file_default_types_disallowed(self):
"""Test default disallowed file types"""
disallowed_types = [".exe", ".sh", ".py", ".jpg"]
for file_type in disallowed_types:
with tempfile.NamedTemporaryFile(suffix=file_type) as temp_file:
mock_file = Mock()
mock_file.name = temp_file.name
textbox, uploads_log = self.ui.upload_file(mock_file, [])
self.assertEqual(textbox.value, "File type disallowed")
self.assertEqual(len(uploads_log), 0)
def test_upload_file_success(self):
"""Test successful file upload scenario"""
with tempfile.NamedTemporaryFile(suffix=".txt") as temp_file:
mock_file = Mock()
mock_file.name = temp_file.name
textbox, uploads_log = self.ui.upload_file(mock_file, [])
self.assertIn("File uploaded:", textbox.value)
self.assertEqual(len(uploads_log), 1)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, os.path.basename(temp_file.name))))
self.assertEqual(uploads_log[0], os.path.join(self.temp_dir, os.path.basename(temp_file.name)))
def test_upload_file_none(self):
"""Test scenario when no file is selected"""
textbox, uploads_log = self.ui.upload_file(None, [])
self.assertEqual(textbox.value, "No file uploaded")
self.assertEqual(len(uploads_log), 0)
def test_upload_file_invalid_type(self):
"""Test disallowed file type"""
with tempfile.NamedTemporaryFile(suffix=".exe") as temp_file:
mock_file = Mock()
mock_file.name = temp_file.name
textbox, uploads_log = self.ui.upload_file(mock_file, [])
self.assertEqual(textbox.value, "File type disallowed")
self.assertEqual(len(uploads_log), 0)
def test_upload_file_special_chars(self):
"""Test scenario with special characters in filename"""
with tempfile.NamedTemporaryFile(suffix=".txt") as temp_file:
# Create a new temporary file with special characters
special_char_name = os.path.join(os.path.dirname(temp_file.name), "test@#$%^&*.txt")
shutil.copy(temp_file.name, special_char_name)
try:
mock_file = Mock()
mock_file.name = special_char_name
with patch("shutil.copy"):
textbox, uploads_log = self.ui.upload_file(mock_file, [])
self.assertIn("File uploaded:", textbox.value)
self.assertEqual(len(uploads_log), 1)
self.assertIn("test_____", uploads_log[0])
finally:
# Clean up the special character file
if os.path.exists(special_char_name):
os.remove(special_char_name)
def test_upload_file_custom_types(self):
"""Test custom allowed file types"""
with tempfile.NamedTemporaryFile(suffix=".csv") as temp_file:
mock_file = Mock()
mock_file.name = temp_file.name
textbox, uploads_log = self.ui.upload_file(mock_file, [], allowed_file_types=[".csv"])
self.assertIn("File uploaded:", textbox.value)
self.assertEqual(len(uploads_log), 1)
class TestStreamToGradio:
"""Tests for the stream_to_gradio function."""
@patch("smolagents.gradio_ui.pull_messages_from_step")
def test_stream_to_gradio_memory_step(self, mock_pull_messages):
"""Test streaming a memory step"""
# Create mock agent and memory step
mock_agent = Mock()
mock_agent.run = Mock(return_value=[Mock(spec=ActionStep)])
mock_agent.model = Mock()
# Mock the pull_messages_from_step function to return some messages
mock_message = Mock()
mock_pull_messages.return_value = [mock_message]
# Call stream_to_gradio
result = list(stream_to_gradio(mock_agent, "test task"))
# Verify that pull_messages_from_step was called and the message was yielded
mock_pull_messages.assert_called_once()
assert result == [mock_message]
def test_stream_to_gradio_stream_delta(self):
"""Test streaming a ChatMessageStreamDelta"""
# Create mock agent and stream delta
mock_agent = Mock()
mock_delta = ChatMessageStreamDelta(content="Hello")
mock_agent.run = Mock(return_value=[mock_delta])
mock_agent.model = Mock()
# Call stream_to_gradio
result = list(stream_to_gradio(mock_agent, "test task"))
# Verify that the content was yielded
assert result == ["Hello"]
def test_stream_to_gradio_multiple_deltas(self):
"""Test streaming multiple ChatMessageStreamDeltas"""
# Create mock agent and stream deltas
mock_agent = Mock()
mock_delta1 = ChatMessageStreamDelta(content="Hello")
mock_delta2 = ChatMessageStreamDelta(content=" world")
mock_agent.run = Mock(return_value=[mock_delta1, mock_delta2])
mock_agent.model = Mock()
# Call stream_to_gradio
result = list(stream_to_gradio(mock_agent, "test task"))
# Verify that the content was accumulated and yielded
assert result == ["Hello", "Hello world"]
@pytest.mark.parametrize(
"task,task_images,reset_memory,additional_args",
[
("simple task", None, False, None),
("task with images", ["image1.png", "image2.png"], False, None),
("task with reset", None, True, None),
("task with args", None, False, {"arg1": "value1"}),
("complex task", ["image.png"], True, {"arg1": "value1", "arg2": "value2"}),
],
)
def test_stream_to_gradio_parameters(self, task, task_images, reset_memory, additional_args):
"""Test that stream_to_gradio passes parameters correctly to agent.run"""
# Create mock agent
mock_agent = Mock()
mock_agent.run = Mock(return_value=[])
# Call stream_to_gradio
list(
stream_to_gradio(
mock_agent,
task=task,
task_images=task_images,
reset_agent_memory=reset_memory,
additional_args=additional_args,
)
)
# Verify that agent.run was called with the right parameters
mock_agent.run.assert_called_once_with(
task, images=task_images, stream=True, reset=reset_memory, additional_args=additional_args
)
class TestPullMessagesFromStep:
def test_action_step_basic(
self,
):
"""Test basic ActionStep processing."""
step = ActionStep(
step_number=1,
model_output="This is the model output",
observations="Some execution logs",
error=None,
timing=Timing(start_time=1.0, end_time=3.5),
token_usage=TokenUsage(input_tokens=100, output_tokens=50),
)
messages = list(pull_messages_from_step(step))
assert len(messages) == 5 # step number, model_output, logs, footnote, divider
for message, expected_content in zip(
messages,
[
"**Step 1**",
"This is the model output",
"execution logs",
"Input tokens: 100 | Output tokens: 50 | Duration: 2.5",
"-----",
],
):
assert expected_content in message.content
def test_action_step_with_tool_calls(self):
"""Test ActionStep with tool calls."""
step = ActionStep(
step_number=2,
tool_calls=[ToolCall(name="test_tool", arguments={"answer": "Test answer"}, id="tool_call_1")],
observations="Tool execution logs",
timing=Timing(start_time=1.0, end_time=2.5),
token_usage=TokenUsage(input_tokens=100, output_tokens=50),
)
messages = list(pull_messages_from_step(step))
assert len(messages) == 5 # step, tool call, logs, footnote, divider
assert messages[1].content == "Test answer"
assert "Used tool test_tool" in messages[1].metadata["title"]
@pytest.mark.parametrize(
"tool_name, args, expected",
[
("python_interpreter", "print('Hello')", "```python\nprint('Hello')\n```"),
("regular_tool", {"key": "value"}, "{'key': 'value'}"),
("string_args_tool", "simple string", "simple string"),
],
)
def test_action_step_tool_call_formats(self, tool_name, args, expected):
"""Test different formats of tool calls."""
tool_call = Mock()
tool_call.name = tool_name
tool_call.arguments = args
step = ActionStep(
step_number=1,
tool_calls=[tool_call],
timing=Timing(start_time=1.0, end_time=2.5),
token_usage=TokenUsage(input_tokens=100, output_tokens=50),
)
messages = list(pull_messages_from_step(step))
tool_message = next(
msg
for msg in messages
if msg.role == "assistant" and msg.metadata and msg.metadata.get("title", "").startswith("🛠️")
)
assert expected in tool_message.content
def test_action_step_with_error(self):
"""Test ActionStep with error."""
step = ActionStep(
step_number=3,
error="This is an error message",
timing=Timing(start_time=1.0, end_time=2.0),
token_usage=TokenUsage(input_tokens=100, output_tokens=200),
)
messages = list(pull_messages_from_step(step))
error_message = next((m for m in messages if "error" in str(m.content).lower()), None)
assert error_message is not None
assert "This is an error message" in error_message.content
def test_action_step_with_images(self):
"""Test ActionStep with observation images."""
step = ActionStep(
step_number=4,
observations_images=["image1.png", "image2.jpg"],
token_usage=TokenUsage(input_tokens=100, output_tokens=200),
timing=Timing(start_time=1.0, end_time=2.0),
)
with patch("smolagents.gradio_ui.AgentImage") as mock_agent_image:
mock_agent_image.return_value.to_string.side_effect = lambda: "path/to/image.png"
messages = list(pull_messages_from_step(step))
image_messages = [m for m in messages if "image" in str(m).lower()]
assert len(image_messages) == 2
assert "path/to/image.png" in str(image_messages[0])
@pytest.mark.parametrize(
"skip_model_outputs, expected_messages_length, token_usage",
[(False, 4, TokenUsage(input_tokens=80, output_tokens=30)), (True, 2, None)],
)
def test_planning_step(self, skip_model_outputs, expected_messages_length, token_usage):
"""Test PlanningStep processing."""
step = PlanningStep(
plan="1. First step\n2. Second step",
model_input_messages=Mock(),
model_output_message=Mock(),
token_usage=token_usage,
timing=Timing(start_time=1.0, end_time=2.0),
)
messages = list(pull_messages_from_step(step, skip_model_outputs=skip_model_outputs))
assert len(messages) == expected_messages_length # [header, plan,] footnote, divider
expected_contents = [
"**Planning step**",
"1. First step\n2. Second step",
"Input tokens: 80 | Output tokens: 30" if token_usage else "",
"-----",
]
for message, expected_content in zip(messages, expected_contents[-expected_messages_length:]):
assert expected_content in message.content
if not token_usage:
assert "Input tokens: 80 | Output tokens: 30" not in message.content
@pytest.mark.parametrize(
"answer_type, answer_value, expected_content",
[
(AgentText, "This is a text answer", "**Final answer:**\nThis is a text answer\n"),
(lambda: "Plain string", "Plain string", "**Final answer:** Plain string"),
],
)
def test_final_answer_step(self, answer_type, answer_value, expected_content):
"""Test FinalAnswerStep with different answer types."""
try:
final_answer = answer_type()
except TypeError:
with patch.object(answer_type, "to_string", return_value=answer_value):
final_answer = answer_type(answer_value)
step = FinalAnswerStep(
output=final_answer,
)
messages = list(pull_messages_from_step(step))
assert len(messages) == 1
assert messages[0].content == expected_content
def test_final_answer_step_image(self):
"""Test FinalAnswerStep with image answer."""
with patch.object(AgentImage, "to_string", return_value="path/to/image.png"):
step = FinalAnswerStep(output=AgentImage("path/to/image.png"))
messages = list(pull_messages_from_step(step))
assert len(messages) == 1
assert messages[0].content["path"] == "path/to/image.png"
assert messages[0].content["mime_type"] == "image/png"
def test_final_answer_step_audio(self):
"""Test FinalAnswerStep with audio answer."""
with patch.object(AgentAudio, "to_string", return_value="path/to/audio.wav"):
step = FinalAnswerStep(output=AgentAudio("path/to/audio.wav"))
messages = list(pull_messages_from_step(step))
assert len(messages) == 1
assert messages[0].content["path"] == "path/to/audio.wav"
assert messages[0].content["mime_type"] == "audio/wav"
def test_unsupported_step_type(self):
"""Test handling of unsupported step types."""
class UnsupportedStep(Mock):
pass
step = UnsupportedStep()
with pytest.raises(ValueError, match="Unsupported step type"):
list(pull_messages_from_step(step))
| smolagents/tests/test_gradio_ui.py/0 | {
"file_path": "smolagents/tests/test_gradio_ui.py",
"repo_id": "smolagents",
"token_count": 7075
} | 262 |
install-server:
cd server && make install
install-server-cpu:
cd server && make install-server
install-router:
cargo install --path backends/v3/
install-launcher:
cargo install --path launcher/
install-benchmark:
cargo install --path benchmark/
install: install-server install-router install-launcher
install-cpu: install-server-cpu install-router install-launcher
server-dev:
cd server && make run-dev
router-dev:
cd router && cargo run -- --port 8080
rust-tests: install-router install-launcher
cargo test
install-integration-tests:
cd integration-tests && pip install -r requirements.txt
cd clients/python && pip install .
integration-tests: install-integration-tests
pytest -s -vv -m "not private" integration-tests
update-integration-tests: install-integration-tests
pytest -s -vv --snapshot-update integration-tests
python-server-tests:
HF_HUB_ENABLE_HF_TRANSFER=1 pytest -s -vv -m "not private" server/tests
python-client-tests:
pytest clients/python/tests
python-tests: python-server-tests python-client-tests
run-falcon-7b-instruct:
text-generation-launcher --model-id tiiuae/falcon-7b-instruct --port 8080
run-falcon-7b-instruct-quantize:
text-generation-launcher --model-id tiiuae/falcon-7b-instruct --quantize bitsandbytes --port 8080
clean:
rm -rf target aml
preview_doc:
doc-builder preview text-generation-inference docs/source --not_python_module
| text-generation-inference/Makefile/0 | {
"file_path": "text-generation-inference/Makefile",
"repo_id": "text-generation-inference",
"token_count": 468
} | 263 |
# Text-generation-inference - Gaudi backend
## Description
This is the TGI backend for Intel Gaudi. This backend is composed of the TGI server optimized for Gaudi hardware.
## Build your own image
The simplest way to build TGI with the Gaudi backend is to use the provided `Makefile`:
Option 1: From the project root directory:
```bash
make -C backends/gaudi image
```
Option 2: From the Gaudi backend directory:
```bash
cd backends/gaudi
make image
```
You can now run the server with the following command:
Option 1: Sharded:
```bash
model=meta-llama/Llama-3.1-8B-Instruct
hf_token=$(cat ${HOME}/.cache/huggingface/token)
volume=${HOME}/.cache/huggingface
docker run --runtime=habana --ipc=host --cap-add=sys_nice \
-p 8080:80 -v $volume:/data \
-e LOG_LEVEL=debug -e HF_TOKEN=$hf_token \
tgi-gaudi --model-id $model \
--sharded true --num-shard 8 \
--max-input-tokens 512 --max-total-tokens 1024 --max-batch-size 8 --max-batch-prefill-tokens 2048
```
Option 2: Non-sharded:
```bash
model=meta-llama/Llama-3.1-8B-Instruct
hf_token=$(cat ${HOME}/.cache/huggingface/token)
volume=${HOME}/.cache/huggingface
docker run --runtime=habana --ipc=host --cap-add=sys_nice \
-p 8080:80 -v $volume:/data \
-e LOG_LEVEL=debug -e HF_TOKEN=$hf_token \
tgi-gaudi --model-id $model \
--max-input-tokens 512 --max-total-tokens 1024 --max-batch-size 4 --max-batch-prefill-tokens 2048
```
## Contributing
### Local Development
This is useful if you want to run the server locally for better debugging.
```bash
make -C backends/gaudi run-local-dev-container
```
Then run the following command inside the container to install tgi for gaudi:
```bash
make -C backends/gaudi local-dev-install
```
Add rust to path:
```bash
. "$HOME/.cargo/env"
```
Option 1: Run the server (sharded model):
```bash
LOG_LEVEL=debug text-generation-launcher \
--model-id meta-llama/Llama-3.1-8B-Instruct \
--sharded true \
--num-shard 8 \
--max-input-tokens 512 \
--max-total-tokens 1024 \
--max-batch-size 8 \
--max-batch-prefill-tokens 2048
```
Option 2: Run the server (non-sharded model):
```bash
LOG_LEVEL=debug text-generation-launcher \
--model-id meta-llama/Llama-3.1-8B-Instruct \
--max-input-tokens 512 \
--max-total-tokens 1024 \
--max-batch-size 4 \
--max-batch-prefill-tokens 2048
```
You can then test the server with the following curl command from another terminal (can be outside the container):
```bash
curl 127.0.0.1:8080/generate \
-X POST \
-d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \
-H 'Content-Type: application/json'
```
### Integration tests
Install the dependencies:
```bash
pip install -r integration-tests/requirements.txt
```
To run the integration tests, you need to first build the image:
```bash
make -C backends/gaudi image
```
Then run the following command to run the integration tests (CI tests):
```bash
make -C backends/gaudi run-integration-tests
```
To run the integration tests with all models, you can run the following command:
```bash
make -C backends/gaudi run-integration-tests-with-all-models
```
To capture the expected outputs for the integration tests, you can run the following command:
```bash
make -C backends/gaudi capture-expected-outputs-for-integration-tests
```
#### How the integration tests works
The integration tests work as follows:
1. Start a tgi server in a container, similar to the command:
```bash
docker run --runtime=habana --ipc=host --cap-add=sys_nice \
-p 8080:80 -v $volume:/data \
-e LOG_LEVEL=debug -e HF_TOKEN=$hf_token \
tgi-gaudi --model-id $model \
--max-input-tokens 512 --max-total-tokens 1024 --max-batch-size 4 --max-batch-prefill-tokens 2048
```
2. Do a /generate request to the server, similar to the command:
```bash
curl 127.0.0.1:8080/generate \
-X POST \
-d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \
-H 'Content-Type: application/json'
```
3. Check the output of the server against the expected output:
```python
assert curl_output == expected_output
```
This is then repeated for a set of models and configurations.
| text-generation-inference/backends/gaudi/README.md/0 | {
"file_path": "text-generation-inference/backends/gaudi/README.md",
"repo_id": "text-generation-inference",
"token_count": 1492
} | 264 |
from typing import Optional
import torch
import torch.nn as nn
try:
import habana_frameworks.torch.hpu # noqa: F401
convert_from_uint4 = torch.ops.hpu.convert_from_uint4
except Exception as e:
hpu_import_exception = e
def error_raiser_hpu(*args, **kwargs):
raise ValueError(
f"Trying to use HPU, but could not import the HPU framework with the following error: {hpu_import_exception}"
)
convert_from_uint4 = error_raiser_hpu
AWQ_REVERSE_ORDER = [0, 4, 1, 5, 2, 6, 3, 7]
def unpack_awq(qweight: torch.Tensor, qzeros: torch.Tensor, bits: int):
shifts = torch.arange(0, 32, bits, device=qzeros.device)
# unpack the quantized weights columnwise
iweights = torch.bitwise_right_shift(qweight[:, :, None], shifts[None, None, :]).to(
torch.int8 # smallest dtype available
)
iweights = iweights.view(iweights.shape[0], -1)
# unpack the quantized zero points columnwise
if qzeros is not None:
izeros = torch.bitwise_right_shift(
qzeros[:, :, None], shifts[None, None, :]
).to(
torch.int8 # smallest dtype available
)
izeros = izeros.view(izeros.shape[0], -1)
else:
izeros = qzeros
return iweights, izeros
def reverse_awq_order(iweights: torch.Tensor, izeros: torch.Tensor, bits: int):
reverse_order_tensor = torch.arange(
iweights.shape[-1],
dtype=torch.int32,
device=izeros.device,
)
reverse_order_tensor = reverse_order_tensor.view(-1, 32 // bits)
reverse_order_tensor = reverse_order_tensor[:, AWQ_REVERSE_ORDER]
reverse_order_tensor = reverse_order_tensor.view(-1)
if izeros is not None:
izeros = izeros[:, reverse_order_tensor]
iweights = iweights[:, reverse_order_tensor]
return iweights, izeros
def unpack_weight_and_zeros(qweight, qzeros, bits):
# Unpack the qweight and qzeros tensors
iweight, izeros = unpack_awq(qweight, qzeros, bits)
# Reverse the order of the iweight and izeros tensors
iweight, izeros = reverse_awq_order(iweight, izeros, bits)
# overflow checks
iweight = torch.bitwise_and(iweight, (2**bits) - 1)
izeros = torch.bitwise_and(izeros, (2**bits) - 1)
return iweight, izeros
def pack_tensor(input, bits=4):
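# Packs 32 // bits consecutive low-bit values into each int32 output column;
# e.g. for bits=4: q[:, col] = v0 | (v1 << 4) | (v2 << 8) | ... | (v7 << 28)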
normal = input.to(torch.int32)
q = torch.zeros(
(normal.shape[0], normal.shape[1] // 32 * bits),
dtype=torch.int32,
device=input.device,
)
i = 0
col = 0
while col < q.shape[1]:
for j in range(i, i + (32 // bits)):
q[:, col] |= normal[:, j] << (bits * (j - i))
i += 32 // bits
col += 1
q = q.to(torch.int32)
return q
class WQLinear(nn.Module):
def __init__(
self, w_bit, group_size, qweight, qzeros, scales, bias: Optional[torch.Tensor]
):
super().__init__()
if w_bit not in [4]:
raise NotImplementedError("Only 4-bit are supported for now.")
self.in_features = qweight.shape[0]
self.out_features = qweight.shape[1] * 32 // w_bit
self.w_bit = w_bit
self.group_size = group_size if group_size != -1 else self.in_features
# quick sanity check (make sure alignment is correct)
assert self.in_features % self.group_size == 0
assert self.out_features % (32 // self.w_bit) == 0
self.qweight = qweight
self.qzeros = qzeros
self.scales = scales
self.bias = bias
self._preprocessing()
def _preprocessing(self):
device = self.qweight.device
weight, zeros = unpack_weight_and_zeros(
self.qweight.cpu(), self.qzeros.cpu(), self.w_bit
)
self.qweight = pack_tensor(weight).to(device)
self.qzeros = pack_tensor(zeros).to(device)
@torch.no_grad()
def forward(self, x):
out_shape = x.shape[:-1] + (self.out_features,)
x = x.reshape(-1, x.shape[-1])
weights = convert_from_uint4(self.qweight, self.scales, self.qzeros, x.dtype)
outputs = torch.matmul(x, weights)
outputs = outputs + self.bias if self.bias is not None else outputs
outputs = outputs.reshape(out_shape)
return outputs
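# Example usage (a sketch; the packed tensors are assumed to come from an AWQ checkpoint
# and to live on an HPU device):
#   layer = WQLinear(w_bit=4, group_size=128, qweight=qw, qzeros=qz, scales=s, bias=None)
#   y = layer(torch.randn(2, layer.in_features, dtype=s.dtype, device=qw.device))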
| text-generation-inference/backends/gaudi/server/text_generation_server/layers/awq/quantize/hpu.py/0 | {
"file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/layers/awq/quantize/hpu.py",
"repo_id": "text-generation-inference",
"token_count": 1871
} | 265 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, List
import torch
from torch import nn
import habana_frameworks.torch as htorch
from text_generation_server.layers.attention import (
paged_attention,
attention,
set_block_mapping,
Seqlen,
HPUPagedAttentionMetadata,
)
from text_generation_server.layers.attention.kv_cache import get_kv_scales
from text_generation_server.layers import (
TensorParallelEmbedding,
TensorParallelRowLinear,
TensorParallelColumnLinear,
SpeculativeHead,
)
from text_generation_server.layers.layernorm import (
FastRMSNorm,
)
from .flash_qwen2_modeling import Qwen2MLP
from text_generation_server.layers.rotary import PositionRotaryEmbedding
class Qwen3Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config, prefix, weights, layer_idx, rotary_emb):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(
config, "head_dim", config.hidden_size // config.num_attention_heads
)
self.num_key_value_groups = (
config.num_attention_heads // config.num_key_value_heads
)
self.num_heads = config.num_attention_heads
self.attention_dropout = config.attention_dropout
self.softmax_scale = self.head_dim**-0.5
self.rotary_emb = rotary_emb
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
config.num_key_value_heads // weights.process_group.size()
)
self.query_key_value = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=False,
)
self.kv_scales = get_kv_scales(weights, f"{prefix}")
self.o_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.o_proj",
weights=weights,
bias=False,
)
self.num_groups = self.num_heads // self.num_key_value_heads
self.kv_head_mapping = torch.arange(
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_groups)
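# With grouped-query attention, each KV head serves `num_groups` query heads; e.g. with
# 8 query heads and 2 KV heads the mapping is [0, 0, 0, 0, 1, 1, 1, 1].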
self.max_past = (
config.sliding_window if config.sliding_window is not None else -1
)
self.q_norm = FastRMSNorm.load(
prefix=f"{prefix}.q_norm",
weights=weights,
eps=config.rms_norm_eps,
)
self.k_norm = FastRMSNorm.load(
prefix=f"{prefix}.k_norm",
weights=weights,
eps=config.rms_norm_eps,
)
self.sliding_window = config.sliding_window
if not (
self.config.use_sliding_window
and getattr(self.config, "sliding_window", None) is not None
and self.layer_idx >= self.config.max_window_layers
):
self.sliding_window = None
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
slots,
seqlen,
hpu_attention_meta,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
qkv = self.query_key_value(hidden_states)
query_states, key_states, value_states = qkv.split(
[
self.head_dim * self.num_heads,
self.head_dim * self.num_key_value_heads,
self.head_dim * self.num_key_value_heads,
],
dim=1,
)
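        # Sketch of the fused projection split above, assuming hypothetical
        # per-shard sizes head_dim=128, num_heads=32, num_key_value_heads=8:
        #   qkv:   [num_tokens, 128 * (32 + 8 + 8)]
        #   query: [num_tokens, 128 * 32], key/value: [num_tokens, 128 * 8]
        # before the .view(hidden_shape) calls reshape them per head.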
query_states, _ = self.q_norm(query_states.view(hidden_shape))
key_states, _ = self.k_norm(key_states.view(hidden_shape))
value_states = value_states.view(hidden_shape)
self.rotary_emb(query_states, key_states, cos, sin)
kv_cache.store(
key=key_states,
value=value_states,
slots=slots,
kv_scales=self.kv_scales,
)
# Prefill
if cu_seqlen_prefill is not None:
# sdpa
attn_output = attention(
query=query_states,
key=key_states,
value=value_states,
kv_cache=kv_cache,
kv_scales=self.kv_scales,
seqlen=seqlen,
softmax_scale=self.softmax_scale,
window_size_left=self.max_past,
)
# Decode
else:
attn_output = paged_attention(
query_states,
kv_cache,
self.kv_head_mapping,
self.softmax_scale,
seqlen,
kv_scales=self.kv_scales,
hpu_attention_meta=hpu_attention_meta,
window_size_left=self.max_past,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
return self.o_proj(attn_output)
class Qwen3DecoderLayer(nn.Module):
def __init__(self, config, prefix, weights, layer_idx: int, rotary_emb):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Qwen3Attention(
config=config,
prefix=f"{prefix}.self_attn",
weights=weights,
layer_idx=layer_idx,
rotary_emb=rotary_emb,
)
self.mlp = Qwen2MLP(config=config, prefix=f"{prefix}.mlp", weights=weights)
self.input_layernorm = FastRMSNorm.load(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps
)
self.post_attention_layernorm = FastRMSNorm.load(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
slots,
seqlen,
hpu_attention_meta,
) -> torch.Tensor:
residual = hidden_states
hidden_states, _ = self.input_layernorm(hidden_states)
# Self Attention
hidden_states = self.self_attn(
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
slots,
seqlen,
hpu_attention_meta,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states, _ = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
class Qwen3Model(nn.Module):
def __init__(self, config, prefix: str, weights):
super().__init__()
self.config = config
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
head_dim = getattr(
config, "head_dim", config.hidden_size // config.num_attention_heads
)
rotary_emb = PositionRotaryEmbedding.static(
config=config,
dim=head_dim,
base=config.rope_theta,
device=weights.device,
)
self.layers = nn.ModuleList(
[
Qwen3DecoderLayer(
config=config,
prefix=f"{prefix}.layers.{layer_idx}",
weights=weights,
layer_idx=layer_idx,
rotary_emb=rotary_emb,
)
for layer_idx in range(config.num_hidden_layers)
]
)
self.norm = FastRMSNorm.load(
prefix=f"{prefix}.norm", weights=weights, eps=config.rms_norm_eps
)
def forward(
self,
inputs_embeds: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
slots: torch.Tensor,
seqlen: Seqlen,
hpu_attention_meta: Optional[HPUPagedAttentionMetadata],
) -> torch.Tensor:
if hpu_attention_meta is not None:
hpu_attention_meta = set_block_mapping(
hpu_attention_meta, inputs_embeds.shape[0]
)
hidden_states = inputs_embeds
# create position embeddings to be shared across the decoder layers
cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
position_ids,
)
residual = None
lazy_mode = htorch.utils.internal.is_lazy()
if lazy_mode:
htorch.core.mark_step()
for i, decoder_layer in enumerate(self.layers):
hidden_states = decoder_layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
slots,
seqlen,
hpu_attention_meta,
)
if lazy_mode:
htorch.core.mark_step()
hidden_states, _ = self.norm(hidden_states)
        # return the hidden states from the last decoder layer, after the final norm
return hidden_states
class Qwen3ForCausalLM(nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
self.model = Qwen3Model(config=config, prefix="model", weights=weights)
self.vocab_size = config.vocab_size
if config.tie_word_embeddings:
suffix = "model.embed_tokens"
else:
suffix = "lm_head"
self.lm_head = SpeculativeHead.load(
config,
prefix=f"{prefix}.{suffix}" if prefix else suffix,
weights=weights,
)
self.embed_tokens = TensorParallelEmbedding(
prefix=f"{prefix}.embed_tokens" if prefix else "model.embed_tokens",
weights=weights,
)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
slots: torch.Tensor,
seqlen: Seqlen,
hpu_attention_meta: Optional[HPUPagedAttentionMetadata],
lm_head_indices: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
) -> torch.Tensor:
inputs_embeds = self.embed_tokens(input_ids)
        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
hidden_states = self.model(
inputs_embeds,
position_ids,
cu_seqlen_prefill,
kv_cache,
slots,
seqlen,
hpu_attention_meta,
)
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits = self.lm_head(hidden_states)
return logits
| text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_modeling.py/0 | {
"file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/flash_qwen3_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 6026
} | 266 |
import inspect
import torch
from abc import ABC, abstractmethod
from typing import List, Tuple, Optional, TypeVar, Type, Dict
from collections import defaultdict
from transformers import PreTrainedTokenizerBase
from text_generation_server.models.types import Batch, Generation
from text_generation_server.models.globals import BLOCK_SIZE
from text_generation_server.utils.speculate import get_speculate
from text_generation_server.pb.generate_pb2 import InfoResponse
from text_generation_server.adapters.weights import LayerAdapterWeights
from text_generation_server.pb import generate_pb2
BASE_MODEL_ADAPTER_ID = "__base_model__"
B = TypeVar("B", bound=Batch)
class Model(ABC):
def __init__(
self,
model_id: str,
model: torch.nn.Module,
tokenizer: PreTrainedTokenizerBase,
requires_padding: bool,
dtype: torch.dtype,
device: torch.device,
rank: int = 0,
world_size: int = 1,
sliding_window: Optional[int] = None,
speculate: Optional[int] = None,
adapter_id: str = BASE_MODEL_ADAPTER_ID,
support_chunking: bool = False,
):
self.model_id = model_id
self.model = model.eval()
self.tokenizer = tokenizer
# all_special_ids is not set correctly if the rust tokenizer is unpacked
# TODO report this to transformers.
other_special_ids = {
id for id, token in tokenizer.added_tokens_decoder.items() if token.special
}
self.all_special_ids = set(tokenizer.all_special_ids)
self.all_special_ids.update(other_special_ids)
self.requires_padding = requires_padding
self.dtype = dtype
self.device = device
self.rank = rank
self.world_size = world_size
self.sliding_window = sliding_window if sliding_window != -1 else None
self.layer_to_adapter_weights: Dict[str, LayerAdapterWeights] = defaultdict(
LayerAdapterWeights
)
self.loaded_adapters = set()
self.static_adapter_id = adapter_id
if speculate is None:
speculate = get_speculate()
self.speculate = speculate
self.has_position_ids = (
inspect.signature(model.forward).parameters.get("position_ids", None)
is not None
)
self.check_initialized()
@property
def info(self) -> InfoResponse:
if self.requires_padding and self.sliding_window is not None:
raise NotImplementedError("sliding_window is not implemented with padding")
return InfoResponse(
requires_padding=self.requires_padding,
dtype=str(self.dtype),
device_type=self.device.type,
window_size=None,
speculate=self.speculate,
block_size=BLOCK_SIZE,
)
@property
@abstractmethod
def batch_type(self) -> Type[B]:
raise NotImplementedError
@abstractmethod
def generate_token(
self, batch: B
) -> Tuple[List[Generation], Optional[B], Tuple[int, int]]:
raise NotImplementedError
def warmup(
self, batch: generate_pb2.WarmupRequest
) -> Tuple[Optional[int], Optional[int], Optional[int]]:
self.generate_token(batch)
return None, None, None
def decode_token(
self,
all_input_ids: List[int],
prefix_offset: int = 0,
read_offset: int = 0,
skip_special_tokens: bool = False,
) -> Tuple[str, int, int]:
"""Hack to hopefully support generate_stream for the maximum number of tokenizers"""
# The prefix text is necessary only to defeat cleanup algorithms in the decode
# which decide to add a space or not depending on the surrounding ids.
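        # Illustrative example (hypothetical ids): if prefix_text decodes to
        # "Hello" and the full slice decodes to "Hello world", only " world"
        # (with its leading space preserved) is returned below.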
prefix_text = self.tokenizer.decode(
all_input_ids[prefix_offset:read_offset],
skip_special_tokens=skip_special_tokens,
)
new_text = self.tokenizer.decode(
all_input_ids[prefix_offset:], skip_special_tokens=skip_special_tokens
)
if len(new_text) > len(prefix_text) and not new_text.endswith("�"):
# utf-8 char at the end means it's a potential unfinished byte sequence
# from byte fallback tokenization.
# If it's in the middle, it's probably a real invalid id generated
# by the model
new_text = new_text[len(prefix_text) :]
return new_text, read_offset, len(all_input_ids)
else:
return "", prefix_offset, read_offset
def check_initialized(self):
uninitialized_parameters = []
for n, p in self.model.named_parameters():
if p.data.device == torch.device("meta"):
uninitialized_parameters.append(n)
if uninitialized_parameters:
raise RuntimeError(
f"found uninitialized parameters in model {self.__class__.__name__}: {uninitialized_parameters}"
)
| text-generation-inference/backends/gaudi/server/text_generation_server/models/model.py/0 | {
"file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/models/model.py",
"repo_id": "text-generation-inference",
"token_count": 2095
} | 267 |
[package]
name = "text-generation-router-llamacpp"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true
[build-dependencies]
bindgen = "0.71.1"
pkg-config = "0.3.31"
[dependencies]
async-trait = "0.1.85"
clap = "4.5.27"
hf-hub.workspace = true
num_cpus = "1.16.0"
text-generation-router = { path = "../../router" }
thiserror = "2.0.11"
tokenizers.workspace = true
tokio = { version = "1.43.0", features = ["process"] }
tokio-stream = "0.1.17"
tracing = "0.1.41"
| text-generation-inference/backends/llamacpp/Cargo.toml/0 | {
"file_path": "text-generation-inference/backends/llamacpp/Cargo.toml",
"repo_id": "text-generation-inference",
"token_count": 216
} | 268 |
import copy
import logging
import time
from abc import ABC
from enum import Enum
from typing import List, Optional, Tuple
import torch
from loguru import logger
from transformers import AutoTokenizer, PreTrainedTokenizerBase
from optimum.neuron.configuration_utils import NeuronConfig
from transformers.generation import GenerationConfig
from optimum.neuron import NeuronModelForCausalLM
from optimum.neuron.generation import TokenSelector
from .model import get_export_kwargs_from_env
from .pb.generate_pb2 import (
Batch,
CachedBatch,
FinishReason,
GeneratedText,
Generation,
InfoResponse,
Request,
Tokens,
)
# Disable optimum-neuron warnings as it seems to block the server after a while
optimum_logger = logging.getLogger("optimum.neuron")
optimum_logger.setLevel("CRITICAL")
class Generator(ABC):
"""An abstract class to represent the workhorse behind TextGenerationService.
Ideally, it should not rely on protobuf constructs, but in a first step it does.
Implementations would typically need a model and a tokenizer to implement the Generator methods.
"""
@property
def info(self) -> InfoResponse:
"""This should simply return the expected InfoResponse"""
raise NotImplementedError
def warmup(self, batch: Batch) -> int:
"""Verify if the hardware can support the target load.
Args:
batch (`Batch`):
A batch corresponding to the maximum number of concurrent requests.
Return:
The maximum number of tokens the model supports.
"""
raise NotImplementedError
def prefill(self, batch: Batch) -> Tuple[List[Generation], CachedBatch]:
"""Prefill is called whenever new requests need to be added.
When this method returns successfully, a decode method will follow
with both the current and newly prefilled batch(es).
Args:
batch (`Batch`):
A batch containing the new requests.
Return:
A list of `Generation` for each request and a `CachedBatch` containing all pending requests.
"""
raise NotImplementedError
def decode(self, batches: List[Batch]) -> Tuple[List[Generation], CachedBatch]:
"""Decode after a prefill or another decode."""
raise NotImplementedError
def filter(self, batch_id: int, request_ids: List[int]) -> CachedBatch:
"""Remove requests that are not listed from the specified batch"""
raise NotImplementedError
def clear(self):
"""Remove all requests from the generator"""
raise NotImplementedError
@classmethod
def from_pretrained(cls, model_id: str, revision: Optional[str]):
"""Factory method "a la transformers" """
raise NotImplementedError
class Slot:
"""Represents a slot in a static batch"""
class State(Enum):
EMPTY = 0
PAUSE = 1
READY = 2
def __init__(self, id: int, tokenizer: PreTrainedTokenizerBase):
self._id = id
self._tokenizer = tokenizer
self.clear()
def clear(self):
"""Clear the slot and mark it as available."""
self._state = Slot.State.EMPTY
self._batch_id = None
self._request_id = None
self._inputs = ""
self._truncate = 0
self._generation_config = None
self._tokens = []
self._mask = torch.tensor([])
self._selector = None
self._generated_tokens = 0
self._next_text_token_start = 0
self._next_text_token_end = 0
self._generated_text = ""
self._next_text = ""
@property
def id(self) -> int:
return self._id
@property
def state(self) -> "Slot.State":
return self._state
@property
def batch_id(self) -> int:
return self._batch_id
@property
def request_id(self) -> int:
return self._request_id
@property
def cached_text(self) -> str:
return self._inputs + self._generated_text
@property
def generation_config(self) -> GenerationConfig:
return self._generation_config
@property
def generated_tokens(self) -> int:
return self._generated_tokens
def assign(
self, batch_id: int, request: Request, generation_config: GenerationConfig
):
"""Assign a request to a slot.
Args:
request (`Request`):
The request to be assigned. Contains the inputs and tokens selection parameters.
generation_config (`transformers.GenerationConfig`):
The base generation config (might be modified by the request generation parameters).
"""
self._state = Slot.State.READY
self._batch_id = batch_id
self._request_id = request.id
self._inputs = request.inputs
if request.truncate:
self._truncate = request.truncate
self._generation_config = copy.deepcopy(generation_config)
# Update generation config with request parameters
self._generation_config.do_sample = request.parameters.do_sample
if self._generation_config.do_sample:
if request.parameters.temperature != 0:
self._generation_config.temperature = request.parameters.temperature
if request.parameters.top_k != 0:
self._generation_config.top_k = request.parameters.top_k
if request.parameters.top_p != 0:
self._generation_config.top_p = request.parameters.top_p
if request.parameters.typical_p != 0:
self._generation_config.typical_p = request.parameters.typical_p
else:
# Set the sampling parameters to emulate greedy decoding when using on-device sampling
self._generation_config.temperature = 1.0
self._generation_config.top_k = 1
self._generation_config.top_p = 1.0
self._generation_config.typical_p = 1.0
if request.parameters.repetition_penalty != 0:
self._generation_config.repetition_penalty = (
request.parameters.repetition_penalty
)
self.seed = request.parameters.seed
self._generation_config.max_new_tokens = (
request.stopping_parameters.max_new_tokens
)
self._max_new_tokens = self._generation_config.max_new_tokens
stop_strings = request.stopping_parameters.stop_sequences
if stop_strings:
self._generation_config.stop_strings = stop_strings
def reset(
self,
input_ids: torch.LongTensor,
attention_mask: torch.LongTensor,
selector: TokenSelector,
):
"""Reset the slot for the next generation.
Args:
input_ids: (`torch.LongTensor`):
The new input_ids to use to generate the next token.
attention_mask: (`torch.LongTensor`):
The new attention_mask to use to generate the next token.
selector: (`optimum.neuron.generation.TokenSelector`):
An object implementing the updated token selection logic.
"""
self._tokens = input_ids.clone()
self._next_text_token_start = 0
self._next_text_token_end = torch.numel(self._tokens)
self._next_text = ""
self._mask = attention_mask.clone()
self._selector = selector
def pause(self):
"""Mark the current slot as paused for generation.
Note that the KV cache for this slot will still be filled.
"""
self._state = Slot.State.PAUSE
def resume(self):
"""Mark the slot as ready for generation."""
self._state = Slot.State.READY
def _decode_next_tokens(
self,
) -> str:
"""Hack to hopefully support generate_stream for the maximum number of tokenizers"""
# We need to include the tokens that produced the last text to defeat cleanup algorithms in the decode
# which decide to add a space or not depending on the surrounding ids.
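        # Sketch with hypothetical offsets: _next_text_token_start=3,
        # _next_text_token_end=5; decoding tokens[3:] gives "Hello world" while
        # tokens[3:5] gives "Hello", so " world" is returned and both offsets
        # are advanced for the next call.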
new_text = self._tokenizer.decode(
self._tokens[self._next_text_token_start :], skip_special_tokens=False
)
if new_text.endswith("�"):
# utf-8 char at the end means it's a potential unfinished byte sequence
# from byte fallback tokenization.
return ""
        # Compare the new text with the text produced by the previously decoded tokens only
last_text = self._tokenizer.decode(
self._tokens[self._next_text_token_start : self._next_text_token_end],
skip_special_tokens=False,
)
if len(new_text) == len(last_text):
# Nothing new was actually generated
return ""
# Return the decoded text and store its token offsets
self._next_text_token_start = self._next_text_token_end
self._next_text_token_end = torch.numel(self._tokens)
return new_text[len(last_text) :]
def append(self, next_token: int) -> str:
"""Append a new generated token to this slot
The new token is added to the list of generated tokens, which impacts
directly the generated_text and stopped property.
The new token is however not added immediately to the slot inputs: it will
be added later on when it has effectively been used to produce the next token.
Args:
next_token (`int`):
The newly generated token.
Return:
The corresponding decoded text (if any).
"""
self._tokens = torch.cat([self._tokens, torch.LongTensor([next_token])])
self._mask = torch.cat([self._mask, torch.LongTensor([1])])
self._generated_tokens += 1
next_text = self._decode_next_tokens()
# Now that a new token has been generated, we can append the previous one to the generated text
self._generated_text += self._next_text
self._next_text = next_text
return next_text
def select(
self, input_ids: torch.LongTensor, logits: torch.Tensor
) -> torch.LongTensor:
"""Select the next token from the candidate logits.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation (not used in all generation modes).
logits (`torch.Tensor` of shape `(batch_size, sequence_length)`):
The logits corresponding to the generated tokens.
Return:
`torch.LongTensor`: A scalar torch.LongTensor` containing the selected token.
"""
return self._selector.select(input_ids, logits)[0]
@property
def stopped(self) -> bool:
# Transformers stopping criteria expects a batch of input ids
input_ids = torch.unsqueeze(self._tokens, dim=0)
return self._selector.stopping_criteria(input_ids, None)
@property
def generated_text(self) -> str:
return self._generated_text + self._next_text
@property
def next_token(self) -> int:
return None if len(self._tokens) == 0 else self._tokens[-1]
@property
def attention_mask(self) -> torch.LongTensor:
return self._mask
@property
def max_token(self) -> int:
return self._generation_config.max_length
@property
def max_new_tokens(self) -> int:
# The current value of max_new_tokens: might be different of the target max_new_tokens
# if the slot has been paused and resumed.
return self._generation_config.max_new_tokens
@property
def truncate(self) -> int:
return self._truncate
class NeuronGenerator(Generator):
"""A Generator for Neuron models."""
def __init__(
self,
model: NeuronModelForCausalLM,
tokenizer: PreTrainedTokenizerBase,
):
self.model = model
if not isinstance(self.model, NeuronModelForCausalLM):
raise ValueError("The model must be a NeuronModelForCausalLM.")
if (
model.neuron_config.batch_size > 1
and not model.neuron_config.continuous_batching
):
raise ValueError(
"The neuron model must be compiled with continuous_batching=True."
)
# Specify padding and truncation options for decoder-only architecture
tokenizer.pad_token_id = tokenizer.eos_token_id
tokenizer.padding_side = "left"
tokenizer.truncation_side = "left"
self.tokenizer = tokenizer
self.special_tokens = self.tokenizer.all_special_ids
self.slots = [
Slot(i, tokenizer) for i in range(self.model.neuron_config.batch_size)
]
self.batch_id = 0
@property
def on_device_sampling(self) -> bool:
return getattr(self.model.neuron_config, "on_device_sampling", False)
@property
def info(self) -> InfoResponse:
"""Returns the expected InfoResponse."""
dtype = getattr(self.model.config, "torch_dtype", "float32")
return InfoResponse(
requires_padding=True,
dtype=str(dtype),
device_type="xla",
)
def warmup(self, batch: Batch) -> int:
"""Verify if the hardware can support the target load.
Args:
batch (`Batch`):
A batch corresponding to the maximum number of concurrent requests.
Return:
The maximum number of tokens the model supports.
"""
# Just check that the warmup request parameters match the model capacity
batch_size = self.model.neuron_config.batch_size
if len(batch.requests) > batch_size:
            raise ValueError(
                f"Inconsistent batch_size configuration: Please make sure the batch_size in the compiled model "
                f"(currently {batch_size}) matches the batch_size passed to TGI. The compiled "
                f"model.neuron_config.batch_size is usually in the neuron section of the model config.json file. "
                f"You may also have passed it into optimum-cli during the compilation process. The batch size "
                f"for TGI is usually set in the environment as MAX_BATCH_SIZE."
            )
self.prefill(batch)
self.clear()
return (
self.model.neuron_config.batch_size
* self.model.neuron_config.sequence_length
)
def max_prefill_length(self) -> int:
if hasattr(self.model.neuron_config, "max_context_length"):
return self.model.neuron_config.max_context_length
return self.model.neuron_config.sequence_length
def prefill(self, batch: Batch) -> Tuple[List[Generation], CachedBatch]:
"""Prefill new requests.
Args:
batch (`Batch`):
A batch containing the new requests.
Return:
A list of `Generation` for each request and a `CachedBatch` containing all pending requests.
"""
slots = {state: [] for state in Slot.State}
for slot in self.slots:
slots[slot.state].append(slot)
active_slots = slots[Slot.State.READY]
empty_slots = slots[Slot.State.EMPTY]
if len(empty_slots) < len(batch.requests):
raise ValueError(
f"Cannot prefill {len(batch.requests)} new request(s) with only {len(empty_slots)} empty slots."
f" Please align max_batch_size with the static batch size: {self.model.neuron_config.batch_size}."
)
# Assign each request to an empty slot
logger.debug(
f"Prefilling {len(batch.requests)} new request(s) with {len(empty_slots)} empty slot(s)"
)
new_slots = []
for request in batch.requests:
slot = empty_slots.pop()
slot.assign(self.batch_id, request, self.model.generation_config)
new_slots.append(slot)
logger.debug(
f"Request {slot.request_id} assigned to slot {slot.id} with and max_new_tokens {slot.max_new_tokens}"
)
prefill_slots = new_slots
seq_ids = torch.tensor([slot.id for slot in prefill_slots])
# Reconstruct the full inputs (without padding) as seen by the model.
# This comprises:
# - the inputs for new requests,
# - only when rebuilding the cache, the inputs and the generated text that has already
# been cached (i.e. excluding the last generated token) for unfinished requests.
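        # For example (hypothetical): a new request contributes only its raw
        # prompt, while a request re-prefilled after a cache rebuild contributes
        # prompt + generated_text, so cached_text below covers both cases.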
inputs = []
max_length = 0
for slot in prefill_slots:
inputs.append(slot.cached_text)
# Apply truncation, making sure we fit into static dimensions
if slot.truncate == 0:
max_length = self.max_prefill_length()
elif (
slot.truncate > max_length and slot.truncate < self.max_prefill_length()
):
max_length = slot.truncate
# Tokenize with padding and truncation
padded_inputs = self.tokenizer(
inputs,
return_tensors="pt",
padding=True,
truncation=True,
max_length=max_length,
)
input_ids = padded_inputs.input_ids
attention_mask = padded_inputs.attention_mask
sampling_params = (
torch.zeros(input_ids.shape[0], 3) if self.on_device_sampling else None
)
# Pause previously active slots during generation
for slot in active_slots:
slot.pause()
# Each slot must be reset with the padded inputs and masks
for i, slot in enumerate(prefill_slots):
            if slot.state != Slot.State.EMPTY:
if slot.truncate > 0 and slot.truncate < input_ids.shape[-1]:
# Apply per-request truncation
input_ids[i, : -slot.truncate] = self.tokenizer.pad_token_id
attention_mask[i, : -slot.truncate] = 0
slot_input_ids = input_ids[i : i + 1, :]
                # Padded input ids are also required to set logits processors and stopping criteria
selector = TokenSelector.create(
slot_input_ids,
slot.generation_config,
self.model,
self.model.neuron_config.sequence_length,
tokenizer=self.tokenizer,
seed=slot.seed,
)
slot_input_ids = slot_input_ids.squeeze(dim=0).type(torch.int64)
slot_attention_mask = attention_mask[i]
slot.reset(slot_input_ids, slot_attention_mask, selector)
if sampling_params is not None:
sampling_params[i, 0] = slot.generation_config.top_k
sampling_params[i, 1] = slot.generation_config.top_p
sampling_params[i, 2] = slot.generation_config.temperature
# Note: when rebuilding cache on prefill, the new tokens on paused slots will be ignored,
# as they have already been generated and sent back in the last decode.
model_inputs = self.model.prepare_inputs_for_prefill(
input_ids,
attention_mask=attention_mask,
seq_ids=seq_ids,
sampling_params=sampling_params,
)
tokens_or_logits = self.model(**model_inputs)[0]
generation, next_batch = self._generate_token(
prefill_slots, self.batch_id, tokens_or_logits, input_ids
)
self.batch_id += 1
# Reactivate previously active slots for the next decode
for i, slot in enumerate(active_slots):
slot.resume()
logger.debug("Model ready for decoding")
if next_batch is not None:
logger.debug(
f"Next batch is {next_batch.id} with requests: {next_batch.request_ids}"
)
return generation, next_batch
def decode(
self, batches: List[CachedBatch]
) -> Tuple[List[Generation], CachedBatch]:
"""Decode the specified prefilled requests.
Args:
batches (`List[CachedBatch]`):
A list of previous batches containing the prefilled requests.
Return:
A list of `Generation` for each request and a `CachedBatch` containing all pending requests.
"""
# batches contains a list composed of:
# - the batch id returned by the last decode,
# - the batch id(s) returned by the last prefill(s)
# Batches are always concatenated during prefill, so we can
# just carry on with decoding. We adopt the id of the first
# batch in the list as our next batch id.
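        # e.g. batches = [CachedBatch(id=0, ...), CachedBatch(id=2, ...)]
        # (hypothetical ids) -> next_batch_id = 0 and request_ids is the union
        # of both batches' request ids.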
next_batch_id = batches[0].id
request_ids = []
for batch in batches:
request_ids += batch.request_ids
cleared_request_ids = []
for slot in self.slots:
            if slot.state == Slot.State.READY and slot.request_id not in request_ids:
cleared_request_ids.append(slot.request_id)
slot.clear()
if len(cleared_request_ids) > 0:
logger.info(
f"Clearing slot for requests {cleared_request_ids} as they are not requested."
)
        active_slots = [slot for slot in self.slots if slot.state == Slot.State.READY]
if len(active_slots) < len(request_ids):
raise ValueError(
"Unable to decode tokens for non-prefilled batches (probably due to a previous failure)"
)
decode_slots = active_slots
seq_ids = torch.tensor([slot.id for slot in decode_slots])
# Reconstruct input_ids and attention_mask from decode slots
n_slots = len(decode_slots)
input_ids = torch.full(
[n_slots, 1], fill_value=self.tokenizer.eos_token_id, dtype=torch.int64
)
max_length = 0
for slot in decode_slots:
max_length = max(max_length, slot.attention_mask.size(-1))
attention_mask = torch.zeros([n_slots, max_length], dtype=torch.int64)
sampling_params = torch.zeros(n_slots, 3) if self.on_device_sampling else None
for i, slot in enumerate(decode_slots):
if slot.state != Slot.State.EMPTY:
# input_ids are simply the tokens generated by the last decode or prefill requests (other tokens are cached)
input_ids[i, 0] = slot.next_token
attention_mask[i, : slot.attention_mask.size(-1)] = slot.attention_mask
if sampling_params is not None:
sampling_params[i, 0] = slot.generation_config.top_k
sampling_params[i, 1] = slot.generation_config.top_p
sampling_params[i, 2] = slot.generation_config.temperature
model_inputs = self.model.prepare_inputs_for_decode(
input_ids,
attention_mask=attention_mask,
seq_ids=seq_ids,
sampling_params=sampling_params,
)
tokens_or_logits = self.model(**model_inputs)[0]
return self._generate_token(
decode_slots, next_batch_id, tokens_or_logits, input_ids
)
def _generate_token(
self,
slots: List[Slot],
next_batch_id: int,
tokens_or_logits: torch.Tensor,
input_ids: torch.LongTensor,
) -> Tuple[List[Generation], CachedBatch]:
generations = []
active_slots = False
for i, slot in enumerate(slots):
if slot.state != Slot.State.READY:
continue
request_id = slot.request_id
slot_input_ids = input_ids[i : i + 1, :]
if self.on_device_sampling:
next_token = tokens_or_logits[i]
else:
next_token_logits = tokens_or_logits[i : i + 1, -1, :]
next_token = slot.select(slot_input_ids, next_token_logits)
next_token_text = slot.append(next_token)
generated_text = None
finish_reason = None
if next_token == self.tokenizer.eos_token_id:
finish_reason = FinishReason.FINISH_REASON_EOS_TOKEN
elif slot.stopped:
if slot.generated_tokens == slot.max_new_tokens:
finish_reason = FinishReason.FINISH_REASON_LENGTH
else:
finish_reason = FinishReason.FINISH_REASON_STOP_SEQUENCE
if finish_reason is not None:
# We must include the generated text for each finished sequence in the response
generated_text = GeneratedText(
text=slot.generated_text,
generated_tokens=slot.generated_tokens,
finish_reason=finish_reason,
)
logger.debug(
f"Decode complete for request {request_id} with {slot.generated_tokens} tokens"
)
# mark the slot as available
slot.clear()
else:
active_slots = True
generations.append(
Generation(
request_id=request_id,
prefill_tokens=None,
tokens=Tokens(
ids=[next_token],
logprobs=[0],
texts=[next_token_text],
is_special=[next_token in self.special_tokens],
),
generated_text=generated_text,
)
)
batch = None
if active_slots:
# Whatever initial batch these requests came from, we always return all pending requests in a single batch
request_ids = [
slot.request_id for slot in self.slots if slot.state == Slot.State.READY
]
batch = self._cached_batch(next_batch_id, request_ids)
else:
logger.debug("No more pending requests")
return generations, batch
def _cached_batch(self, batch_id: int, request_ids: List):
size = len(request_ids)
max_tokens = size * self.model.neuron_config.sequence_length
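        # Static batching worked example (hypothetical): 3 pending requests with
        # sequence_length=4096 reserve 3 * 4096 = 12288 tokens, regardless of
        # how many tokens each request will actually generate.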
return CachedBatch(
id=batch_id, request_ids=request_ids, size=size, max_tokens=max_tokens
)
def filter(self, batch_id: int, keep_request_ids: List[int]) -> CachedBatch:
"""Remove requests that are not listed from the specified batch
Args:
batch_id (`int`):
The id of a cached batch.
            keep_request_ids (`List[int]`):
The list of requests that must be kept.
Return:
A `CachedBatch` containing the pending requests.
"""
keep_slot_ids = [
slot.id for slot in self.slots if slot.request_id in keep_request_ids
]
self._clear(keep_slot_ids)
return self._cached_batch(batch_id, keep_request_ids)
def clear(self, batch_id: Optional[int] = None):
"""Remove a subset or all requests from the generator"""
keep_ids = []
if batch_id is not None:
keep_ids = [slot.id for slot in self.slots if slot.batch_id != batch_id]
return self._clear(keep_ids)
def _clear(self, keep_slot_ids: List):
for slot in self.slots:
if slot.state != Slot.State.EMPTY and slot.id not in keep_slot_ids:
logger.debug(f"Removing slot {slot.id} with request {slot.request_id}")
slot.clear()
@classmethod
    def from_pretrained(cls, model_id: str, revision: Optional[str] = None):
"""Instantiate a NeuronGenerator.
Args:
model_id (`str`):
A hub model id or the path to a local model. This path must also contain a Tokenizer.
revision (`Optional[str]`, defaults to `None`):
The revision of the model on the HuggingFace hub.
Returns:
A NeuronGenerator.
"""
try:
neuron_config = NeuronConfig.from_pretrained(model_id, revision=revision)
except Exception as e:
            logger.debug(
                f"NeuronConfig.from_pretrained failed for model {model_id}, "
                f"revision {revision}: {e}"
            )
neuron_config = None
start = time.time()
if neuron_config is None:
export_kwargs = get_export_kwargs_from_env()
logger.info(f"Exporting model to neuron with config: {export_kwargs}.")
model = NeuronModelForCausalLM.from_pretrained(
model_id,
revision=revision,
low_cpu_mem_usage=True,
export=True,
**export_kwargs,
)
else:
logger.info(
"Loading model on neuron devices (this can take a few minutes)."
)
model = NeuronModelForCausalLM.from_pretrained(
model_id, low_cpu_mem_usage=True, revision=revision
)
end = time.time()
logger.info(f"Model successfully loaded in {end - start:.2f} s.")
tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision)
return cls(model, tokenizer)
| text-generation-inference/backends/neuron/server/text_generation_server/generator.py/0 | {
"file_path": "text-generation-inference/backends/neuron/server/text_generation_server/generator.py",
"repo_id": "text-generation-inference",
"token_count": 12958
} | 269 |
from helpers import create_request
from text_generation_server.generator import NeuronGenerator
from text_generation_server.pb.generate_pb2 import Batch
def test_prefill(neuron_model_config):
"""Verify that a prefill for a single request generates the expected output."""
config_name = neuron_model_config["name"]
neuron_model_path = neuron_model_config["neuron_model_path"]
generator = NeuronGenerator.from_pretrained(neuron_model_path)
max_batch_size = 4
assert generator.model.neuron_config.batch_size >= max_batch_size
for num_requests in [1, max_batch_size]:
for do_sample in [True, False]:
mode = "sample" if do_sample else "greedy"
print(f"[{mode}]: {num_requests} requests")
_test_prefill(config_name, generator, num_requests, do_sample)
generator.clear()
def _test_prefill(config_name, generator, batch_size, do_sample):
requests = []
max_new_tokens = 20
input_text = (
"It was a bright cold day in April, and the clocks were striking thirteen."
)
for i in range(batch_size):
requests.append(
create_request(
id=i,
inputs=input_text,
do_sample=do_sample,
max_new_tokens=max_new_tokens,
)
)
# Let's be pessimistic when estimating max_tokens
max_length = generator.max_prefill_length()
batch = Batch(
id=0, requests=requests, size=batch_size, max_tokens=batch_size * max_length
)
generations, next_batch = generator.prefill(batch)
assert next_batch.size == batch_size
# Whatever was passed as max_tokens, the server will correct it
# because of static batching
assert next_batch.max_tokens == batch_size * max_length
assert len(generations) == batch_size
if do_sample:
expectations = {
"llama": [358, " I"],
"qwen2": [576, " The"],
"granite": [308, " ("],
}[config_name]
else:
expectations = {
"llama": [578, " The"],
"qwen2": [358, " I"],
"granite": [203, "\n"],
}[config_name]
for g in generations:
tokens = g.tokens
assert tokens.ids[0] == expectations[0]
assert tokens.texts[0] == expectations[1]
def test_prefill_truncate(neuron_model_config):
config_name = neuron_model_config["name"]
neuron_model_path = neuron_model_config["neuron_model_path"]
generator = NeuronGenerator.from_pretrained(neuron_model_path)
batch_size = generator.model.neuron_config.batch_size
# We apply truncation to all requests but the first one
truncate = [
None,
] + [i * 3 for i in range(1, batch_size)]
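    # e.g. with a hypothetical batch_size of 4, truncate == [None, 3, 6, 9]:
    # the first request keeps its full prompt, the rest are truncated.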
input_text = (
"Two gin-scented tears trickled down the sides of his nose."
" But it was all right, everything was all right, the struggle was finished."
" He had won the victory over himself. He loved Big Brother."
)
requests = []
for i in range(batch_size):
requests.append(create_request(id=i, inputs=input_text, truncate=truncate[i]))
max_length = generator.max_prefill_length()
batch = Batch(
id=0, requests=requests, size=batch_size, max_tokens=batch_size * max_length
)
generations, _ = generator.prefill(batch)
# Even if the input text is identical for all requests, the first generated token might
# be different because of the truncation
expectations = {
"llama": [" He", "iens", "\x08", " He"],
"qwen2": [" He", " The", " He", " He"],
"granite": ["\n", "\n", " I", " He"],
}[config_name]
for i, g in enumerate(generations):
tokens = g.tokens
assert (
tokens.texts[0] == expectations[i]
), f"Request {i} expected [{expectations[i]}], got [{tokens.texts[0]}]"
| text-generation-inference/backends/neuron/tests/server/test_prefill.py/0 | {
"file_path": "text-generation-inference/backends/neuron/tests/server/test_prefill.py",
"repo_id": "text-generation-inference",
"token_count": 1606
} | 270 |
#!/bin/bash
set -ex
TRT_VER_BASE="10.8.0"
TRT_VER_FULL="${TRT_VER_BASE}.43"
CUDA_VER="12.8"
CUDNN_VER="9.7.0.66-1"
NCCL_VER="2.25.1-1+cuda${CUDA_VER}"
CUBLAS_VER="${CUDA_VER}.3.14-1"
NVRTC_VER="${CUDA_VER}.61-1"
for i in "$@"; do
case $i in
--TRT_VER=?*) TRT_VER="${i#*=}";;
--CUDA_VER=?*) CUDA_VER="${i#*=}";;
--CUDNN_VER=?*) CUDNN_VER="${i#*=}";;
--NCCL_VER=?*) NCCL_VER="${i#*=}";;
--CUBLAS_VER=?*) CUBLAS_VER="${i#*=}";;
*) ;;
esac
shift
done
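# Example invocation (hypothetical versions), overriding the defaults above:
#   ./install_tensorrt.sh --TRT_VER=10.8.0.43 --CUDA_VER=12.8 --NCCL_VER=2.25.1-1+cuda12.8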
NVCC_VERSION_OUTPUT=$(nvcc --version)
if [[ $(echo $NVCC_VERSION_OUTPUT | grep -oP "\d+\.\d+" | head -n 1) != ${CUDA_VER} ]]; then
echo "The version of pre-installed CUDA is not equal to ${CUDA_VER}."
exit 1
fi
install_ubuntu_requirements() {
apt-get update && apt-get install -y --no-install-recommends gnupg2 curl ca-certificates
ARCH=$(uname -m)
if [ "$ARCH" = "amd64" ];then ARCH="x86_64";fi
if [ "$ARCH" = "aarch64" ];then ARCH="sbsa";fi
curl -fsSLO https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/${ARCH}/cuda-keyring_1.1-1_all.deb
dpkg -i cuda-keyring_1.1-1_all.deb
rm /etc/apt/sources.list.d/cuda-ubuntu2404-x86_64.list
apt-get update
if [[ $(apt list --installed | grep libcudnn9) ]]; then
apt-get remove --purge -y --allow-change-held-packages libcudnn9*
fi
if [[ $(apt list --installed | grep libnccl) ]]; then
apt-get remove --purge -y --allow-change-held-packages libnccl*
fi
if [[ $(apt list --installed | grep libcublas) ]]; then
apt-get remove --purge -y --allow-change-held-packages libcublas*
fi
if [[ $(apt list --installed | grep cuda-nvrtc-dev) ]]; then
apt-get remove --purge -y --allow-change-held-packages cuda-nvrtc-dev*
fi
CUBLAS_CUDA_VERSION=$(echo $CUDA_VER | sed 's/\./-/g')
apt-get install -y --no-install-recommends libcudnn9-cuda-12=${CUDNN_VER} libcudnn9-dev-cuda-12=${CUDNN_VER}
apt-get install -y --no-install-recommends libnccl2=${NCCL_VER} libnccl-dev=${NCCL_VER}
apt-get install -y --no-install-recommends libcublas-${CUBLAS_CUDA_VERSION}=${CUBLAS_VER} libcublas-dev-${CUBLAS_CUDA_VERSION}=${CUBLAS_VER}
# NVRTC static library doesn't exist in NGC PyTorch container.
NVRTC_CUDA_VERSION=$(echo $CUDA_VER | sed 's/\./-/g')
apt-get install -y --no-install-recommends cuda-nvrtc-dev-${NVRTC_CUDA_VERSION}=${NVRTC_VER}
apt-get clean
rm -rf /var/lib/apt/lists/*
}
install_centos_requirements() {
CUBLAS_CUDA_VERSION=$(echo $CUDA_VER | sed 's/\./-/g')
yum -y update
yum -y install epel-release
yum remove -y libnccl* && yum -y install libnccl-${NCCL_VER} libnccl-devel-${NCCL_VER}
yum remove -y libcublas* && yum -y install libcublas-${CUBLAS_CUDA_VERSION}-${CUBLAS_VER} libcublas-devel-${CUBLAS_CUDA_VERSION}-${CUBLAS_VER}
yum clean all
}
install_tensorrt() {
#PY_VERSION=$(python3 -c 'import sys; print(".".join(map(str, sys.version_info[0:2])))')
#PARSED_PY_VERSION=$(echo "${PY_VERSION//./}")
TRT_CUDA_VERSION="12.8"
if [ -z "$RELEASE_URL_TRT" ];then
ARCH=${TRT_TARGETARCH}
if [ -z "$ARCH" ];then ARCH=$(uname -m);fi
if [ "$ARCH" = "arm64" ];then ARCH="aarch64";fi
if [ "$ARCH" = "amd64" ];then ARCH="x86_64";fi
if [ "$ARCH" = "x86_64" ];then DIR_NAME="x64-agnostic"; else DIR_NAME=${ARCH};fi
if [ "$ARCH" = "aarch64" ];then OS1="Ubuntu22_04" && OS2="Ubuntu-24.04" && OS="ubuntu-24.04"; else OS1="Linux" && OS2="Linux" && OS="linux";fi
RELEASE_URL_TRT=https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/${TRT_VER_BASE}/tars/TensorRT-${TRT_VER_FULL}.${OS2}.${ARCH}-gnu.cuda-${TRT_CUDA_VERSION}.tar.gz
fi
wget --no-verbose ${RELEASE_URL_TRT} -O /tmp/TensorRT.tar
tar -xf /tmp/TensorRT.tar -C /usr/local/
mv /usr/local/TensorRT-${TRT_VER_FULL} /usr/local/tensorrt
# pip3 install /usr/local/tensorrt/python/tensorrt-*-cp${PARSED_PY_VERSION}-*.whl
rm -rf /tmp/TensorRT.tar
}
# Install base packages depending on the base OS
ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
case "$ID" in
debian)
install_ubuntu_requirements
install_tensorrt
;;
ubuntu)
install_ubuntu_requirements
install_tensorrt
;;
centos)
install_centos_requirements
install_tensorrt
;;
*)
echo "Unable to determine OS..."
exit 1
;;
esac
| text-generation-inference/backends/trtllm/scripts/install_tensorrt.sh/0 | {
"file_path": "text-generation-inference/backends/trtllm/scripts/install_tensorrt.sh",
"repo_id": "text-generation-inference",
"token_count": 2083
} | 271 |
/// Inspired by https://github.com/hatoo/oha/blob/bb989ea3cd77727e7743e7daa60a19894bb5e901/src/monitor.rs
use crate::generation::{Decode, Message, Prefill};
use ratatui::crossterm::event::{KeyCode, KeyEvent, KeyModifiers};
use ratatui::layout::{Alignment, Constraint, Direction, Layout};
use ratatui::style::{Color, Modifier, Style};
use ratatui::text::{Line, Span};
use ratatui::widgets::{
Axis, BarChart, Block, Borders, Chart, Dataset, Gauge, GraphType, Paragraph, Tabs,
};
use ratatui::{symbols, Frame};
use text_generation_client::ClientError;
use tokio::sync::mpsc;
/// TUI powered App
pub(crate) struct App {
pub(crate) running: bool,
pub(crate) data: Data,
completed_runs: Vec<usize>,
completed_batch: usize,
current_batch: usize,
current_tab: usize,
touched_tab: bool,
zoom: bool,
is_error: bool,
tokenizer_name: String,
sequence_length: u32,
decode_length: u32,
n_run: usize,
receiver: mpsc::Receiver<Result<Message, ClientError>>,
}
impl App {
pub(crate) fn new(
receiver: mpsc::Receiver<Result<Message, ClientError>>,
tokenizer_name: String,
sequence_length: u32,
decode_length: u32,
n_run: usize,
batch_size: Vec<u32>,
) -> Self {
let current_tab = 0;
let completed_runs: Vec<usize> = (0..batch_size.len()).map(|_| 0).collect();
let completed_batch = 0;
let current_batch = 0;
let is_error = false;
let data = Data::new(n_run, batch_size);
Self {
running: true,
data,
completed_runs,
completed_batch,
current_batch,
current_tab,
touched_tab: false,
zoom: false,
is_error,
tokenizer_name,
sequence_length,
decode_length,
n_run,
receiver,
}
}
/// Handle crossterm key events
pub(crate) fn handle_key_event(&mut self, key_event: KeyEvent) {
match key_event {
// Increase and wrap tab
KeyEvent {
code: KeyCode::Right,
..
}
| KeyEvent {
code: KeyCode::Tab, ..
} => {
self.touched_tab = true;
self.current_tab = (self.current_tab + 1) % self.data.batch_size.len();
}
// Decrease and wrap tab
KeyEvent {
code: KeyCode::Left,
..
} => {
self.touched_tab = true;
if self.current_tab > 0 {
self.current_tab -= 1;
} else {
self.current_tab = self.data.batch_size.len() - 1;
}
}
// Zoom on throughput/latency fig
KeyEvent {
code: KeyCode::Char('+'),
..
} => {
self.zoom = true;
}
// Unzoom on throughput/latency fig
KeyEvent {
code: KeyCode::Char('-'),
..
} => {
self.zoom = false;
}
// Quit
KeyEvent {
code: KeyCode::Char('q'),
..
}
| KeyEvent {
code: KeyCode::Char('c'),
modifiers: KeyModifiers::CONTROL,
..
} => {
self.running = false;
}
_ => (),
}
}
/// Get all pending messages from generation task
pub(crate) fn tick(&mut self) {
while let Ok(message) = self.receiver.try_recv() {
match message {
Ok(message) => match message {
Message::Prefill(step) => self.data.push_prefill(step, self.current_batch),
Message::Decode(step) => self.data.push_decode(step, self.current_batch),
Message::EndRun => {
self.completed_runs[self.current_batch] += 1;
}
Message::EndBatch => {
self.data.end_batch(self.current_batch);
self.completed_batch += 1;
if self.current_batch < self.data.batch_size.len() - 1 {
// Only go to next tab if the user never touched the tab keys
if !self.touched_tab {
self.current_tab += 1;
}
self.current_batch += 1;
}
}
Message::Warmup => {}
},
Err(_) => self.is_error = true,
}
}
}
/// Render frame
pub fn render(&mut self, f: &mut Frame) {
let batch_progress =
(self.completed_batch as f64 / self.data.batch_size.len() as f64).clamp(0.0, 1.0);
let run_progress =
(self.completed_runs[self.current_batch] as f64 / self.n_run as f64).clamp(0.0, 1.0);
// Vertical layout
let row5 = Layout::default()
.direction(Direction::Vertical)
.constraints(
[
Constraint::Length(1),
Constraint::Length(3),
Constraint::Length(3),
Constraint::Length(13),
Constraint::Min(10),
]
.as_ref(),
)
.split(f.area());
// Top row horizontal layout
let top = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.split(row5[2]);
// Mid row horizontal layout
let mid = Layout::default()
.direction(Direction::Horizontal)
.constraints(
[
Constraint::Percentage(25),
Constraint::Percentage(25),
Constraint::Percentage(25),
Constraint::Percentage(25),
]
.as_ref(),
)
.split(row5[3]);
// Left mid row vertical layout
let prefill_text = Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Length(8), Constraint::Length(5)].as_ref())
.split(mid[0]);
// Right mid row vertical layout
let decode_text = Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Length(8), Constraint::Length(5)].as_ref())
.split(mid[2]);
let decode_text_latency = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.split(decode_text[0]);
// Bottom row horizontal layout
let bottom = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.split(row5[4]);
// Title
let title = Block::default()
.borders(Borders::NONE)
.title(format!(
"Model: {} | Sequence Length: {} | Decode Length: {}",
self.tokenizer_name, self.sequence_length, self.decode_length
))
.style(
Style::default()
.add_modifier(Modifier::BOLD)
.fg(Color::White),
);
f.render_widget(title, row5[0]);
// Helper
let helper = Block::default()
.borders(Borders::NONE)
.title("<- | tab | ->: change batch tab | q / CTRL + c: quit | +/-: zoom")
.title_alignment(Alignment::Right)
.style(Style::default().fg(Color::White));
f.render_widget(helper, row5[0]);
// Batch tabs
let titles: Vec<Line> = self
.data
.batch_size
.iter()
.map(|b| {
Line::from(vec![Span::styled(
format!("Batch: {b}"),
Style::default().fg(Color::White),
)])
})
.collect();
let tabs = Tabs::new(titles)
.block(Block::default().borders(Borders::ALL).title("Tabs"))
.select(self.current_tab)
.style(Style::default().fg(Color::LightCyan))
.highlight_style(
Style::default()
.add_modifier(Modifier::BOLD)
.bg(Color::Black),
);
f.render_widget(tabs, row5[1]);
// Total progress bar
let color = if self.is_error {
Color::Red
} else {
Color::LightGreen
};
let batch_gauge = progress_gauge(
"Total Progress",
format!("{} / {}", self.completed_batch, self.data.batch_size.len()),
batch_progress,
color,
);
f.render_widget(batch_gauge, top[0]);
// Batch progress Bar
let color = if self.is_error {
Color::Red
} else {
Color::LightBlue
};
let run_gauge = progress_gauge(
"Batch Progress",
format!(
"{} / {}",
self.completed_runs[self.current_batch], self.n_run
),
run_progress,
color,
);
f.render_widget(run_gauge, top[1]);
// Prefill text infos
let prefill_latency_block = latency_paragraph(
&mut self.data.prefill_latencies[self.current_tab],
"Prefill",
);
let prefill_throughput_block =
throughput_paragraph(&self.data.prefill_throughputs[self.current_tab], "Prefill");
f.render_widget(prefill_latency_block, prefill_text[0]);
f.render_widget(prefill_throughput_block, prefill_text[1]);
// Prefill latency histogram
let histo_width = 7;
let bins = if mid[1].width < 2 {
0
} else {
(mid[1].width as usize - 2) / (histo_width + 1)
}
.max(2);
let histo_data =
latency_histogram_data(&self.data.prefill_latencies[self.current_tab], bins);
let histo_data_str: Vec<(&str, u64)> =
histo_data.iter().map(|(l, v)| (l.as_str(), *v)).collect();
let prefill_histogram =
latency_histogram(&histo_data_str, "Prefill").bar_width(histo_width as u16);
f.render_widget(prefill_histogram, mid[1]);
// Decode text info
let decode_latency_block = latency_paragraph(
&mut self.data.decode_latencies[self.current_tab],
"Decode Total",
);
let decode_token_latency_block = latency_paragraph(
&mut self.data.decode_token_latencies[self.current_tab],
"Decode Token",
);
let decode_throughput_block =
throughput_paragraph(&self.data.decode_throughputs[self.current_tab], "Decode");
f.render_widget(decode_latency_block, decode_text_latency[0]);
f.render_widget(decode_token_latency_block, decode_text_latency[1]);
f.render_widget(decode_throughput_block, decode_text[1]);
// Decode latency histogram
let histo_data =
latency_histogram_data(&self.data.decode_latencies[self.current_tab], bins);
let histo_data_str: Vec<(&str, u64)> =
histo_data.iter().map(|(l, v)| (l.as_str(), *v)).collect();
let decode_histogram =
latency_histogram(&histo_data_str, "Decode").bar_width(histo_width as u16);
f.render_widget(decode_histogram, mid[3]);
// Prefill latency/throughput chart
let prefill_latency_throughput_chart = latency_throughput_chart(
&self.data.prefill_batch_latency_throughput,
&self.data.batch_size,
self.zoom,
"Prefill",
);
f.render_widget(prefill_latency_throughput_chart, bottom[0]);
// Decode latency/throughput chart
let decode_latency_throughput_chart = latency_throughput_chart(
&self.data.decode_batch_latency_throughput,
&self.data.batch_size,
self.zoom,
"Decode",
);
f.render_widget(decode_latency_throughput_chart, bottom[1]);
}
}
/// App internal data struct
pub(crate) struct Data {
pub(crate) batch_size: Vec<u32>,
pub(crate) prefill_latencies: Vec<Vec<f64>>,
pub(crate) prefill_throughputs: Vec<Vec<f64>>,
pub(crate) decode_latencies: Vec<Vec<f64>>,
pub(crate) decode_token_latencies: Vec<Vec<f64>>,
pub(crate) decode_throughputs: Vec<Vec<f64>>,
pub(crate) prefill_batch_latency_throughput: Vec<(f64, f64)>,
pub(crate) decode_batch_latency_throughput: Vec<(f64, f64)>,
}
impl Data {
fn new(n_run: usize, batch_size: Vec<u32>) -> Self {
let prefill_latencies: Vec<Vec<f64>> = (0..batch_size.len())
.map(|_| Vec::with_capacity(n_run))
.collect();
let prefill_throughputs: Vec<Vec<f64>> = prefill_latencies.clone();
let decode_latencies: Vec<Vec<f64>> = prefill_latencies.clone();
let decode_token_latencies: Vec<Vec<f64>> = decode_latencies.clone();
let decode_throughputs: Vec<Vec<f64>> = prefill_throughputs.clone();
let prefill_batch_latency_throughput: Vec<(f64, f64)> =
Vec::with_capacity(batch_size.len());
let decode_batch_latency_throughput: Vec<(f64, f64)> =
prefill_batch_latency_throughput.clone();
Self {
batch_size,
prefill_latencies,
prefill_throughputs,
decode_latencies,
decode_token_latencies,
decode_throughputs,
prefill_batch_latency_throughput,
decode_batch_latency_throughput,
}
}
fn push_prefill(&mut self, prefill: Prefill, batch_idx: usize) {
let latency = prefill.latency.as_micros() as f64 / 1000.0;
self.prefill_latencies[batch_idx].push(latency);
self.prefill_throughputs[batch_idx].push(prefill.throughput);
}
fn push_decode(&mut self, decode: Decode, batch_idx: usize) {
let latency = decode.latency.as_micros() as f64 / 1000.0;
let token_latency = decode.token_latency.as_micros() as f64 / 1000.0;
self.decode_latencies[batch_idx].push(latency);
self.decode_token_latencies[batch_idx].push(token_latency);
self.decode_throughputs[batch_idx].push(decode.throughput);
}
fn end_batch(&mut self, batch_idx: usize) {
self.prefill_batch_latency_throughput.push((
self.prefill_latencies[batch_idx].iter().sum::<f64>()
/ self.prefill_latencies[batch_idx].len() as f64,
self.prefill_throughputs[batch_idx].iter().sum::<f64>()
/ self.prefill_throughputs[batch_idx].len() as f64,
));
self.decode_batch_latency_throughput.push((
self.decode_latencies[batch_idx].iter().sum::<f64>()
/ self.decode_latencies[batch_idx].len() as f64,
self.decode_throughputs[batch_idx].iter().sum::<f64>()
/ self.decode_throughputs[batch_idx].len() as f64,
));
}
}
/// Progress bar
fn progress_gauge(title: &str, label: String, progress: f64, color: Color) -> Gauge {
Gauge::default()
.block(Block::default().title(title).borders(Borders::ALL))
.gauge_style(Style::default().fg(color))
.label(Span::raw(label))
.ratio(progress)
}
/// Throughput paragraph
fn throughput_paragraph<'a>(throughput: &[f64], name: &'static str) -> Paragraph<'a> {
// Throughput average/high/low texts
    let throughput_texts = stats_spans(throughput, "tokens/secs");
// Throughput block
Paragraph::new(throughput_texts).block(
Block::default()
.title(Span::raw(format!("{name} Throughput")))
.borders(Borders::ALL),
)
}
/// Latency paragraph
fn latency_paragraph<'a>(latency: &mut [f64], name: &'static str) -> Paragraph<'a> {
// Latency average/high/low texts
    let mut latency_texts = stats_spans(latency, "ms");
// Sort latency for percentiles
float_ord::sort(latency);
let latency_percentiles = crate::utils::percentiles(latency, &[50, 90, 99]);
// Latency p50/p90/p99 texts
let colors = [Color::LightGreen, Color::LightYellow, Color::LightRed];
for (i, (name, value)) in latency_percentiles.iter().enumerate() {
let span = Line::from(vec![Span::styled(
format!("{name}: {value:.2} ms"),
Style::default().fg(colors[i]),
)]);
latency_texts.push(span);
}
Paragraph::new(latency_texts).block(
Block::default()
.title(Span::raw(format!("{name} Latency")))
.borders(Borders::ALL),
)
}
/// Average/High/Low spans
fn stats_spans<'a>(data: &[f64], unit: &'static str) -> Vec<Line<'a>> {
vec![
Line::from(vec![Span::styled(
format!(
"Average: {:.2} {unit}",
data.iter().sum::<f64>() / data.len() as f64
),
Style::default().fg(Color::LightBlue),
)]),
Line::from(vec![Span::styled(
format!(
"Lowest: {:.2} {unit}",
data.iter()
.min_by(|a, b| a.total_cmp(b))
.unwrap_or(&f64::NAN)
),
Style::default().fg(Color::Reset),
)]),
Line::from(vec![Span::styled(
format!(
"Highest: {:.2} {unit}",
data.iter()
.max_by(|a, b| a.total_cmp(b))
.unwrap_or(&f64::NAN)
),
Style::default().fg(Color::Reset),
)]),
]
}
/// Latency histogram data
fn latency_histogram_data(latency: &[f64], bins: usize) -> Vec<(String, u64)> {
let histo_data: Vec<(String, u64)> = {
let histo = crate::utils::histogram(latency, bins);
histo
.into_iter()
.map(|(label, v)| (format!("{label:.2}"), v as u64))
.collect()
};
histo_data
}
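// Sketch (hypothetical data, assuming utils::histogram returns
// (bucket_start, count) pairs): latencies [10.0, 10.5, 20.0] with bins=2
// could yield [("10.00", 2), ("15.00", 1)] after formatting.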
/// Latency Histogram
fn latency_histogram<'a>(
histo_data_str: &'a Vec<(&'a str, u64)>,
name: &'static str,
) -> BarChart<'a> {
BarChart::default()
.block(
Block::default()
.title(format!("{name} latency histogram"))
.style(Style::default().fg(Color::LightYellow).bg(Color::Reset))
.borders(Borders::ALL),
)
.data(histo_data_str.as_slice())
}
/// Latency/Throughput chart
fn latency_throughput_chart<'a>(
latency_throughput: &'a [(f64, f64)],
batch_sizes: &'a [u32],
zoom: bool,
name: &'static str,
) -> Chart<'a> {
let latency_iter = latency_throughput.iter().map(|(l, _)| l);
let throughput_iter = latency_throughput.iter().map(|(_, t)| t);
// Get extreme values
let min_latency: f64 = *latency_iter
.clone()
.min_by(|a, b| a.total_cmp(b))
.unwrap_or(&f64::NAN);
let max_latency: f64 = *latency_iter
.max_by(|a, b| a.total_cmp(b))
.unwrap_or(&f64::NAN);
let min_throughput: f64 = *throughput_iter
.clone()
.min_by(|a, b| a.total_cmp(b))
.unwrap_or(&f64::NAN);
let max_throughput: f64 = *throughput_iter
.max_by(|a, b| a.total_cmp(b))
.unwrap_or(&f64::NAN);
// Char min max values
let min_x = if zoom {
((min_latency - 0.05 * min_latency) / 100.0).floor() * 100.0
} else {
0.0
};
let max_x = ((max_latency + 0.05 * max_latency) / 100.0).ceil() * 100.0;
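    // Worked example: max_latency = 1234.5 ms gives
    // ((1234.5 + 61.725) / 100.0).ceil() * 100.0 = 1300.0, i.e. the x axis is
    // padded ~5% above the slowest point and rounded up to the next hundred.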
let step_x = (max_x - min_x) / 4.0;
// Chart min max values
let min_y = if zoom {
((min_throughput - 0.05 * min_throughput) / 100.0).floor() * 100.0
} else {
0.0
};
let max_y = ((max_throughput + 0.05 * max_throughput) / 100.0).ceil() * 100.0;
let step_y = (max_y - min_y) / 4.0;
// Labels
let mut x_labels = vec![Span::styled(
format!("{min_x:.2}"),
Style::default()
.add_modifier(Modifier::BOLD)
.fg(Color::Gray)
.bg(Color::Reset),
)];
for i in 0..3 {
x_labels.push(Span::styled(
format!("{:.2}", min_x + ((i + 1) as f64 * step_x)),
Style::default().fg(Color::Gray).bg(Color::Reset),
));
}
x_labels.push(Span::styled(
format!("{max_x:.2}"),
Style::default()
.add_modifier(Modifier::BOLD)
.fg(Color::Gray)
.bg(Color::Reset),
));
// Labels
let mut y_labels = vec![Span::styled(
format!("{min_y:.2}"),
Style::default()
.add_modifier(Modifier::BOLD)
.fg(Color::Gray)
.bg(Color::Reset),
)];
for i in 0..3 {
y_labels.push(Span::styled(
format!("{:.2}", min_y + ((i + 1) as f64 * step_y)),
Style::default().fg(Color::Gray).bg(Color::Reset),
));
}
y_labels.push(Span::styled(
format!("{max_y:.2}"),
Style::default()
.add_modifier(Modifier::BOLD)
.fg(Color::Gray)
.bg(Color::Reset),
));
// Chart dataset: one single-point scatter Dataset per batch size, so each batch size gets its own color and legend entry
let colors = color_vec();
let datasets: Vec<Dataset> = (0..latency_throughput.len())
.map(|i| {
let color_idx = i % colors.len();
Dataset::default()
.name(batch_sizes[i].to_string())
.marker(symbols::Marker::Block)
.style(Style::default().fg(colors[color_idx]))
.graph_type(GraphType::Scatter)
.data(&latency_throughput[i..(i + 1)])
})
.collect();
// Chart
Chart::new(datasets)
.style(Style::default().fg(Color::Cyan).bg(Color::Reset))
.block(
Block::default()
.title(Span::styled(
format!("{name} throughput over latency"),
Style::default().fg(Color::Gray).bg(Color::Reset),
))
.borders(Borders::ALL),
)
.x_axis(
Axis::default()
.title("ms")
.style(Style::default().fg(Color::Gray).bg(Color::Reset))
.labels(x_labels)
.bounds([min_x, max_x]),
)
.y_axis(
Axis::default()
.title("tokens/secs")
.style(Style::default().fg(Color::Gray).bg(Color::Reset))
.labels(y_labels)
.bounds([min_y, max_y]),
)
}
// Colors for latency/throughput chart
fn color_vec() -> Vec<Color> {
vec![
Color::Red,
Color::Green,
Color::Yellow,
Color::Blue,
Color::Magenta,
Color::Cyan,
Color::Gray,
Color::DarkGray,
Color::LightRed,
Color::LightGreen,
Color::LightYellow,
Color::LightBlue,
Color::LightMagenta,
Color::LightCyan,
]
}
| text-generation-inference/benchmark/src/app.rs/0 | {
"file_path": "text-generation-inference/benchmark/src/app.rs",
"repo_id": "text-generation-inference",
"token_count": 12188
} | 272 |
import pytest
from text_generation.types import Parameters, Request
from text_generation.errors import ValidationError
def test_parameters_validation():
# Test best_of
Parameters(best_of=1)
with pytest.raises(ValidationError):
Parameters(best_of=0)
with pytest.raises(ValidationError):
Parameters(best_of=-1)
Parameters(best_of=2, do_sample=True)
with pytest.raises(ValidationError):
Parameters(best_of=2)
with pytest.raises(ValidationError):
Parameters(best_of=2, seed=1)
# Test repetition_penalty
Parameters(repetition_penalty=1)
with pytest.raises(ValidationError):
Parameters(repetition_penalty=0)
with pytest.raises(ValidationError):
Parameters(repetition_penalty=-1)
# Test seed
Parameters(seed=1)
with pytest.raises(ValidationError):
Parameters(seed=-1)
# Test temperature
Parameters(temperature=1)
with pytest.raises(ValidationError):
Parameters(temperature=0)
with pytest.raises(ValidationError):
Parameters(temperature=-1)
# Test top_k
Parameters(top_k=1)
with pytest.raises(ValidationError):
Parameters(top_k=0)
with pytest.raises(ValidationError):
Parameters(top_k=-1)
# Test top_p
Parameters(top_p=0.5)
with pytest.raises(ValidationError):
Parameters(top_p=0)
with pytest.raises(ValidationError):
Parameters(top_p=-1)
with pytest.raises(ValidationError):
Parameters(top_p=1)
# Test truncate
Parameters(truncate=1)
with pytest.raises(ValidationError):
Parameters(truncate=0)
with pytest.raises(ValidationError):
Parameters(truncate=-1)
# Test typical_p
Parameters(typical_p=0.5)
with pytest.raises(ValidationError):
Parameters(typical_p=0)
with pytest.raises(ValidationError):
Parameters(typical_p=-1)
with pytest.raises(ValidationError):
Parameters(typical_p=1)
def test_request_validation():
Request(inputs="test")
with pytest.raises(ValidationError):
Request(inputs="")
Request(inputs="test", stream=True)
Request(inputs="test", parameters=Parameters(best_of=2, do_sample=True))
with pytest.raises(ValidationError):
Request(
inputs="test", parameters=Parameters(best_of=2, do_sample=True), stream=True
)
| text-generation-inference/clients/python/tests/test_types.py/0 | {
"file_path": "text-generation-inference/clients/python/tests/test_types.py",
"repo_id": "text-generation-inference",
"token_count": 984
} | 273 |
# Consuming Text Generation Inference
There are many ways to consume Text Generation Inference (TGI) server in your applications. After launching the server, you can use the [Messages API](https://huggingface.co/docs/text-generation-inference/en/messages_api) `/v1/chat/completions` route and make a `POST` request to get results from the server. You can also pass `"stream": true` to the call if you want TGI to return a stream of tokens.
For more information on the API, consult the OpenAPI documentation of `text-generation-inference` available [here](https://huggingface.github.io/text-generation-inference).
You can make the requests using any tool of your preference, such as curl, Python, or TypeScript. For an end-to-end experience, we've open-sourced [ChatUI](https://github.com/huggingface/chat-ui), a chat interface for open-access models.
## curl
After a successful server launch, you can query the model using the `v1/chat/completions` route to get responses that are compliant with the OpenAI Chat Completion spec:
```bash
curl localhost:8080/v1/chat/completions \
-X POST \
-d '{
"model": "tgi",
"messages": [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "What is deep learning?"
}
],
"stream": true,
"max_tokens": 20
}' \
-H 'Content-Type: application/json'
```
For non-chat use-cases, you can also use the `/generate` and `/generate_stream` routes.
```bash
curl 127.0.0.1:8080/generate \
-X POST \
-d '{
"inputs":"What is Deep Learning?",
"parameters":{
"max_new_tokens":20
}
}' \
-H 'Content-Type: application/json'
```
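If you want tokens streamed back as server-sent events instead, the `/generate_stream` route accepts the same payload (a minimal sketch mirroring the request above):
```bash
curl 127.0.0.1:8080/generate_stream \
    -X POST \
    -d '{
        "inputs":"What is Deep Learning?",
        "parameters":{
            "max_new_tokens":20
        }
    }' \
    -H 'Content-Type: application/json'
```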
## Python
### Inference Client
[`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/main/en/index) is a Python library to interact with the Hugging Face Hub, including its endpoints. It provides a high-level class, [`huggingface_hub.InferenceClient`](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient), which makes it easy to make calls to TGI's Messages API. `InferenceClient` also takes care of parameter validation and provides a simple-to-use interface.
Install `huggingface_hub` package via pip.
```bash
pip install huggingface_hub
```
You can now use `InferenceClient` in the exact same way you would use the `OpenAI` client in Python:
```python
from huggingface_hub import InferenceClient
client = InferenceClient(
base_url="http://localhost:8080/v1/",
)
output = client.chat.completions.create(
model="tgi",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Count to 10"},
],
stream=True,
max_tokens=1024,
)
for chunk in output:
print(chunk.choices[0].delta.content)
```
You can check out more details about OpenAI compatibility [here](https://huggingface.co/docs/huggingface_hub/en/guides/inference#openai-compatibility).
There is also an async version of the client, `AsyncInferenceClient`, based on `asyncio` and `aiohttp`. You can find docs for it [here](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.AsyncInferenceClient).
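As a quick illustration (a minimal sketch; the endpoint URL and prompt are placeholders), the async client mirrors the synchronous API:
```python
import asyncio
from huggingface_hub import AsyncInferenceClient

async def main():
    client = AsyncInferenceClient(base_url="http://localhost:8080/v1/")
    output = await client.chat.completions.create(
        model="tgi",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Count to 10"},
        ],
        stream=True,
        max_tokens=1024,
    )
    # Consume the stream chunk by chunk as tokens arrive
    async for chunk in output:
        print(chunk.choices[0].delta.content)

asyncio.run(main())
```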
### OpenAI Client
You can directly use the OpenAI [Python](https://github.com/openai/openai-python) or [JS](https://github.com/openai/openai-node) clients to interact with TGI.
Install the OpenAI Python package via pip.
```bash
pip install openai
```
```python
from openai import OpenAI
# init the client but point it to TGI
client = OpenAI(
base_url="http://localhost:8080/v1/",
api_key="-"
)
chat_completion = client.chat.completions.create(
model="tgi",
messages=[
{"role": "system", "content": "You are a helpful assistant." },
{"role": "user", "content": "What is deep learning?"}
],
stream=True
)
# iterate and print stream
for message in chat_completion:
print(message)
```
## UI
### Gradio
Gradio is a Python library that helps you build web applications for your machine learning models with a few lines of code. It has a `ChatInterface` wrapper that helps create neat UIs for chatbots. Let's take a look at how to create a chatbot with streaming mode using TGI and Gradio. First, install Gradio and the Hub Python library.
```bash
pip install huggingface-hub gradio
```
Assuming you are serving your model on port 8080, we will query it through the [InferenceClient](consuming_tgi#inference-client).
```python
import gradio as gr
from huggingface_hub import InferenceClient
client = InferenceClient(base_url="http://127.0.0.1:8080")
def inference(message, history):
partial_message = ""
output = client.chat.completions.create(
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": message},
],
stream=True,
max_tokens=1024,
)
for chunk in output:
partial_message += chunk.choices[0].delta.content
yield partial_message
gr.ChatInterface(
inference,
type="messages",
description="This is the demo for Gradio UI consuming TGI endpoint.",
title="Gradio 🤝 TGI",
examples=["Are tomatoes vegetables?"],
).queue().launch()
```
You can check out the UI and try the demo directly here 👇
<div class="block dark:hidden">
<iframe
src="https://merve-gradio-tgi-2.hf.space?__theme=light"
width="850"
height="750"
></iframe>
</div>
<div class="hidden dark:block">
<iframe
src="https://merve-gradio-tgi-2.hf.space?__theme=dark"
width="850"
height="750"
></iframe>
</div>
You can read more about how to customize a `ChatInterface` [here](https://www.gradio.app/guides/creating-a-chatbot-fast).
### ChatUI
[ChatUI](https://github.com/huggingface/chat-ui) is an open-source interface built for consuming LLMs. It offers many customization options, such as web search with SERP API and more. ChatUI can automatically consume the TGI server and even provides an option to switch between different TGI endpoints. You can try it out at [Hugging Chat](https://huggingface.co/chat/), or use the [ChatUI Docker Space](https://huggingface.co/new-space?template=huggingchat/chat-ui-template) to deploy your own Hugging Chat to Spaces.
To serve both ChatUI and TGI in the same environment, simply add your own endpoints to the `MODELS` variable in the `.env.local` file inside the `chat-ui` repository. Provide the endpoints pointing to where TGI is served.
```
{
// rest of the model config here
"endpoints": [{"url": "https://HOST:PORT/generate_stream"}]
}
```

| text-generation-inference/docs/source/basic_tutorials/consuming_tgi.md/0 | {
"file_path": "text-generation-inference/docs/source/basic_tutorials/consuming_tgi.md",
"repo_id": "text-generation-inference",
"token_count": 2308
} | 274 |
# Quantization
TGI offers many quantization schemes to run LLMs effectively and fast based on your use-case. TGI supports GPTQ, AWQ, bits-and-bytes, EETQ, Marlin, EXL2 and fp8 quantization.
To leverage GPTQ, AWQ, Marlin and EXL2 quants, you must provide pre-quantized weights. Whereas for bits-and-bytes, EETQ and fp8, weights are quantized by TGI on the fly.
We recommend using the official quantization scripts for creating your quants:
1. [AWQ](https://github.com/casper-hansen/AutoAWQ/blob/main/examples/quantize.py)
2. [GPTQ/ Marlin](https://github.com/AutoGPTQ/AutoGPTQ/blob/main/examples/quantization/basic_usage.py)
3. [EXL2](https://github.com/turboderp/exllamav2/blob/master/doc/convert.md)
For on-the-fly quantization you simply need to pass one of the supported quantization types and TGI takes care of the rest.
## Quantization with bitsandbytes, EETQ & fp8
bitsandbytes is a library used to apply 8-bit and 4-bit quantization to models. Unlike GPTQ quantization, bitsandbytes doesn't require a calibration dataset or any post-processing – weights are automatically quantized on load. However, inference with bitsandbytes is slower than GPTQ or FP16 precision.
8-bit quantization enables multi-billion parameter scale models to fit in smaller hardware without degrading performance too much.
In TGI, you can use 8-bit quantization by adding `--quantize bitsandbytes` like below 👇
```bash
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.3.4 --model-id $model --quantize bitsandbytes
```
4-bit quantization is also possible with bitsandbytes. You can choose one of the following 4-bit data types: 4-bit float (`fp4`), or 4-bit `NormalFloat` (`nf4`). These data types were introduced in the context of parameter-efficient fine-tuning, but you can apply them for inference by automatically converting the model weights on load.
In TGI, you can use 4-bit quantization by adding `--quantize bitsandbytes-nf4` or `--quantize bitsandbytes-fp4` like below 👇
```bash
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.3.4 --model-id $model --quantize bitsandbytes-nf4
```
You can get more information about 8-bit quantization by reading this [blog post](https://huggingface.co/blog/hf-bitsandbytes-integration), and 4-bit quantization by reading [this blog post](https://huggingface.co/blog/4bit-transformers-bitsandbytes).
Similarly, you can pass `--quantize eetq` or `--quantize fp8` for the respective quantization schemes.
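For example, on-the-fly EETQ quantization follows the same pattern as the bitsandbytes commands above (a sketch reusing the same `$model` and `$volume` placeholders):
```bash
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.3.4 --model-id $model --quantize eetq
```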
In addition to this, TGI allows creating GPTQ quants directly by passing the model weights and a calibration dataset.
## Quantization with GPTQ
GPTQ is a post-training quantization method that makes the model smaller. It quantizes each layer by finding a compressed version of its weights that yields the minimum mean squared error, as shown below 👇
Given a layer \\(l\\) with weight matrix \\(W_{l}\\) and layer input \\(X_{l}\\), find quantized weight \\(\\hat{W}_{l}\\):
$$\hat{W}_{l}^{*} = \text{argmin}_{\hat{W}_{l}} ||W_{l}X_{l}-\hat{W}_{l}X_{l}||^{2}_{2}$$
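To make the objective concrete, here is a toy NumPy sketch (the shapes and the round-to-nearest quantizer are illustrative assumptions only; the actual GPTQ solver quantizes columns iteratively with error compensation):
```python
import numpy as np

rng = np.random.default_rng(0)
W_l = rng.standard_normal((4, 8))   # layer weight matrix (out_features x in_features)
X_l = rng.standard_normal((8, 16))  # calibration inputs for this layer

def objective(W_hat):
    # ||W_l X_l - W_hat X_l||_2^2 -- the quantity GPTQ minimizes per layer
    return np.sum((W_l @ X_l - W_hat @ X_l) ** 2)

# Naive 4-bit round-to-nearest quantizer, *not* the GPTQ solver itself:
scale = np.abs(W_l).max() / 7
W_hat = np.clip(np.round(W_l / scale), -8, 7) * scale
print(f"reconstruction error: {objective(W_hat):.4f}")
```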
TGI allows you to either run an already GPTQ-quantized model (see available models [here](https://huggingface.co/models?search=gptq)) or quantize a model of your choice using the quantization script. You can run a quantized model by simply passing `--quantize gptq` like below 👇
```bash
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.3.4 --model-id $model --quantize gptq
```
Note that TGI's GPTQ implementation doesn't use [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) under the hood. However, models quantized using AutoGPTQ or Optimum can still be served by TGI.
To quantize a given model using GPTQ with a calibration dataset, simply run
```bash
text-generation-server quantize tiiuae/falcon-40b /data/falcon-40b-gptq
# Add --upload-to-model-id MYUSERNAME/falcon-40b to push the created model to the hub directly
```
This will create a new directory with the quantized files, which you can then use with:
```bash
text-generation-launcher --model-id /data/falcon-40b-gptq/ --sharded true --num-shard 2 --quantize gptq
```
You can learn more about the quantization options by running `text-generation-server quantize --help`.
If you wish to do more with GPTQ models (e.g. train an adapter on top), you can read about transformers GPTQ integration [here](https://huggingface.co/blog/gptq-integration).
You can learn more about GPTQ from the [paper](https://arxiv.org/pdf/2210.17323.pdf).
| text-generation-inference/docs/source/conceptual/quantization.md/0 | {
"file_path": "text-generation-inference/docs/source/conceptual/quantization.md",
"repo_id": "text-generation-inference",
"token_count": 1442
} | 275 |
# Text-generation-launcher arguments
<!-- WRAP CODE BLOCKS -->
```shell
Text Generation Launcher
Usage: text-generation-launcher [OPTIONS]
Options:
```
## MODEL_ID
```shell
--model-id <MODEL_ID>
The name of the model to load. Can be a MODEL_ID as listed on <https://hf.co/models> like `gpt2` or `OpenAssistant/oasst-sft-1-pythia-12b`. Or it can be a local directory containing the necessary files as saved by `save_pretrained(...)` methods of transformers
[env: MODEL_ID=]
[default: bigscience/bloom-560m]
```
## REVISION
```shell
--revision <REVISION>
The actual revision of the model if you're referring to a model on the hub. You can use a specific commit id or a branch like `refs/pr/2`
[env: REVISION=]
```
## VALIDATION_WORKERS
```shell
--validation-workers <VALIDATION_WORKERS>
The number of tokenizer workers used for payload validation and truncation inside the router
[env: VALIDATION_WORKERS=]
[default: 2]
```
## SHARDED
```shell
--sharded <SHARDED>
          Whether to shard the model across multiple GPUs. By default text-generation-inference will use all available GPUs to run the model. Setting it to `false` deactivates `num_shard`
[env: SHARDED=]
[possible values: true, false]
```
## NUM_SHARD
```shell
--num-shard <NUM_SHARD>
          The number of shards to use if you don't want to use all GPUs on a given machine. You can use `CUDA_VISIBLE_DEVICES=0,1 text-generation-launcher... --num_shard 2` and `CUDA_VISIBLE_DEVICES=2,3 text-generation-launcher... --num_shard 2` to launch 2 copies with 2 shards each on a given machine with 4 GPUs for instance
[env: NUM_SHARD=]
```
## QUANTIZE
```shell
--quantize <QUANTIZE>
Quantization method to use for the model. It is not necessary to specify this option for pre-quantized models, since the quantization method is read from the model configuration.
Marlin kernels will be used automatically for GPTQ/AWQ models.
[env: QUANTIZE=]
Possible values:
- awq: 4 bit quantization. Requires a specific AWQ quantized model: <https://hf.co/models?search=awq>. Should replace GPTQ models wherever possible because of the better latency
- compressed-tensors: Compressed tensors, which can be a mixture of different quantization methods
- eetq: 8 bit quantization, doesn't require specific model. Should be a drop-in replacement to bitsandbytes with much better performance. Kernels are from <https://github.com/NetEase-FuXi/EETQ.git>
- exl2: Variable bit quantization. Requires a specific EXL2 quantized model: <https://hf.co/models?search=exl2>. Requires exllama2 kernels and does not support tensor parallelism (num_shard > 1)
          - gptq: 4 bit quantization. Requires a specific GPTQ quantized model: <https://hf.co/models?search=gptq>. text-generation-inference will use exllama (faster) kernels wherever possible, and use the triton kernel (wider support) when it's not. AWQ has faster kernels
- marlin: 4 bit quantization. Requires a specific Marlin quantized model: <https://hf.co/models?search=marlin>
- bitsandbytes: Bitsandbytes 8bit. Can be applied on any model, will cut the memory requirement in half, but it is known that the model will be much slower to run than the native f16
- bitsandbytes-nf4: Bitsandbytes 4bit. Can be applied on any model, will cut the memory requirement by 4x, but it is known that the model will be much slower to run than the native f16
          - bitsandbytes-fp4: Bitsandbytes 4bit. nf4 should be preferred in most cases but maybe this one has better perplexity performance for your model
          - fp8: [FP8](https://developer.nvidia.com/blog/nvidia-arm-and-intel-publish-fp8-specification-for-standardization-as-an-interchange-format-for-ai/) (e4m3) works on H100 and above. This dtype has native ops and should be the fastest if available. This is currently not the fastest because of local unpacking + padding to satisfy matrix multiplication limitations
```
## SPECULATE
```shell
--speculate <SPECULATE>
          The number of input_ids to speculate on. If using a medusa model, the heads will be picked up automatically. Otherwise, it will use n-gram speculation, which is relatively free in terms of compute, but the speedup heavily depends on the task
[env: SPECULATE=]
```
## DTYPE
```shell
--dtype <DTYPE>
The dtype to be forced upon the model. This option cannot be used with `--quantize`
[env: DTYPE=]
[possible values: float16, bfloat16]
```
## KV_CACHE_DTYPE
```shell
--kv-cache-dtype <KV_CACHE_DTYPE>
          Specify the dtype for the key-value cache. When this option is not provided, the dtype of the model is used (typically `float16` or `bfloat16`). Currently the only supported values are `fp8_e4m3fn` and `fp8_e5m2` on CUDA
[env: KV_CACHE_DTYPE=]
[possible values: fp8_e4m3fn, fp8_e5m2]
```
## TRUST_REMOTE_CODE
```shell
--trust-remote-code
Whether you want to execute hub modelling code. Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure no malicious code has been contributed in a newer revision
[env: TRUST_REMOTE_CODE=]
```
## MAX_CONCURRENT_REQUESTS
```shell
--max-concurrent-requests <MAX_CONCURRENT_REQUESTS>
          The maximum amount of concurrent requests for this particular deployment. Having a low limit will refuse client requests instead of having them wait for too long, and is usually good to handle backpressure correctly
[env: MAX_CONCURRENT_REQUESTS=]
[default: 128]
```
## MAX_BEST_OF
```shell
--max-best-of <MAX_BEST_OF>
          This is the maximum allowed value for clients to set `best_of`. Best of makes `n` generations at the same time, and returns the best in terms of overall log probability over the entire generated sequence
[env: MAX_BEST_OF=]
[default: 2]
```
## MAX_STOP_SEQUENCES
```shell
--max-stop-sequences <MAX_STOP_SEQUENCES>
This is the maximum allowed value for clients to set `stop_sequences`. Stop sequences are used to allow the model to stop on more than just the EOS token, and enable more complex "prompting" where users can preprompt the model in a specific way and define their "own" stop token aligned with their prompt
[env: MAX_STOP_SEQUENCES=]
[default: 4]
```
## MAX_TOP_N_TOKENS
```shell
--max-top-n-tokens <MAX_TOP_N_TOKENS>
          This is the maximum allowed value for clients to set `top_n_tokens`. `top_n_tokens` is used to return information about the `n` most likely tokens at each generation step, instead of just the sampled token. This information can be used for downstream tasks like classification or ranking
[env: MAX_TOP_N_TOKENS=]
[default: 5]
```
## MAX_INPUT_TOKENS
```shell
--max-input-tokens <MAX_INPUT_TOKENS>
          This is the maximum allowed input length (expressed in number of tokens) for users. The larger this value, the longer prompts users can send, which can impact the overall memory required to handle the load. Please note that some models have a finite range of sequence lengths they can handle. Defaults to min(max_allocatable, max_position_embeddings) - 1
[env: MAX_INPUT_TOKENS=]
```
## MAX_INPUT_LENGTH
```shell
--max-input-length <MAX_INPUT_LENGTH>
Legacy version of [`Args::max_input_tokens`]
[env: MAX_INPUT_LENGTH=]
```
## MAX_TOTAL_TOKENS
```shell
--max-total-tokens <MAX_TOTAL_TOKENS>
          This is the most important value to set as it defines the "memory budget" of running clients requests. Clients will send input sequences and ask to generate `max_new_tokens` on top. With a value of `1512`, users can send either a prompt of `1000` and ask for `512` new tokens, or send a prompt of `1` and ask for `1511` max_new_tokens. The larger this value, the larger amount each request will be in your RAM and the less effective batching can be. Defaults to min(max_allocatable, max_position_embeddings)
[env: MAX_TOTAL_TOKENS=]
```
## WAITING_SERVED_RATIO
```shell
--waiting-served-ratio <WAITING_SERVED_RATIO>
          This represents the ratio of waiting queries vs running queries where you want to start considering pausing the running queries to include the waiting ones into the same batch. `waiting_served_ratio=1.2` means that when 12 queries are waiting and there are only 10 queries left in the current batch, we check if we can fit those 12 waiting queries into the batching strategy, and if yes, batching happens, delaying the 10 running queries by a `prefill` run.
This setting is only applied if there is room in the batch as defined by `max_batch_total_tokens`.
[env: WAITING_SERVED_RATIO=]
[default: 0.3]
```
## MAX_BATCH_PREFILL_TOKENS
```shell
--max-batch-prefill-tokens <MAX_BATCH_PREFILL_TOKENS>
          Limits the number of tokens for the prefill operation. Since this operation takes the most memory and is compute bound, it is interesting to limit the number of requests that can be sent. Defaults to `max_input_tokens + 50` to give a bit of room
[env: MAX_BATCH_PREFILL_TOKENS=]
```
## MAX_BATCH_TOTAL_TOKENS
```shell
--max-batch-total-tokens <MAX_BATCH_TOTAL_TOKENS>
**IMPORTANT** This is one critical control to allow maximum usage of the available hardware.
          This represents the total amount of potential tokens within a batch. When using padding (not recommended), this would be equivalent to `batch_size` * `max_total_tokens`.
However in the non-padded (flash attention) version this can be much finer.
For `max_batch_total_tokens=1000`, you could fit `10` queries of `total_tokens=100` or a single query of `1000` tokens.
          Overall this number should be the largest possible amount that fits the remaining memory (after the model is loaded). Since the actual memory overhead depends on other parameters (e.g. whether you're using quantization, flash attention, or the model implementation), text-generation-inference infers this number automatically if not provided, ensuring that the value is as large as possible.
[env: MAX_BATCH_TOTAL_TOKENS=]
```
## MAX_WAITING_TOKENS
```shell
--max-waiting-tokens <MAX_WAITING_TOKENS>
This setting defines how many tokens can be passed before forcing the waiting queries to be put on the batch (if the size of the batch allows for it). New queries require 1 `prefill` forward, which is different from `decode` and therefore you need to pause the running batch in order to run `prefill` to create the correct values for the waiting queries to be able to join the batch.
With a value too small, queries will always "steal" the compute to run `prefill` and running queries will be delayed by a lot.
With a value too big, waiting queries could wait for a very long time before being allowed a slot in the running batch. If your server is busy that means that requests that could run in ~2s on an empty server could end up running in ~20s because the query had to wait for 18s.
This number is expressed in number of tokens to make it a bit more "model" agnostic, but what should really matter is the overall latency for end users.
[env: MAX_WAITING_TOKENS=]
[default: 20]
```
## MAX_BATCH_SIZE
```shell
--max-batch-size <MAX_BATCH_SIZE>
          Enforce a maximum number of requests per batch. Specific flag for hardware targets that do not support unpadded inference
[env: MAX_BATCH_SIZE=]
```
## CUDA_GRAPHS
```shell
--cuda-graphs <CUDA_GRAPHS>
Specify the batch sizes to compute cuda graphs for. Use "0" to disable. Default = "1,2,4,8,16,32"
[env: CUDA_GRAPHS=]
```
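For instance (values are illustrative, not recommendations), you can restrict graph capture to small batch sizes, or disable it entirely:
```shell
text-generation-launcher --model-id $model --cuda-graphs "1,2,4"
# use "0" to disable cuda graph capture altogether
text-generation-launcher --model-id $model --cuda-graphs 0
```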
## HOSTNAME
```shell
--hostname <HOSTNAME>
The IP address to listen on
[env: HOSTNAME=]
[default: 0.0.0.0]
```
## PORT
```shell
-p, --port <PORT>
The port to listen on
[env: PORT=]
[default: 3000]
```
## PROMETHEUS_PORT
```shell
-p, --prometheus-port <PROMETHEUS_PORT>
The Prometheus port to listen on
[env: PROMETHEUS_PORT=]
[default: 9000]
```
## SHARD_UDS_PATH
```shell
--shard-uds-path <SHARD_UDS_PATH>
The name of the socket for gRPC communication between the webserver and the shards
[env: SHARD_UDS_PATH=]
[default: /tmp/text-generation-server]
```
## MASTER_ADDR
```shell
--master-addr <MASTER_ADDR>
The address the master shard will listen on. (setting used by torch distributed)
[env: MASTER_ADDR=]
[default: localhost]
```
## MASTER_PORT
```shell
--master-port <MASTER_PORT>
          The port the master shard will listen on. (setting used by torch distributed)
[env: MASTER_PORT=]
[default: 29500]
```
## HUGGINGFACE_HUB_CACHE
```shell
--huggingface-hub-cache <HUGGINGFACE_HUB_CACHE>
The location of the huggingface hub cache. Used to override the location if you want to provide a mounted disk for instance
[env: HUGGINGFACE_HUB_CACHE=]
```
## WEIGHTS_CACHE_OVERRIDE
```shell
--weights-cache-override <WEIGHTS_CACHE_OVERRIDE>
          The location of the model weights cache. Used to override the weights location if you want to provide a mounted disk for instance
[env: WEIGHTS_CACHE_OVERRIDE=]
```
## DISABLE_CUSTOM_KERNELS
```shell
--disable-custom-kernels
For some models (like bloom), text-generation-inference implemented custom cuda kernels to speed up inference. Those kernels were only tested on A100. Use this flag to disable them if you're running on different hardware and encounter issues
[env: DISABLE_CUSTOM_KERNELS=]
```
## CUDA_MEMORY_FRACTION
```shell
--cuda-memory-fraction <CUDA_MEMORY_FRACTION>
Limit the CUDA available memory. The allowed value equals the total visible memory multiplied by cuda-memory-fraction
[env: CUDA_MEMORY_FRACTION=]
[default: 1.0]
```
## ROPE_SCALING
```shell
--rope-scaling <ROPE_SCALING>
          Rope scaling will only be used for RoPE models and allows rescaling the rotary position embeddings to accommodate larger prompts.
Goes together with `rope_factor`.
          `--rope-factor 2.0` gives linear scaling with a factor of 2.0. `--rope-scaling dynamic` gives dynamic scaling with a factor of 1.0. `--rope-scaling linear` gives linear scaling with a factor of 1.0 (basically nothing changes)
`--rope-scaling linear --rope-factor` fully describes the scaling you want
[env: ROPE_SCALING=]
[possible values: linear, dynamic]
```
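As an illustration (the factor value is an example, not a recommendation), a launch combining both flags might look like:
```shell
text-generation-launcher --model-id $model --rope-scaling dynamic --rope-factor 2.0
```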
## ROPE_FACTOR
```shell
--rope-factor <ROPE_FACTOR>
          Rope scaling will only be used for RoPE models. See `rope_scaling`
[env: ROPE_FACTOR=]
```
## JSON_OUTPUT
```shell
--json-output
Outputs the logs in JSON format (useful for telemetry)
[env: JSON_OUTPUT=]
```
## OTLP_ENDPOINT
```shell
--otlp-endpoint <OTLP_ENDPOINT>
[env: OTLP_ENDPOINT=]
```
## OTLP_SERVICE_NAME
```shell
--otlp-service-name <OTLP_SERVICE_NAME>
[env: OTLP_SERVICE_NAME=]
[default: text-generation-inference.router]
```
## CORS_ALLOW_ORIGIN
```shell
--cors-allow-origin <CORS_ALLOW_ORIGIN>
[env: CORS_ALLOW_ORIGIN=]
```
## API_KEY
```shell
--api-key <API_KEY>
[env: API_KEY=]
```
## WATERMARK_GAMMA
```shell
--watermark-gamma <WATERMARK_GAMMA>
[env: WATERMARK_GAMMA=]
```
## WATERMARK_DELTA
```shell
--watermark-delta <WATERMARK_DELTA>
[env: WATERMARK_DELTA=]
```
## NGROK
```shell
--ngrok
Enable ngrok tunneling
[env: NGROK=]
```
## NGROK_AUTHTOKEN
```shell
--ngrok-authtoken <NGROK_AUTHTOKEN>
ngrok authentication token
[env: NGROK_AUTHTOKEN=]
```
## NGROK_EDGE
```shell
--ngrok-edge <NGROK_EDGE>
ngrok edge
[env: NGROK_EDGE=]
```
## TOKENIZER_CONFIG_PATH
```shell
--tokenizer-config-path <TOKENIZER_CONFIG_PATH>
The path to the tokenizer config file. This path is used to load the tokenizer configuration which may include a `chat_template`. If not provided, the default config will be used from the model hub
[env: TOKENIZER_CONFIG_PATH=]
```
## DISABLE_GRAMMAR_SUPPORT
```shell
--disable-grammar-support
Disable outlines grammar constrained generation. This is a feature that allows you to generate text that follows a specific grammar
[env: DISABLE_GRAMMAR_SUPPORT=]
```
## ENV
```shell
-e, --env
Display a lot of information about your runtime environment
```
## MAX_CLIENT_BATCH_SIZE
```shell
--max-client-batch-size <MAX_CLIENT_BATCH_SIZE>
Control the maximum number of inputs that a client can send in a single request
[env: MAX_CLIENT_BATCH_SIZE=]
[default: 4]
```
## LORA_ADAPTERS
```shell
--lora-adapters <LORA_ADAPTERS>
          Lora Adapters: a comma-separated list of adapter ids, i.e. `repo/adapter1,repo/adapter2`, to load during startup; these will be available to callers via the `adapter_id` field in a request
[env: LORA_ADAPTERS=]
```
## USAGE_STATS
```shell
--usage-stats <USAGE_STATS>
          Control if anonymous usage stats are collected. Options are "on", "off" and "no-stack". Default is "on"
[env: USAGE_STATS=]
[default: on]
Possible values:
- on: Default option, usage statistics are collected anonymously
- off: Disables all collection of usage statistics
- no-stack: Doesn't send the error stack trace or error type, but allows sending a crash event
```
## PAYLOAD_LIMIT
```shell
--payload-limit <PAYLOAD_LIMIT>
Payload size limit in bytes
Default is 2MB
[env: PAYLOAD_LIMIT=]
[default: 2000000]
```
## ENABLE_PREFILL_LOGPROBS
```shell
--enable-prefill-logprobs
Enables prefill logprobs
          Logprobs in the prompt are deactivated by default because they consume a large amount of VRAM (especially for long prompts). Using this flag allows users to ask for them again.
[env: ENABLE_PREFILL_LOGPROBS=]
```
## GRACEFUL_TERMINATION_TIMEOUT
```shell
-g, --graceful-termination-timeout <GRACEFUL_TERMINATION_TIMEOUT>
          Change the timeout for graceful termination of the TGI server
[env: GRACEFUL_TERMINATION_TIMEOUT=]
[default: 90]
```
## HELP
```shell
-h, --help
Print help (see a summary with '-h')
```
## VERSION
```shell
-V, --version
Print version
```
| text-generation-inference/docs/source/reference/launcher.md/0 | {
"file_path": "text-generation-inference/docs/source/reference/launcher.md",
"repo_id": "text-generation-inference",
"token_count": 7909
} | 276 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 15,
"logprob": null,
"text": ","
},
{
"id": 1669,
"logprob": -5.4453125,
"text": " il"
},
{
"id": 11580,
"logprob": -2.3378906,
"text": " faut"
},
{
"id": 3913,
"logprob": -4.3320312,
"text": " tout"
},
{
"id": 39261,
"logprob": -2.9160156,
"text": " d'abord"
}
],
"seed": 0,
"tokens": [
{
"id": 408,
"logprob": -0.16687012,
"special": false,
"text": " que"
},
{
"id": 366,
"logprob": -1.5517578,
"special": false,
"text": " la"
},
{
"id": 8769,
"logprob": -0.16687012,
"special": false,
"text": " personne"
},
{
"id": 1479,
"logprob": -2.1035156,
"special": false,
"text": " qui"
},
{
"id": 143926,
"logprob": -2.8671875,
"special": false,
"text": " réalise"
},
{
"id": 578,
"logprob": 0.0,
"special": false,
"text": " le"
},
{
"id": 8138,
"logprob": -0.66748047,
"special": false,
"text": " projet"
},
{
"id": 795,
"logprob": -1.6279297,
"special": false,
"text": " ne"
},
{
"id": 9802,
"logprob": -0.47875977,
"special": false,
"text": " soit"
},
{
"id": 1230,
"logprob": 0.0,
"special": false,
"text": " pas"
}
],
"top_tokens": null
},
"generated_text": "Pour déguster un ortolan, il faut tout d'abord que la personne qui réalise le projet ne soit pas"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 1204
} | 277 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 76,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 18183,
"logprob": -1.5195312,
"special": false,
"text": " Deep"
},
{
"id": 6832,
"logprob": -0.06817627,
"special": false,
"text": " learning"
},
{
"id": 374,
"logprob": -0.13122559,
"special": false,
"text": " is"
},
{
"id": 264,
"logprob": -0.13415527,
"special": false,
"text": " a"
},
{
"id": 25993,
"logprob": -0.8769531,
"special": false,
"text": " subset"
},
{
"id": 315,
"logprob": -0.0011396408,
"special": false,
"text": " of"
},
{
"id": 5662,
"logprob": -0.16442871,
"special": false,
"text": " machine"
},
{
"id": 6832,
"logprob": -0.0026416779,
"special": false,
"text": " learning"
},
{
"id": 429,
"logprob": -0.48754883,
"special": false,
"text": " that"
},
{
"id": 5711,
"logprob": -1.2294922,
"special": false,
"text": " uses"
},
{
"id": 29728,
"logprob": -0.66503906,
"special": false,
"text": " neural"
},
{
"id": 14155,
"logprob": -0.02960205,
"special": false,
"text": " networks"
},
{
"id": 311,
"logprob": -0.7236328,
"special": false,
"text": " to"
},
{
"id": 3960,
"logprob": -1.1914062,
"special": false,
"text": " learn"
},
{
"id": 504,
"logprob": -0.7089844,
"special": false,
"text": " from"
},
{
"id": 821,
"logprob": -0.7729492,
"special": false,
"text": " data"
},
{
"id": 13,
"logprob": -0.7836914,
"special": false,
"text": "."
},
{
"id": 1084,
"logprob": -0.9941406,
"special": false,
"text": " It"
},
{
"id": 374,
"logprob": -0.52441406,
"special": false,
"text": " is"
},
{
"id": 264,
"logprob": -0.9511719,
"special": false,
"text": " a"
},
{
"id": 943,
"logprob": -0.8642578,
"special": false,
"text": " type"
},
{
"id": 315,
"logprob": -0.00030231476,
"special": false,
"text": " of"
},
{
"id": 20443,
"logprob": -0.14416504,
"special": false,
"text": " artificial"
},
{
"id": 11229,
"logprob": -0.013824463,
"special": false,
"text": " intelligence"
},
{
"id": 429,
"logprob": -0.18762207,
"special": false,
"text": " that"
},
{
"id": 646,
"logprob": -1.0087891,
"special": false,
"text": " can"
},
{
"id": 3960,
"logprob": -0.90234375,
"special": false,
"text": " learn"
},
{
"id": 504,
"logprob": -0.54345703,
"special": false,
"text": " from"
},
{
"id": 323,
"logprob": -1.0400391,
"special": false,
"text": " and"
},
{
"id": 1281,
"logprob": -0.072509766,
"special": false,
"text": " make"
},
{
"id": 19898,
"logprob": -0.16516113,
"special": false,
"text": " predictions"
},
{
"id": 389,
"logprob": -0.4416504,
"special": false,
"text": " on"
},
{
"id": 3460,
"logprob": -0.5385742,
"special": false,
"text": " large"
},
{
"id": 14713,
"logprob": -0.4387207,
"special": false,
"text": " amounts"
},
{
"id": 315,
"logprob": -0.00015091896,
"special": false,
"text": " of"
},
{
"id": 821,
"logprob": -0.061431885,
"special": false,
"text": " data"
},
{
"id": 13,
"logprob": -0.71875,
"special": false,
"text": "."
},
{
"id": 18183,
"logprob": -0.23632812,
"special": false,
"text": " Deep"
},
{
"id": 6832,
"logprob": -0.0017204285,
"special": false,
"text": " learning"
},
{
"id": 374,
"logprob": -1.1738281,
"special": false,
"text": " is"
},
{
"id": 1483,
"logprob": -0.61083984,
"special": false,
"text": " used"
},
{
"id": 304,
"logprob": -0.035003662,
"special": false,
"text": " in"
},
{
"id": 264,
"logprob": -0.118652344,
"special": false,
"text": " a"
},
{
"id": 8045,
"logprob": -0.42016602,
"special": false,
"text": " variety"
},
{
"id": 315,
"logprob": -1.6212463e-05,
"special": false,
"text": " of"
},
{
"id": 8357,
"logprob": -0.1315918,
"special": false,
"text": " applications"
},
{
"id": 11,
"logprob": -0.12915039,
"special": false,
"text": ","
},
{
"id": 2670,
"logprob": -0.12463379,
"special": false,
"text": " including"
},
{
"id": 2168,
"logprob": -0.37402344,
"special": false,
"text": " image"
},
{
"id": 323,
"logprob": -0.1451416,
"special": false,
"text": " and"
},
{
"id": 8806,
"logprob": -0.028869629,
"special": false,
"text": " speech"
},
{
"id": 17843,
"logprob": -0.00024068356,
"special": false,
"text": " recognition"
},
{
"id": 11,
"logprob": -0.00031018257,
"special": false,
"text": ","
},
{
"id": 5810,
"logprob": -0.019821167,
"special": false,
"text": " natural"
},
{
"id": 4128,
"logprob": -0.00012528896,
"special": false,
"text": " language"
},
{
"id": 8692,
"logprob": -0.00089263916,
"special": false,
"text": " processing"
},
{
"id": 11,
"logprob": -0.00073862076,
"special": false,
"text": ","
},
{
"id": 323,
"logprob": -0.040161133,
"special": false,
"text": " and"
},
{
"id": 38193,
"logprob": -0.4519043,
"special": false,
"text": " autonomous"
},
{
"id": 11474,
"logprob": -0.39941406,
"special": false,
"text": " vehicles"
},
{
"id": 13,
"logprob": -0.21166992,
"special": false,
"text": "."
},
{
"id": 1084,
"logprob": -0.9082031,
"special": false,
"text": " It"
},
{
"id": 374,
"logprob": -0.44213867,
"special": false,
"text": " is"
},
{
"id": 264,
"logprob": -1.2177734,
"special": false,
"text": " a"
},
{
"id": 18512,
"logprob": -0.5205078,
"special": false,
"text": " rapidly"
},
{
"id": 7826,
"logprob": -0.15332031,
"special": false,
"text": " growing"
},
{
"id": 2070,
"logprob": -0.0039978027,
"special": false,
"text": " field"
},
{
"id": 448,
"logprob": -0.9091797,
"special": false,
"text": " with"
},
{
"id": 1657,
"logprob": -0.17114258,
"special": false,
"text": " many"
},
{
"id": 4650,
"logprob": -0.70703125,
"special": false,
"text": " potential"
},
{
"id": 8357,
"logprob": -0.025131226,
"special": false,
"text": " applications"
},
{
"id": 304,
"logprob": -0.6699219,
"special": false,
"text": " in"
},
{
"id": 279,
"logprob": -0.35205078,
"special": false,
"text": " the"
},
{
"id": 3853,
"logprob": -0.049194336,
"special": false,
"text": " future"
},
{
"id": 13,
"logprob": -0.21972656,
"special": false,
"text": "."
},
{
"id": 151643,
"logprob": -2.0019531,
"special": true,
"text": "<|endoftext|>"
}
],
"top_tokens": null
},
"generated_text": " Deep learning is a subset of machine learning that uses neural networks to learn from data. It is a type of artificial intelligence that can learn from and make predictions on large amounts of data. Deep learning is used in a variety of applications, including image and speech recognition, natural language processing, and autonomous vehicles. It is a rapidly growing field with many potential applications in the future."
}
| text-generation-inference/integration-tests/models/__snapshots__/test_compressed_tensors_w8a8_int_dynamic_weight/test_compressed_tensors_w8a8_int_dynamic_weight.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_compressed_tensors_w8a8_int_dynamic_weight/test_compressed_tensors_w8a8_int_dynamic_weight.json",
"repo_id": "text-generation-inference",
"token_count": 5893
} | 278 |
{
"choices": [
{
"finish_reason": "stop",
"index": 0,
"logprobs": null,
"message": {
"content": "Okay, let's analyze the image.\n\nThe image is a solid, bright white color. There is nothing else visible within it. \n\nIt's essentially a blank white square or rectangle.",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1747062956,
"id": "",
"model": "google/gemma-3-4b-it",
"object": "chat.completion",
"system_fingerprint": "3.3.4-dev0-native",
"usage": {
"completion_tokens": 42,
"prompt_tokens": 277,
"total_tokens": 319
}
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma3/test_flash_gemma3_image_base64_rgb_jpg.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma3/test_flash_gemma3_image_base64_rgb_jpg.json",
"repo_id": "text-generation-inference",
"token_count": 304
} | 279 |
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 363,
"logprob": -1.5351562,
"special": false,
"text": " for"
},
{
"id": 847,
"logprob": -2.5566406,
"special": false,
"text": " /"
},
{
"id": 2754,
"logprob": -2.2519531,
"special": false,
"text": "api"
},
{
"id": 29914,
"logprob": -0.03414917,
"special": false,
"text": "/"
},
{
"id": 29894,
"logprob": -0.96240234,
"special": false,
"text": "v"
},
{
"id": 29896,
"logprob": -0.3647461,
"special": false,
"text": "1"
},
{
"id": 29914,
"logprob": -0.012901306,
"special": false,
"text": "/"
},
{
"id": 16418,
"logprob": -3.1542969,
"special": false,
"text": "projects"
},
{
"id": 29914,
"logprob": -0.4362793,
"special": false,
"text": "/"
},
{
"id": 29896,
"logprob": -1.9394531,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": " for /api/v1/projects/1"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 363,
"logprob": -1.5332031,
"special": false,
"text": " for"
},
{
"id": 847,
"logprob": -2.5625,
"special": false,
"text": " /"
},
{
"id": 2754,
"logprob": -2.2617188,
"special": false,
"text": "api"
},
{
"id": 29914,
"logprob": -0.033996582,
"special": false,
"text": "/"
},
{
"id": 29894,
"logprob": -0.9609375,
"special": false,
"text": "v"
},
{
"id": 29896,
"logprob": -0.36572266,
"special": false,
"text": "1"
},
{
"id": 29914,
"logprob": -0.0129776,
"special": false,
"text": "/"
},
{
"id": 16418,
"logprob": -3.15625,
"special": false,
"text": "projects"
},
{
"id": 29914,
"logprob": -0.4362793,
"special": false,
"text": "/"
},
{
"id": 29896,
"logprob": -1.9394531,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": " for /api/v1/projects/1"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 363,
"logprob": -1.5332031,
"special": false,
"text": " for"
},
{
"id": 847,
"logprob": -2.5625,
"special": false,
"text": " /"
},
{
"id": 2754,
"logprob": -2.2617188,
"special": false,
"text": "api"
},
{
"id": 29914,
"logprob": -0.033996582,
"special": false,
"text": "/"
},
{
"id": 29894,
"logprob": -0.9609375,
"special": false,
"text": "v"
},
{
"id": 29896,
"logprob": -0.36572266,
"special": false,
"text": "1"
},
{
"id": 29914,
"logprob": -0.0129776,
"special": false,
"text": "/"
},
{
"id": 16418,
"logprob": -3.15625,
"special": false,
"text": "projects"
},
{
"id": 29914,
"logprob": -0.4362793,
"special": false,
"text": "/"
},
{
"id": 29896,
"logprob": -1.9394531,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": " for /api/v1/projects/1"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 363,
"logprob": -1.5332031,
"special": false,
"text": " for"
},
{
"id": 847,
"logprob": -2.5625,
"special": false,
"text": " /"
},
{
"id": 2754,
"logprob": -2.2617188,
"special": false,
"text": "api"
},
{
"id": 29914,
"logprob": -0.033996582,
"special": false,
"text": "/"
},
{
"id": 29894,
"logprob": -0.9609375,
"special": false,
"text": "v"
},
{
"id": 29896,
"logprob": -0.36572266,
"special": false,
"text": "1"
},
{
"id": 29914,
"logprob": -0.0129776,
"special": false,
"text": "/"
},
{
"id": 16418,
"logprob": -3.15625,
"special": false,
"text": "projects"
},
{
"id": 29914,
"logprob": -0.4362793,
"special": false,
"text": "/"
},
{
"id": 29896,
"logprob": -1.9394531,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": " for /api/v1/projects/1"
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_load.json",
"repo_id": "text-generation-inference",
"token_count": 4045
} | 280 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": 0,
"tokens": [
{
"id": 25584,
"logprob": 0.0,
"special": false,
"text": "Grad"
},
{
"id": 993,
"logprob": 0.0,
"special": false,
"text": "ient"
},
{
"id": 2726,
"logprob": 0.0,
"special": false,
"text": " Des"
},
{
"id": 1760,
"logprob": 0.0,
"special": false,
"text": "cent"
},
{
"id": 313,
"logprob": -0.12322998,
"special": false,
"text": " ("
},
{
"id": 29954,
"logprob": 0.0,
"special": false,
"text": "G"
},
{
"id": 29928,
"logprob": 0.0,
"special": false,
"text": "D"
},
{
"id": 29897,
"logprob": 0.0,
"special": false,
"text": ")"
},
{
"id": 338,
"logprob": -0.6040039,
"special": false,
"text": " is"
},
{
"id": 385,
"logprob": -0.1796875,
"special": false,
"text": " an"
}
],
"top_tokens": null
},
"generated_text": "What is gradient descent?\nGradient Descent (GD) is an"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_phi35_moe/test_flash_phi35_moe_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_phi35_moe/test_flash_phi35_moe_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 849
} | 281 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 60,
"prefill": [],
"seed": 0,
"tokens": [
{
"id": 2262,
"logprob": -0.045715332,
"special": false,
"text": "():"
},
{
"id": 284,
"logprob": 0.0,
"special": false,
"text": "\n "
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 440,
"logprob": 0.0,
"special": false,
"text": "(\""
},
{
"id": 8279,
"logprob": 0.0,
"special": false,
"text": "Hello"
},
{
"id": 10896,
"logprob": -0.3659668,
"special": false,
"text": " World"
},
{
"id": 657,
"logprob": -0.5229492,
"special": false,
"text": "\")"
},
{
"id": 203,
"logprob": -0.10632324,
"special": false,
"text": "\n"
},
{
"id": 203,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 589,
"logprob": -0.20141602,
"special": false,
"text": "def"
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 81,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 7656,
"logprob": 0.0,
"special": false,
"text": "hello"
},
{
"id": 81,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 426,
"logprob": -0.051635742,
"special": false,
"text": "name"
},
{
"id": 26,
"logprob": 0.0,
"special": false,
"text": "("
},
{
"id": 426,
"logprob": 0.0,
"special": false,
"text": "name"
},
{
"id": 711,
"logprob": 0.0,
"special": false,
"text": "):"
},
{
"id": 284,
"logprob": 0.0,
"special": false,
"text": "\n "
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 440,
"logprob": -0.16027832,
"special": false,
"text": "(\""
},
{
"id": 8279,
"logprob": 0.0,
"special": false,
"text": "Hello"
},
{
"id": 313,
"logprob": 0.0,
"special": false,
"text": " \""
},
{
"id": 474,
"logprob": 0.0,
"special": false,
"text": " +"
},
{
"id": 636,
"logprob": 0.0,
"special": false,
"text": " name"
},
{
"id": 27,
"logprob": 0.0,
"special": false,
"text": ")"
},
{
"id": 203,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 203,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 589,
"logprob": 0.0,
"special": false,
"text": "def"
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 81,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 7656,
"logprob": 0.0,
"special": false,
"text": "hello"
},
{
"id": 81,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 426,
"logprob": 0.0,
"special": false,
"text": "name"
},
{
"id": 81,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 381,
"logprob": 0.0,
"special": false,
"text": "age"
},
{
"id": 26,
"logprob": 0.0,
"special": false,
"text": "("
},
{
"id": 426,
"logprob": 0.0,
"special": false,
"text": "name"
},
{
"id": 30,
"logprob": 0.0,
"special": false,
"text": ","
},
{
"id": 11442,
"logprob": 0.0,
"special": false,
"text": " age"
},
{
"id": 711,
"logprob": 0.0,
"special": false,
"text": "):"
},
{
"id": 284,
"logprob": 0.0,
"special": false,
"text": "\n "
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 440,
"logprob": 0.0,
"special": false,
"text": "(\""
},
{
"id": 8279,
"logprob": 0.0,
"special": false,
"text": "Hello"
},
{
"id": 313,
"logprob": 0.0,
"special": false,
"text": " \""
},
{
"id": 474,
"logprob": 0.0,
"special": false,
"text": " +"
},
{
"id": 636,
"logprob": 0.0,
"special": false,
"text": " name"
},
{
"id": 474,
"logprob": 0.0,
"special": false,
"text": " +"
},
{
"id": 313,
"logprob": -0.6933594,
"special": false,
"text": " \""
},
{
"id": 313,
"logprob": -1.7011719,
"special": false,
"text": " \""
},
{
"id": 474,
"logprob": 0.0,
"special": false,
"text": " +"
},
{
"id": 596,
"logprob": 0.0,
"special": false,
"text": " str"
},
{
"id": 26,
"logprob": 0.0,
"special": false,
"text": "("
},
{
"id": 381,
"logprob": 0.0,
"special": false,
"text": "age"
},
{
"id": 490,
"logprob": 0.0,
"special": false,
"text": "))"
},
{
"id": 203,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 203,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 589,
"logprob": 0.0,
"special": false,
"text": "def"
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
}
],
"top_tokens": null
},
"generated_text": "():\n print(\"Hello World\")\n\ndef print_hello_name(name):\n print(\"Hello \" + name)\n\ndef print_hello_name_age(name, age):\n print(\"Hello \" + name + \" \" + str(age))\n\ndef print"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder_default_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder_default_params.json",
"repo_id": "text-generation-inference",
"token_count": 4504
} | 282 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4911,
"logprob": -6.9765625,
"text": "User"
},
{
"id": 29901,
"logprob": -0.0059432983,
"text": ":"
},
{
"id": 32000,
"logprob": -0.8408203,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -9.906292e-05,
"text": "<image>"
},
{
"id": 32000,
"logprob": -2.3841858e-07,
"text": "<fake_token_around_image>"
},
{
"id": 1815,
"logprob": -4.1679688,
"text": "Can"
},
{
"id": 366,
"logprob": -0.014099121,
"text": "you"
},
{
"id": 2649,
"logprob": -4.4609375,
"text": "tell"
},
{
"id": 592,
"logprob": -0.29882812,
"text": "me"
},
{
"id": 263,
"logprob": -4.1445312,
"text": "a"
},
{
"id": 1407,
"logprob": -9.3828125,
"text": "very"
},
{
"id": 3273,
"logprob": -1.9736328,
"text": "short"
},
{
"id": 5828,
"logprob": -0.2800293,
"text": "story"
},
{
"id": 2729,
"logprob": -3.5625,
"text": "based"
},
{
"id": 373,
"logprob": -0.0006427765,
"text": "on"
},
{
"id": 278,
"logprob": -0.13952637,
"text": "the"
},
{
"id": 1967,
"logprob": -0.068115234,
"text": "image"
},
{
"id": 29973,
"logprob": -0.16357422,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 32002,
"logprob": -0.0026474,
"special": true,
"text": "<end_of_utterance>"
},
{
"id": 29871,
"logprob": -8.547306e-05,
"special": false,
"text": " "
},
{
"id": 13,
"logprob": -1.7881393e-05,
"special": false,
"text": "\n"
},
{
"id": 7900,
"logprob": -3.0994415e-06,
"special": false,
"text": "Ass"
},
{
"id": 22137,
"logprob": 0.0,
"special": false,
"text": "istant"
},
{
"id": 29901,
"logprob": -3.2186508e-06,
"special": false,
"text": ":"
},
{
"id": 319,
"logprob": -0.92529297,
"special": false,
"text": " A"
},
{
"id": 696,
"logprob": -1.1269531,
"special": false,
"text": " ro"
},
{
"id": 15664,
"logprob": -0.00029492378,
"special": false,
"text": "oster"
},
{
"id": 15028,
"logprob": -1.1855469,
"special": false,
"text": " stands"
}
]
},
"generated_text": " \nAssistant: A rooster stands"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_idefics/test_idefics.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_idefics/test_idefics.json",
"repo_id": "text-generation-inference",
"token_count": 2062
} | 283 |
{
"details": {
"finish_reason": "length",
"generated_tokens": 40,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.0488281,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -1.0800781,
"special": false,
"text": "\n"
},
{
"id": 27332,
"logprob": -2.1152344,
"special": false,
"text": "###"
},
{
"id": 28705,
"logprob": -1.6748047,
"special": false,
"text": " "
},
{
"id": 28740,
"logprob": -0.097229004,
"special": false,
"text": "1"
},
{
"id": 28723,
"logprob": -0.16467285,
"special": false,
"text": "."
},
{
"id": 7615,
"logprob": -2.2246094,
"special": false,
"text": " News"
},
{
"id": 13,
"logprob": -1.0488281,
"special": false,
"text": "\n"
},
{
"id": 27332,
"logprob": -0.69189453,
"special": false,
"text": "###"
},
{
"id": 28705,
"logprob": -0.013343811,
"special": false,
"text": " "
},
{
"id": 28750,
"logprob": -0.011230469,
"special": false,
"text": "2"
},
{
"id": 28723,
"logprob": -0.00096845627,
"special": false,
"text": "."
},
{
"id": 21095,
"logprob": -2.5605469,
"special": false,
"text": " Blog"
},
{
"id": 13,
"logprob": -0.19458008,
"special": false,
"text": "\n"
},
{
"id": 27332,
"logprob": -0.031280518,
"special": false,
"text": "###"
},
{
"id": 28705,
"logprob": -0.0030708313,
"special": false,
"text": " "
},
{
"id": 28770,
"logprob": -0.0029277802,
"special": false,
"text": "3"
},
{
"id": 28723,
"logprob": -0.0012350082,
"special": false,
"text": "."
},
{
"id": 20108,
"logprob": -2.1582031,
"special": false,
"text": " Article"
},
{
"id": 13,
"logprob": -0.05810547,
"special": false,
"text": "\n"
},
{
"id": 27332,
"logprob": -0.35083008,
"special": false,
"text": "###"
},
{
"id": 28705,
"logprob": -0.034332275,
"special": false,
"text": " "
},
{
"id": 28781,
"logprob": -0.009666443,
"special": false,
"text": "4"
},
{
"id": 28723,
"logprob": -0.0013113022,
"special": false,
"text": "."
},
{
"id": 8349,
"logprob": -2.6191406,
"special": false,
"text": " Review"
},
{
"id": 13,
"logprob": -0.04031372,
"special": false,
"text": "\n"
},
{
"id": 27332,
"logprob": -0.45239258,
"special": false,
"text": "###"
},
{
"id": 28705,
"logprob": -0.045410156,
"special": false,
"text": " "
},
{
"id": 28782,
"logprob": -0.0041236877,
"special": false,
"text": "5"
},
{
"id": 28723,
"logprob": -0.0010223389,
"special": false,
"text": "."
},
{
"id": 5299,
"logprob": -2.8066406,
"special": false,
"text": " Other"
},
{
"id": 13,
"logprob": -0.12054443,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.44580078,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -1.4921875,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -1.3574219,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -1.0039062,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.5859375,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.43481445,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.2783203,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.20410156,
"special": false,
"text": "\n"
}
]
},
"generated_text": "\n\n### 1. News\n### 2. Blog\n### 3. Article\n### 4. Review\n### 5. Other\n\n\n\n\n\n\n\n\n"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_lora_mistral/test_lora_mistral_without_adapter.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_lora_mistral/test_lora_mistral_without_adapter.json",
"repo_id": "text-generation-inference",
"token_count": 3130
} | 284 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -2.3417969,
"special": false,
"text": "\n"
},
{
"id": 3057,
"logprob": -1.8730469,
"special": false,
"text": "Test"
},
{
"id": 2009,
"logprob": -1.2626953,
"special": false,
"text": " request"
},
{
"id": 13,
"logprob": -1.7060547,
"special": false,
"text": "\n"
},
{
"id": 3057,
"logprob": -1.4482422,
"special": false,
"text": "Test"
},
{
"id": 2009,
"logprob": -0.15246582,
"special": false,
"text": " request"
},
{
"id": 13,
"logprob": -0.796875,
"special": false,
"text": "\n"
},
{
"id": 3057,
"logprob": -0.22766113,
"special": false,
"text": "Test"
},
{
"id": 2009,
"logprob": -0.007045746,
"special": false,
"text": " request"
},
{
"id": 13,
"logprob": -0.021759033,
"special": false,
"text": "\n"
}
],
"top_tokens": null
},
"generated_text": "\nTest request\nTest request\nTest request\n"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_server_gptq_quantized/test_server_gptq_quantized.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_server_gptq_quantized/test_server_gptq_quantized.json",
"repo_id": "text-generation-inference",
"token_count": 867
} | 285 |
import pytest
@pytest.fixture(scope="module")
def compressed_tensors_w8a8_int_dynamic_weight_handle(launcher):
with launcher(
"danieldk/Qwen2.5-1.5B-Instruct-w8a8-int-dynamic-weight",
num_shard=2,
quantize="compressed-tensors",
) as handle:
yield handle
@pytest.fixture(scope="module")
async def compressed_tensors_w8a8_int_dynamic_weight(
compressed_tensors_w8a8_int_dynamic_weight_handle,
):
await compressed_tensors_w8a8_int_dynamic_weight_handle.health(300)
return compressed_tensors_w8a8_int_dynamic_weight_handle.client
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_compressed_tensors_w8a8_int_dynamic_weight(
compressed_tensors_w8a8_int_dynamic_weight, response_snapshot
):
response = await compressed_tensors_w8a8_int_dynamic_weight.generate(
"What is deep learning?",
        # prefer a longer response than the default, and allow the LLM to end generation on its own
max_new_tokens=1000,
decoder_input_details=True,
)
assert (
response.generated_text
== " Deep learning is a subset of machine learning that uses neural networks to learn from data. It is a type of artificial intelligence that can learn from and make predictions on large amounts of data. Deep learning is used in a variety of applications, including image and speech recognition, natural language processing, and autonomous vehicles. It is a rapidly growing field with many potential applications in the future."
)
assert response.details.generated_tokens == 76
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_compressed_tensors_w8a8_int_dynamic_weight_all_params(
compressed_tensors_w8a8_int_dynamic_weight, response_snapshot
):
response = await compressed_tensors_w8a8_int_dynamic_weight.generate(
"What is deep learning",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert (
response.generated_text
== "What is deep learning?\nDeep Learning (DL), or artificial neural networks"
)
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_compressed_tensors_w8a8_int_dynamic_weight_load(
compressed_tensors_w8a8_int_dynamic_weight, generate_load, response_snapshot
):
responses = await generate_load(
compressed_tensors_w8a8_int_dynamic_weight,
"What is deep learning?",
max_new_tokens=10,
n=4,
)
assert (
responses[0].generated_text
== " Deep learning is a subset of machine learning that uses"
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_compressed_tensors_w8a8_int_dynamic_weight.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_compressed_tensors_w8a8_int_dynamic_weight.py",
"repo_id": "text-generation-inference",
"token_count": 1234
} | 286 |
import pytest
@pytest.fixture(scope="module")
def flash_llama_exl2_handle(launcher):
with launcher(
"turboderp/Llama-3-8B-Instruct-exl2",
revision="2.5bpw",
# Set max input length to avoid OOM due to extremely large
# scratch buffer.
max_input_length=1024,
num_shard=1,
quantize="exl2",
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_exl2(flash_llama_exl2_handle):
await flash_llama_exl2_handle.health(300)
return flash_llama_exl2_handle.client
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_exl2(flash_llama_exl2, ignore_logprob_response_snapshot):
response = await flash_llama_exl2.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == ignore_logprob_response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_exl2_all_params(
flash_llama_exl2, ignore_logprob_response_snapshot
):
response = await flash_llama_exl2.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert (
response.generated_text == 'Test request. The server responds with a "200 OK"'
)
assert response == ignore_logprob_response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_exl2_load(
flash_llama_exl2, generate_load, ignore_logprob_response_snapshot
):
responses = await generate_load(
flash_llama_exl2, "Test request", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == ignore_logprob_response_snapshot
| text-generation-inference/integration-tests/models/test_flash_llama_exl2.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_llama_exl2.py",
"repo_id": "text-generation-inference",
"token_count": 886
} | 287 |
import pytest
@pytest.fixture(scope="module")
def flash_pali_gemma_handle(launcher):
with launcher(
"google/paligemma2-3b-pt-224",
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_pali_gemma(flash_pali_gemma_handle):
await flash_pali_gemma_handle.health(300)
return flash_pali_gemma_handle.client
async def test_flash_pali_gemma_image(flash_pali_gemma, response_snapshot):
car_image = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg"
response = await flash_pali_gemma.generate(
        f"![]({car_image})",
max_new_tokens=20,
)
assert (
response.generated_text == "\nBrown\nCar\nColor\nCool\nDecor\n\n\n\n\n\n\n?\n?"
)
assert response == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_pali_gemma2.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_pali_gemma2.py",
"repo_id": "text-generation-inference",
"token_count": 356
} | 288 |
import pytest
import json
import requests
@pytest.fixture(scope="module")
def model_handle(launcher):
"""Fixture to provide the base URL for API calls."""
with launcher(
"google/gemma-3-4b-it",
num_shard=2,
disable_grammar_support=False,
) as handle:
yield handle
@pytest.fixture(scope="module")
async def model_fixture(model_handle):
await model_handle.health(300)
return model_handle.client
# Sample JSON Schema for testing
person_schema = {
"type": "object",
"$id": "https://example.com/person.schema.json",
"$schema": "https://json-schema.org/draft/2020-12/schema",
"title": "Person",
"properties": {
"firstName": {
"type": "string",
"description": "The person's first name.",
"minLength": 4,
},
"lastName": {
"type": "string",
"description": "The person's last name.",
"minLength": 4,
},
"hobby": {
"description": "The person's hobby.",
"type": "string",
"minLength": 4,
},
"numCats": {
"description": "The number of cats the person has.",
"type": "integer",
"minimum": 0,
},
},
"required": ["firstName", "lastName", "hobby", "numCats"],
}
# More complex schema for testing nested objects and arrays
complex_schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "integer", "minimum": 0},
"address": {
"type": "object",
"properties": {
"street": {"type": "string"},
"city": {"type": "string"},
"postalCode": {"type": "string"},
},
"required": ["street", "city"],
},
"hobbies": {"type": "array", "items": {"type": "string"}, "minItems": 1},
},
"required": ["name", "age", "hobbies"],
}
@pytest.mark.asyncio
@pytest.mark.private
async def test_json_schema_basic(model_fixture, response_snapshot):
"""Test basic JSON schema validation with the person schema."""
response = requests.post(
f"{model_fixture.base_url}/v1/chat/completions",
json={
"model": "tgi",
"messages": [
{
"role": "user",
"content": "David is a person who likes trees and nature. He enjoys studying math and science. He has 2 cats.",
},
],
"seed": 42,
"temperature": 0.0,
"response_format": {
"type": "json_schema",
"value": {"name": "person", "strict": True, "schema": person_schema},
},
},
)
result = response.json()
# Validate response format
content = result["choices"][0]["message"]["content"]
parsed_content = json.loads(content)
assert "firstName" in parsed_content
assert "lastName" in parsed_content
assert "hobby" in parsed_content
assert "numCats" in parsed_content
assert isinstance(parsed_content["numCats"], int)
assert parsed_content["numCats"] >= 0
assert result == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_json_schema_complex(model_fixture, response_snapshot):
"""Test complex JSON schema with nested objects and arrays."""
response = requests.post(
f"{model_fixture.base_url}/v1/chat/completions",
json={
"model": "tgi",
"messages": [
{
"role": "user",
"content": "John Smith is 30 years old. He lives on Maple Street in Boston. He enjoys botany, astronomy, and solving mathematical puzzles.",
},
],
"seed": 42,
"temperature": 0.0,
"response_format": {
"type": "json_schema",
"value": {
"name": "complex_person",
"strict": True,
"schema": complex_schema,
},
},
},
)
result = response.json()
# Validate response format
content = result["choices"][0]["message"]["content"]
parsed_content = json.loads(content)
assert "name" in parsed_content
assert "age" in parsed_content
assert "hobbies" in parsed_content
assert "address" in parsed_content
assert "street" in parsed_content["address"]
assert "city" in parsed_content["address"]
assert isinstance(parsed_content["hobbies"], list)
assert len(parsed_content["hobbies"]) >= 1
assert result == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_json_schema_stream(model_fixture, response_snapshot):
"""Test JSON schema validation with streaming."""
response = requests.post(
f"{model_fixture.base_url}/v1/chat/completions",
json={
"model": "tgi",
"messages": [
{
"role": "user",
"content": "David is a person who likes to ride bicycles. He has 2 cats.",
},
],
"seed": 42,
"temperature": 0.0,
"response_format": {
"type": "json_schema",
"value": {"name": "person", "strict": True, "schema": person_schema},
},
"stream": True,
},
stream=True,
)
chunks = []
content_generated = ""
for line in response.iter_lines():
if line:
# Remove the "data: " prefix and handle the special case of "[DONE]"
data = line.decode("utf-8")
if data.startswith("data: "):
data = data[6:]
if data != "[DONE]":
chunk = json.loads(data)
chunks.append(chunk)
if "choices" in chunk and len(chunk["choices"]) > 0:
if (
"delta" in chunk["choices"][0]
and "content" in chunk["choices"][0]["delta"]
):
content_generated += chunk["choices"][0]["delta"]["content"]
# Validate the final assembled JSON
parsed_content = json.loads(content_generated)
assert "firstName" in parsed_content
assert "lastName" in parsed_content
assert "hobby" in parsed_content
assert "numCats" in parsed_content
assert isinstance(parsed_content["numCats"], int)
assert parsed_content["numCats"] >= 0
assert chunks == response_snapshot
| text-generation-inference/integration-tests/models/test_json_schema_constrain.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_json_schema_constrain.py",
"repo_id": "text-generation-inference",
"token_count": 3156
} | 289 |
import os
import pytest
@pytest.fixture(scope="module", params=["hub-neuron", "hub", "local-neuron"])
async def tgi_service(request, neuron_launcher, neuron_model_config):
"""Expose a TGI service corresponding to a model configuration
For each model configuration, the service will be started using the following
deployment options:
- from the hub original model (export parameters chosen after hub lookup),
- from the hub pre-exported neuron model,
- from a local path to the neuron model.
"""
# the tgi_env.py script will take care of setting these
for var in [
"MAX_BATCH_SIZE",
"MAX_INPUT_TOKENS",
"MAX_TOTAL_TOKENS",
"HF_NUM_CORES",
"HF_AUTO_CAST_TYPE",
]:
if var in os.environ:
del os.environ[var]
if request.param == "hub":
model_name_or_path = neuron_model_config["model_id"]
elif request.param == "hub-neuron":
model_name_or_path = neuron_model_config["neuron_model_id"]
else:
model_name_or_path = neuron_model_config["neuron_model_path"]
service_name = neuron_model_config["name"]
with neuron_launcher(service_name, model_name_or_path) as tgi_service:
await tgi_service.health(600)
yield tgi_service
@pytest.mark.asyncio
async def test_model_single_request(tgi_service):
    # Just verify that generation works and nothing is raised, with several sets of params
# No params
await tgi_service.client.text_generation(
"What is Deep Learning?",
)
response = await tgi_service.client.text_generation(
"How to cook beans ?",
max_new_tokens=17,
details=True,
decoder_input_details=True,
)
assert response.details.generated_tokens == 17
# Sampling
await tgi_service.client.text_generation(
"What is Deep Learning?",
do_sample=True,
top_k=50,
top_p=0.9,
repetition_penalty=1.2,
max_new_tokens=128,
seed=42,
)
| text-generation-inference/integration-tests/neuron/test_implicit_env.py/0 | {
"file_path": "text-generation-inference/integration-tests/neuron/test_implicit_env.py",
"repo_id": "text-generation-inference",
"token_count": 827
} | 290 |
# https://www.gutenberg.org/cache/epub/103/pg103.txt
from openai import OpenAI
import os
import requests
if not os.path.exists("pg103.txt"):
response = requests.get("https://www.gutenberg.org/cache/epub/103/pg103.txt")
with open("pg103.txt", "w") as f:
f.write(response.text)
length = 130000
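# assumes ~4 characters per token, so length * 4 characters approximates a 130k-token prompt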
with open("pg103.txt", "r") as f:
data = f.read()
messages = [{"role": "user", "content": data[: length * 4]}]
client = OpenAI(base_url="http://localhost:8000/v1", api_key="w")
completion = client.chat.completions.create(
model="meta-llama/Llama-3.1-8B-Instruct", messages=messages, max_tokens=2
)
| text-generation-inference/load_tests/long_prompt2.py/0 | {
"file_path": "text-generation-inference/load_tests/long_prompt2.py",
"repo_id": "text-generation-inference",
"token_count": 250
} | 291 |
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "model_type")]
#[serde(rename_all = "snake_case")]
pub struct LlavaNext {
pub(crate) text_config: TextConfig,
pub(crate) vision_config: VisionConfig,
pub(crate) image_grid_pinpoints: Vec<(usize, usize)>,
}
fn get_anyres_image_grid_shape(
height: usize,
width: usize,
grid_pinpoints: &[(usize, usize)],
patch_size: usize,
) -> (usize, usize) {
let (height, width) = select_best_resolution(height, width, grid_pinpoints);
(height / patch_size, width / patch_size)
}
/// Selects the best resolution from a list of possible resolutions based on the original size.
/// This is done by calculating the effective and wasted resolution for each possible resolution.
/// The best fit resolution is the one that maximizes the effective resolution and minimizes the wasted resolution.
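/// For example, a 400x600 (height x width) image prefers (336, 672) over (672, 336):
/// the landscape candidate keeps ~159k effective pixels versus ~71k.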
fn select_best_resolution(
original_height: usize,
original_width: usize,
possible_resolutions: &[(usize, usize)],
) -> (usize, usize) {
let mut best_fit = None;
let mut max_effective_resolution = 0;
let mut min_wasted_resolution = f32::NEG_INFINITY;
for (height, width) in possible_resolutions {
let wscale = *width as f32 / original_width as f32;
let hscale = *height as f32 / original_height as f32;
        // f32 is only PartialOrd, so pick the smaller scale manually instead of std::cmp::min.
let scale = if wscale > hscale { hscale } else { wscale };
let downscaled_width = (*width as f32 * scale) as usize;
let downscaled_height = (*height as f32 * scale) as usize;
let effective_resolution = std::cmp::min(
downscaled_width * downscaled_height,
original_width * original_height,
);
let wasted_resolution = (width * height) - effective_resolution;
if effective_resolution > max_effective_resolution
|| (effective_resolution == max_effective_resolution
&& (wasted_resolution as f32) < min_wasted_resolution)
{
max_effective_resolution = effective_resolution;
min_wasted_resolution = wasted_resolution as f32;
best_fit = Some((*height, *width));
}
}
best_fit.unwrap_or((original_height, original_width))
}
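// Returns (feature tokens, newline tokens) after LLaVA-NeXT unpadding: one token
// per patch in the unpadded region, plus one newline token per remaining row.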
fn get_unpadded_features(
height: usize,
width: usize,
npatches: usize,
num_patch_height: usize,
num_patch_width: usize,
) -> (usize, usize) {
let current_height = npatches * num_patch_height;
let current_width = npatches * num_patch_width;
let aspect_ratio: f64 = width as f64 / height as f64;
let current_aspect_ratio: f64 = current_width as f64 / current_height as f64;
let (current_height, current_width) = if aspect_ratio > current_aspect_ratio {
let new_height = (height * current_width) / width;
let padding = (current_height - new_height) / 2;
(current_height - (2 * padding), current_width)
} else {
let new_width = (width * current_height) / height;
let padding = (current_width - new_width) / 2;
(current_height, current_width - (2 * padding))
};
let unpadded_features = current_height * current_width;
let newline_features = current_height;
(unpadded_features, newline_features)
}
impl LlavaNext {
pub fn get_number_of_features(&self, height: usize, width: usize) -> usize {
let image_size = self.vision_config.image_size;
let patch_size = self.vision_config.patch_size;
assert!(image_size % patch_size == 0);
let npatches = image_size / patch_size;
// Dimensions are intentionally swapped to be bug-compatible with
// upstream: https://github.com/LLaVA-VL/LLaVA-NeXT/issues/59
let (num_patch_width, num_patch_height) =
get_anyres_image_grid_shape(height, width, &self.image_grid_pinpoints, image_size);
let (unpadded_features, newline_features) =
get_unpadded_features(height, width, npatches, num_patch_height, num_patch_width);
// The base patch covers the entire image
let base_features = npatches.pow(2);
unpadded_features + newline_features + base_features
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct Llama4VisionConfig {
image_size: usize,
patch_size: usize,
pixel_shuffle_ratio: f64,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct Llama4 {
text_config: TextConfig,
vision_config: Llama4VisionConfig,
}
fn gcd(a: usize, b: usize) -> usize {
if b == 0 {
a
} else {
gcd(b, a % b)
}
}
fn get_factors(dividend: usize) -> HashSet<usize> {
let mut factors_set = HashSet::new();
for i in 1..=((dividend as f64).sqrt() as usize) {
if dividend % i == 0 {
factors_set.insert(i);
factors_set.insert(dividend / i);
}
}
factors_set
}
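// e.g. max_num_chunks = 4 and height = 336 yields every (h, w) tiling with
// h * w <= 4 tiles of 336px each, such as (336, 336), (672, 672) and (336, 1344).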
fn find_supported_resolutions(max_num_chunks: usize, height: usize) -> Vec<(usize, usize)> {
let patch_size = height;
let mut asp_dict: HashMap<(usize, usize), Vec<(usize, usize)>> = HashMap::new();
for chunk_size in (1..=max_num_chunks).rev() {
let mut _factors: Vec<_> = get_factors(chunk_size).into_iter().collect();
_factors.sort();
let _asp_ratios: Vec<(usize, usize)> =
_factors.iter().map(|&f| (f, chunk_size / f)).collect();
for (h, w) in _asp_ratios {
let divisor = gcd(h, w);
let key = (h / divisor, w / divisor); // reduced aspect ratio as key
asp_dict.entry(key).or_default().push((h, w));
}
}
let mut possible_resolutions = vec![];
for (_key, value) in asp_dict {
for (h, w) in value {
possible_resolutions.push((h * patch_size, w * patch_size));
}
}
possible_resolutions
}
fn get_best_fit(
original_height: usize,
original_width: usize,
possible_resolutions: &[(usize, usize)],
resize_to_max_canvas: bool,
) -> (usize, usize) {
let orig_h = original_height as f32;
let orig_w = original_width as f32;
let mut scales = Vec::with_capacity(possible_resolutions.len());
for &(h, w) in possible_resolutions.iter() {
let scale_h = h as f32 / orig_h;
let scale_w = w as f32 / orig_w;
let scale = scale_h.min(scale_w);
scales.push(scale);
}
let upscaling_options: Vec<f32> = scales.iter().copied().filter(|&s| s >= 1.0).collect();
let selected_scale = if !upscaling_options.is_empty() {
if resize_to_max_canvas {
upscaling_options.into_iter().fold(f32::MIN, f32::max)
} else {
upscaling_options.into_iter().fold(f32::MAX, f32::min)
}
} else {
let downscaling_options: Vec<f32> = scales.iter().copied().filter(|&s| s < 1.0).collect();
downscaling_options.into_iter().fold(f32::MIN, f32::max)
};
let chosen_canvas: Vec<(usize, usize)> = possible_resolutions
.iter()
.zip(scales.iter())
.filter(|&(_, &s)| (s - selected_scale).abs() < f32::EPSILON)
.map(|(&(h, w), _)| (h, w))
.collect();
if chosen_canvas.len() > 1 {
chosen_canvas
.into_iter()
.min_by_key(|(h, w)| h * w)
.unwrap()
} else {
chosen_canvas[0]
}
}
impl Llama4 {
pub fn image_size(&self) -> usize {
self.vision_config.image_size
}
pub fn patch_size(&self) -> usize {
self.vision_config.patch_size
}
pub fn pixel_shuffle_ratio(&self) -> f64 {
self.vision_config.pixel_shuffle_ratio
}
pub fn get_aspect_ratios(
&self,
height: usize,
width: usize,
max_chunks: usize,
) -> (usize, usize) {
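        // NOTE: tiles are image_size x image_size, so the tile edge (named
        // patch_size here) is vision_config.image_size, not vision_config.patch_size.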
let patch_size = self.vision_config.image_size;
let supported = find_supported_resolutions(max_chunks, patch_size);
let (target_h, target_w) = get_best_fit(height, width, &supported, false);
(target_h / patch_size, target_w / patch_size)
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct ClipVisionModel {
image_size: usize,
patch_size: usize,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct Idefics3 {}
impl Idefics3 {
pub fn get_max_longest_edge(&self) -> usize {
364
}
pub fn get_number_of_features(&self) -> usize {
169
}
pub fn get_max_longest_edge_for_image_resize(&self) -> usize {
1456
}
pub fn get_max_image_size(&self) -> usize {
4096
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct Idefics2 {}
impl Idefics2 {
pub fn get_number_of_features(&self, _height: usize, _width: usize) -> usize {
64
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct PaliTextConfig {
pub(crate) num_image_tokens: usize,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct Paligemma {
pub(crate) text_config: PaliTextConfig,
}
impl Paligemma {
pub fn get_number_of_features(&self, _height: usize, _width: usize) -> usize {
self.text_config.num_image_tokens
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct Qwen2VlVisionConfig {
pub(crate) depth: usize,
pub(crate) embed_dim: usize,
pub(crate) mlp_ratio: usize,
pub(crate) num_heads: usize,
pub(crate) in_chans: usize,
pub(crate) hidden_size: usize,
pub(crate) patch_size: usize,
pub(crate) spatial_merge_size: usize,
pub(crate) spatial_patch_size: usize,
pub(crate) temporal_patch_size: usize,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct Qwen2Vl {
pub(crate) vision_config: Qwen2VlVisionConfig,
}
impl Qwen2Vl {
pub fn get_number_of_features(&self, height: usize, width: usize) -> usize {
let num_pixels = height * width;
num_pixels / self.vision_config.patch_size.pow(2)
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct Qwen2_5VlVisionConfig {
// pub(crate) depth: usize,
// pub(crate) hidden_act: String,
// pub(crate) hidden_size: usize,
// pub(crate) intermediate_size: usize,
// pub(crate) num_heads: usize,
// pub(crate) in_chans: usize,
// pub(crate) out_hidden_size: usize,
// pub(crate) patch_size: usize,
// pub(crate) spatial_merge_size: usize,
pub(crate) spatial_patch_size: usize,
// pub(crate) window_size: usize,
// pub(crate) fullatt_block_indexes: Vec<usize>,
// pub(crate) tokens_per_second: usize,
// pub(crate) temporal_patch_size: usize,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct Qwen2_5Vl {
pub(crate) vision_config: Qwen2_5VlVisionConfig,
}
impl Qwen2_5Vl {
pub fn get_number_of_features(&self, height: usize, width: usize) -> usize {
let num_pixels = height * width;
num_pixels / self.vision_config.spatial_patch_size.pow(2)
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct Gemma3VisionConfig {
pub(crate) image_size: usize,
pub(crate) patch_size: usize,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct Gemma3 {
vision_config: Gemma3VisionConfig,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "model_type")]
#[serde(rename_all = "snake_case")]
pub enum Config {
Qwen2_5Vl(Qwen2_5Vl),
Qwen2Vl(Qwen2Vl),
LlavaNext(LlavaNext),
ClipVisionModel(ClipVisionModel),
Mistral,
Mamba,
Idefics,
Mllama,
Idefics2(Idefics2),
Idefics3(Idefics3),
Ssm,
GptBigcode,
Granite,
Santacoder,
Bloom,
Mpt,
Gpt2,
Gptj,
GptNeox,
Phi,
#[serde(rename = "phi-msft")]
PhiMsft,
Phi3,
Phimoe,
Llama,
Llama4(Llama4),
Baichuan,
Paligemma(Paligemma),
Gemma,
Gemma2,
Gemma3(Gemma3),
Gemma3Text,
Cohere,
Drbx,
Falcon,
Mixtral,
Starcoder2,
Qwen2,
Opt,
T5,
DeepseekV2,
DeepseekV3,
Qwen3,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct TextConfig {}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct VisionConfig {
pub(crate) image_size: usize,
pub(crate) patch_size: usize,
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_llava_next_features() {
let config = LlavaNext {
text_config: TextConfig {},
vision_config: VisionConfig {
image_size: 336,
patch_size: 14,
},
image_grid_pinpoints: vec![
(336, 672),
(672, 336),
(672, 672),
(1008, 336),
(336, 1008),
],
};
let slots = config.get_number_of_features(20, 20);
assert_eq!(slots, 1176);
let slots = config.get_number_of_features(640, 640);
assert_eq!(slots, 2928);
let slots = config.get_number_of_features(480, 640);
assert_eq!(slots, 2340);
let slots = config.get_number_of_features(899, 1024);
assert_eq!(slots, 2634);
let slots = config.get_number_of_features(1024, 899);
assert_eq!(slots, 2640);
let slots = config.get_number_of_features(1067, 1600);
assert_eq!(slots, 2144);
}
}
| text-generation-inference/router/src/config.rs/0 | {
"file_path": "text-generation-inference/router/src/config.rs",
"repo_id": "text-generation-inference",
"token_count": 6233
} | 292 |
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#include "column_remap.cuh"
#include "../util.cuh"
const int SHUF_BLOCKSIZE_X = 256;
const int SHUF_BLOCKSIZE_Y = 16;
__global__ void column_remap_kernel
(
const half* __restrict__ x,
half* __restrict__ x_new,
const int x_width,
const int x_height,
const uint32_t* x_map
)
{
int x_column = SHUF_BLOCKSIZE_X * blockIdx.x + threadIdx.x;
int x_row = SHUF_BLOCKSIZE_Y * blockIdx.y;
int x_stride = x_width;
int x_idx = x_row * x_stride + x_column;
int x_row_end = min(x_row + SHUF_BLOCKSIZE_Y, x_height);
int x_idx_end = x_row_end * x_stride + x_column;
int s_column = x_map[x_column];
int s_idx = x_row * x_stride + s_column;
while (x_idx < x_idx_end)
{
x_new[x_idx] = x[s_idx];
x_idx += x_stride;
s_idx += x_stride;
}
}
// Remap columns in x to correspond to sequential group index before matmul
//
// perform x -> seq_x such that seq_x @ seq_w == x @ w
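// e.g. with x_map = {2, 0, 1}, column j of x_new is column x_map[j] of x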
void column_remap_cuda
(
const half* x,
half* x_new,
const int x_height,
const int x_width,
const uint32_t* x_map
)
{
dim3 threads(SHUF_BLOCKSIZE_X, 1, 1);
dim3 blocks
(
(x_width + SHUF_BLOCKSIZE_X - 1) / SHUF_BLOCKSIZE_X,
(x_height + SHUF_BLOCKSIZE_Y - 1) / SHUF_BLOCKSIZE_Y,
1
);
column_remap_kernel<<<blocks, threads>>>(x, x_new, x_width, x_height, x_map);
}
| text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/column_remap.cu/0 | {
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/column_remap.cu",
"repo_id": "text-generation-inference",
"token_count": 696
} | 293 |
#include "q_gemm.cuh"
#include "util.cuh"
#include "matrix_view.cuh"
#include "../config.h"
#include "quant/qdq_2.cuh"
#include "quant/qdq_3.cuh"
#include "quant/qdq_4.cuh"
#include "quant/qdq_5.cuh"
#include "quant/qdq_6.cuh"
#include "quant/qdq_8.cuh"
#define GPTQ_BLOCK_KN_SIZE 128
#define GPTQ_BLOCK_M_SIZE_MAX 8
#define GPTQ_MAX_GROUPS_IN_BLOCK (GPTQ_BLOCK_KN_SIZE / 32)
#define EXL2_BLOCK_KN_SIZE 64
#define EXL2_BLOCK_M_SIZE_MAX 8
#define EXL2_MAX_GROUPS_IN_BLOCK (EXL2_BLOCK_KN_SIZE / 32)
#define CLEAR_N_SIZE 256
#include "q_gemm_kernel.cuh"
#include "q_gemm_kernel_gptq.cuh"
void gemm_half_q_half_cuda_part
(
const half* a,
QMatrix* b,
half* c,
int size_m,
int size_n,
int size_k,
int m_count,
bool clear,
const half* r_weights,
int r_weights_stride,
bool mul_r_weights
)
{
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (!b->is_gptq)
{
dim3 blockDim, gridDim;
blockDim.x = EXL2_BLOCK_KN_SIZE;
blockDim.y = 1;
blockDim.z = 1;
gridDim.x = DIVIDE(size_n, EXL2_BLOCK_KN_SIZE * 4);
gridDim.y = DIVIDE(size_m, m_count);
gridDim.z = DIVIDE(size_k, EXL2_BLOCK_KN_SIZE);
fp_gemm_half_q_half_kernel kernel = pick_gemm_half_q_half_kernel(m_count, r_weights != NULL, mul_r_weights);
kernel<<<gridDim, blockDim, 0, stream>>>
(
a,
b->cuda_q_weight,
b->cuda_q_scale,
b->cuda_q_scale_max,
c,
size_m,
size_n,
size_k,
b->groups,
b->cuda_q_group_map,
b->cuda_q_perm,
b->rows_8,
b->rows_6,
b->rows_5,
b->rows_4,
b->rows_3,
b->rows_2,
clear,
r_weights,
r_weights_stride
);
}
else
{
dim3 blockDim, gridDim;
blockDim.x = GPTQ_BLOCK_KN_SIZE;
blockDim.y = 1;
blockDim.z = 1;
gridDim.x = DIVIDE(size_n, GPTQ_BLOCK_KN_SIZE * 4);
gridDim.y = DIVIDE(size_m, m_count);
gridDim.z = DIVIDE(size_k, GPTQ_BLOCK_KN_SIZE);
fp_gemm_half_q_half_gptq_kernel kernel = pick_gemm_half_q_half_gptq_kernel(m_count, r_weights != NULL, mul_r_weights);
// DBGX((uint64_t) r_weights);
// if (r_weights)
// print_global_mem(r_weights, 1, 1, 1);
// DBGI(r_weights_stride);
kernel<<<gridDim, blockDim, 0, stream>>>
(
a,
b->cuda_q_weight,
b->cuda_gptq_qzeros,
b->cuda_gptq_scales,
c,
size_m,
size_n,
size_k,
b->groups,
b->gptq_groupsize,
b->cuda_q_perm,
b->rows_4,
clear,
r_weights,
r_weights_stride
);
}
}
void gemm_half_q_half_cuda
(
cublasHandle_t cublas_handle,
const half* a,
QMatrix* b,
half* c,
int size_m,
int size_n,
int size_k,
bool clear,
half* temp_dq,
bool force_cuda,
const half* r_weights,
const int r_weights_stride,
bool mul_r_weights
)
{
if (size_m > MAX_Q_GEMM_ROWS && !force_cuda)
{
// Reconstruct FP16 matrix, then cuBLAS
if (!temp_dq) temp_dq = b->temp_dq;
b->reconstruct(temp_dq);
//cublasSetMathMode(cublas_handle, CUBLAS_TENSOR_OP_MATH);
const half alpha = __float2half(1.0f);
const half beta = clear ? __float2half(0.0f) : __float2half(1.0f);
cublasHgemm(cublas_handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
size_n, size_m, size_k,
&alpha, temp_dq, size_n,
a, size_k,
&beta, c, size_n);
//const float alpha = 1.0f;
//const float beta = clear ? 0.0f : 1.0f;
//cublasSgemmEx(cublas_handle,
// CUBLAS_OP_N,
// CUBLAS_OP_N,
// size_n, size_m, size_k,
// &alpha, temp_dq, CUDA_R_16F, size_n,
// a, CUDA_R_16F, size_k,
// &beta, c, CUDA_R_16F, size_n);
//const float alpha = 1.0f;
//const float beta = clear ? 0.0f : 1.0f;
//cublasGemmEx(cublas_handle,
// CUBLAS_OP_N, CUBLAS_OP_N,
// size_n, size_m, size_k,
// &alpha, temp_dq, CUDA_R_16F, size_n,
// a, CUDA_R_16F, size_k,
// &beta, c, CUDA_R_16F, size_n,
// CUDA_R_16F, CUBLAS_GEMM_DFALT_TENSOR_OP);
}
else
{
// Quantized matmul
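        // One launch covers every full chunk of block_m_size_max rows (gridDim.y
        // strides over them); a second launch handles the ragged tail, if any.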
int block_m_size_max = b->is_gptq ? GPTQ_BLOCK_M_SIZE_MAX : EXL2_BLOCK_M_SIZE_MAX;
int max_chunks = size_m / block_m_size_max;
int last_chunk = max_chunks * block_m_size_max;
int last_chunk_size = size_m - last_chunk;
if (max_chunks)
{
gemm_half_q_half_cuda_part(a, b, c, last_chunk, size_n, size_k, block_m_size_max, clear, r_weights, r_weights_stride, mul_r_weights);
}
if (last_chunk_size)
{
gemm_half_q_half_cuda_part(a + last_chunk * size_k, b, c + last_chunk * size_n, last_chunk_size, size_n, size_k, last_chunk_size, clear, r_weights, r_weights_stride, mul_r_weights);
}
}
}
__global__ void clear_kernel
(
half* __restrict__ c,
const int size_m,
const int size_n
)
{
int m = blockIdx.y;
int n = (blockIdx.x * CLEAR_N_SIZE + threadIdx.x) * 8;
if (n >= size_n) return;
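    // each thread zeroes eight halves at once with a single 16-byte int4 store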
int4* c_ptr = (int4*)(c + m * size_n + n);
*c_ptr = {};
}
void clear_tensor_cuda
(
half* c,
int size_m,
int size_n
)
{
// dim3 blockDim, gridDim;
// blockDim.x = CLEAR_N_SIZE;
// blockDim.y = 1;
// gridDim.x = DIVIDE(size_n / 8, CLEAR_N_SIZE);
// gridDim.y = size_m;
// clear_kernel<<<gridDim, blockDim>>>(c, size_m, size_n);
}
| text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cu/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cu",
"repo_id": "text-generation-inference",
"token_count": 3563
} | 294 |
[
{
"repo_id": "kernels-community/paged-attention",
"sha": "1e0a9708f0fe47009a3d292226c5492474353258",
"variants": {
"torch25-cxx11-cu118-x86_64-linux": {
"hash": "sha256-99710450ce815fdd0eeab3862ed0940c37a236c4f6cd49399e0112d66c9e40cb",
"hash_type": "git_lfs_concat"
},
"torch25-cxx11-cu121-x86_64-linux": {
"hash": "sha256-bf136ffb4732e141e05738606a014fde18d3aa6d4345d6223858327c00eef2d1",
"hash_type": "git_lfs_concat"
},
"torch25-cxx11-cu124-x86_64-linux": {
"hash": "sha256-5ff343fc4feadf36ea38032d2a014a1cd6008fe22dea26191cd397745dbaf8ae",
"hash_type": "git_lfs_concat"
},
"torch25-cxx98-cu118-x86_64-linux": {
"hash": "sha256-5db4fd37dcc6ec49ea71eba49415758b98fc21699155632902c76a545b36c47a",
"hash_type": "git_lfs_concat"
},
"torch25-cxx98-cu121-x86_64-linux": {
"hash": "sha256-995ff1a0cfe569639bc1644b5d6d823ea47ad0da33fe1cf398370ee70a203eb3",
"hash_type": "git_lfs_concat"
},
"torch25-cxx98-cu124-x86_64-linux": {
"hash": "sha256-1a00b021ea1273acb003ebd459699287ebf3d03f949befa31ae91899fa90b9e8",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu118-x86_64-linux": {
"hash": "sha256-91e57835ae0f6e2df38c65c9e2eb47d33b609c7c117f6a86898740ad17653dba",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu124-x86_64-linux": {
"hash": "sha256-5435890298a7eca613c805c8aee08b5a4405a1a7ad38ad3bc43bba14b26683ae",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu126-aarch64-linux": {
"hash": "sha256-b3dffef571f4f813b727ce3b2fcb7b43ee9d2e793b594e6ccf3a694bac87280a",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu126-x86_64-linux": {
"hash": "sha256-7ce5d58943f52959cc9643477e4dc211c7592628968cc53714e307092c95a769",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu118-x86_64-linux": {
"hash": "sha256-c74c251ba84cf6ea4c0402ed6dec7dca92f46b101f299a0abb1bcab5c83d2165",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu124-x86_64-linux": {
"hash": "sha256-44661e14516679bfa1788a4919c01014e9cd2402ad6231947bf7a6ca55002ecd",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu126-aarch64-linux": {
"hash": "sha256-e28ca88f80f95eede03eae610c08f83caabe579e15d110d9e070e46b6435770f",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu126-x86_64-linux": {
"hash": "sha256-05eb63f56b6b665d0e25919a8f429c8c3b2e0e3fc55725885d0e68e9011ca283",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu118-x86_64-linux": {
"hash": "sha256-ef0c14844fd8df0ce765b85497c90ce1091b4a780642d86bf206799ba9d3c94a",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu126-aarch64-linux": {
"hash": "sha256-ab151aea475c6880ed15e8f9232bf8720f7f0f2b96acdac65a5bcb7e5ab727b1",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu126-x86_64-linux": {
"hash": "sha256-08345dd704dcea727b9c2c109664f1602f97908fed84522edb817d95eb859f74",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu128-aarch64-linux": {
"hash": "sha256-c2419e4057e26bd90360dacd30f1b51eea1fde2efed9bd4c7db034ffc2962a5a",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu128-x86_64-linux": {
"hash": "sha256-a85fa6b43d438380c9d064769d8dd509ebf5206327a326082c0c249c0704ca46",
"hash_type": "git_lfs_concat"
}
}
},
{
"repo_id": "kernels-community/moe",
"sha": "e3efab933893cde20c5417ba185fa3b7cc811b24",
"variants": {
"torch25-cxx11-cu118-x86_64-linux": {
"hash": "sha256-719817bc2320f52d510e4a62bceef41a0ba8c58ea0e67d844db4225add3c5783",
"hash_type": "git_lfs_concat"
},
"torch25-cxx11-cu121-x86_64-linux": {
"hash": "sha256-1b5973b5d9376e377ff223aed71936cc25f19367c8db7fcd9aa70960c15de290",
"hash_type": "git_lfs_concat"
},
"torch25-cxx11-cu124-x86_64-linux": {
"hash": "sha256-69e1e5603c01227c3e2cbd67c09dd39fa7c0d4ecf3f736a2eb07227f6bb8935b",
"hash_type": "git_lfs_concat"
},
"torch25-cxx98-cu118-x86_64-linux": {
"hash": "sha256-91626ab4046b04e1a0967cc5c8a60a248e611b413e1cace3e4bdb0fc3a68a0e4",
"hash_type": "git_lfs_concat"
},
"torch25-cxx98-cu121-x86_64-linux": {
"hash": "sha256-84dd628239aa3043bc048c51f513faf55042ccc3d372002bbc231b0aa6d6689f",
"hash_type": "git_lfs_concat"
},
"torch25-cxx98-cu124-x86_64-linux": {
"hash": "sha256-ffb9743f69aae59fba1cfed1fc9e2e0f90a9000121c2db5880f0e055a714931a",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu118-x86_64-linux": {
"hash": "sha256-30560d5c091a9be1914fc8bf42d86767cfb07f1b7335f1ee88797e42f31e7856",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu124-x86_64-linux": {
"hash": "sha256-6e2afd532fdc9cee8f532097a80e4c2139f47df8005c43c5cdac42204d6217e1",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu126-aarch64-linux": {
"hash": "sha256-93d46cc7701358cd5a4e5ae3fafde8120fdb765149b9a9224f52a802b7d48cf1",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu126-x86_64-linux": {
"hash": "sha256-e57c961ea9c1a411c5b348986e359b1e6f1102fa09cfaa82d20f96d09528098a",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu118-x86_64-linux": {
"hash": "sha256-946b982082c008220a667f44e4308c17933e0d4785cad72ececa35273275f09c",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu124-x86_64-linux": {
"hash": "sha256-227be46b6cc468fadc237bb616d14e4747ad122bc0a2cd5bbef1a2b89a63d5bf",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu126-aarch64-linux": {
"hash": "sha256-d0dc0c8f34608f7c735e804c606dff029708349e68d5b9d9df7541b2498c1e8e",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu126-x86_64-linux": {
"hash": "sha256-91b3df206bd4418e42d08608fdf652d65612342efc8f67958a66d68038179567",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu118-x86_64-linux": {
"hash": "sha256-4b0f4536cd8f24ef00f06e00dfa0123c03dada7de3394a6274ec5cfa3bbf31f6",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu126-aarch64-linux": {
"hash": "sha256-4c8468437ac977116f46be9a6871b0887f762ba44d3aea3c3ce2eb41637fb626",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu126-x86_64-linux": {
"hash": "sha256-9a0d84b8636a897e4a5abd243f48a71d7d470c2f8e28df6a6874a9d981105c0f",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu128-aarch64-linux": {
"hash": "sha256-11e6c4ce82a25d17664b4100af419f974fc312ac283195129c91519dac4d5812",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu128-x86_64-linux": {
"hash": "sha256-c49a6eda12752adf78690a5e985a55d3b85d6724be5d18db51cd03d5fc75cc9b",
"hash_type": "git_lfs_concat"
}
}
},
{
"repo_id": "kernels-community/punica-sgmv",
"sha": "9ae1b469cb39c33df9ddd61657c6359acc423714",
"variants": {
"torch26-cxx11-cu118-x86_64-linux": {
"hash": "sha256-766062cd845bdebbe4e4391fda6f2663bebc2c110cbc2642d09c8c09ccf3f1d4",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu124-x86_64-linux": {
"hash": "sha256-c9cd76df7c84851aa566deb1c0d04ebddc1b1908a29df218344f2b3d53c4e683",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu126-aarch64-linux": {
"hash": "sha256-ae444bf53be3d469d4c9c58faef7d61a92e873e6104afe5aed2b2a1397333e99",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu126-x86_64-linux": {
"hash": "sha256-0706cc5ccf9cedae0bb6a938acdf2d5599a7b8f8b1fe46118b6ad61c0f3432af",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu118-x86_64-linux": {
"hash": "sha256-42cf390c6ae48b18041e201d4c67b4bf820b9f9cafe49a12c505f7920bae56ae",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu124-x86_64-linux": {
"hash": "sha256-75c97c23bfe32f65830341420d093a07df051828f385cbc5357b073c635f442f",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu126-aarch64-linux": {
"hash": "sha256-2ff5590ff6c298220c6e06142c971b08a686b98abb8d7dd1e6eb4539fa115cba",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu126-x86_64-linux": {
"hash": "sha256-70bcf04490865df6518c9d6a4c7eb2fee76b14642651f04a061c20ffa6fdb283",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu118-x86_64-linux": {
"hash": "sha256-727b8f5b22e4e91b956516235f26c39013a87ac6e196a0ce5f1897c2d959e69d",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu126-aarch64-linux": {
"hash": "sha256-bfddd19db7c9268a83e3cc5e281b007de80ab0fe611b3856ffd1691b400eca46",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu126-x86_64-linux": {
"hash": "sha256-940c68f5d4d8a2391b1eb3c7c5a56623428862f428aa5c6c1f7e62588c0e36fb",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu128-aarch64-linux": {
"hash": "sha256-781259a371b67bfbf744431c88a6ee847ab48459e73cb57264590de2728d6b3a",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu128-x86_64-linux": {
"hash": "sha256-8977a33d7884bebb9fb5e3d7daf157119206f0f18a22edb2b96ec593d5c81ae1",
"hash_type": "git_lfs_concat"
}
}
},
{
"repo_id": "kernels-community/quantization",
"sha": "6470f9b005797e00279eb9103463dfe0f8b7da00",
"variants": {
"torch25-cxx11-cu118-x86_64-linux": {
"hash": "sha256-f52c9b1a7cd98fb389c6d2a0b22a293cb36eb96af3a624f5aec761735861c96d",
"hash_type": "git_lfs_concat"
},
"torch25-cxx11-cu121-x86_64-linux": {
"hash": "sha256-e5f0da343363a562ce52f147a9534cd54a3efa90e70671f606cc2516f02a3876",
"hash_type": "git_lfs_concat"
},
"torch25-cxx11-cu124-x86_64-linux": {
"hash": "sha256-caad9300c155faf79c26426f10951ba75f931a05e741a5b39a24b064daabc040",
"hash_type": "git_lfs_concat"
},
"torch25-cxx98-cu118-x86_64-linux": {
"hash": "sha256-4fc87893de14a29ba4b55f5026ea05ec5901c0b52abd5ebae681ea0b791e858c",
"hash_type": "git_lfs_concat"
},
"torch25-cxx98-cu121-x86_64-linux": {
"hash": "sha256-72c975ea63fc524a38fcee5b2dbdb566eff0a0ea546ee5756441d04908e4e896",
"hash_type": "git_lfs_concat"
},
"torch25-cxx98-cu124-x86_64-linux": {
"hash": "sha256-28c5510e3b07eae2b3846b880f6111da65df024e1f24f81077d187a97c015364",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu118-x86_64-linux": {
"hash": "sha256-8444cf77686578a6b0f7e2fd29bf2783ba120ebf7df41573f61d2521fd0acc10",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu124-x86_64-linux": {
"hash": "sha256-6ea8e00625b5fe799fbe407e7de0fc08228cac26f9bbed2d70a6500026fe3bab",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu126-aarch64-linux": {
"hash": "sha256-0b8b8afbdaf9aa533895cb9e884e3ad3e9a34d483f05a1bbde1b8902f9dbeb0f",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu126-x86_64-linux": {
"hash": "sha256-e115e855d7ca4b97787f04c88e128432256c6b43d4823fb8889ab9985dc4cf36",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu118-x86_64-linux": {
"hash": "sha256-509f08c48a05584cc85c058607277fcbe3193e6cc61846dd2416d39e27c1d68e",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu124-x86_64-linux": {
"hash": "sha256-a10236bffd435296c736ae2762ab0836da2421297e46b377368a17b39d70c27b",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu126-aarch64-linux": {
"hash": "sha256-ca2cb56f3eea4c399a61e21ba9b577d718b250aa60a13f42f01019ddd5cd8b0c",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu126-x86_64-linux": {
"hash": "sha256-8fcd62d8243a30b63a03751cc0c15d24f6e00e43eae79f7281627f24e078bf9a",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu118-x86_64-linux": {
"hash": "sha256-60f5807ee3da937c57c1b6080c30632305aa4875ed5a52bf4e81968770b61b13",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu126-aarch64-linux": {
"hash": "sha256-64298b1713dc1d950915dc6569a06e2f541de3ed80aa5b32084246c1fdc7a958",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu126-x86_64-linux": {
"hash": "sha256-d9e219890dc28e8582ef21d6f81f2ebc361de218a86b742be63bc4714f102e5e",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu128-aarch64-linux": {
"hash": "sha256-d72549f51aefcf020bc74262bbbccb78094638c5ab9adc8667873d247c1cce86",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu128-x86_64-linux": {
"hash": "sha256-d31ac5f87d7c7f62c63c72946479193aed467c9417c0acead5137e0e1fa968f8",
"hash_type": "git_lfs_concat"
}
}
},
{
"repo_id": "kernels-community/quantization-eetq",
"sha": "1aa83b1261b0c4cad890184a4d689e6330a110b5",
"variants": {
"torch25-cxx11-cu118-x86_64-linux": {
"hash": "sha256-de257728ec38f48220d6c90b2fd960fed1f4c963e7cd6c204abfcf8607aedc20",
"hash_type": "git_lfs_concat"
},
"torch25-cxx11-cu121-x86_64-linux": {
"hash": "sha256-9027918cf6e52591f97b2c621355e12d9adf0dfe833a763219813bfecd1ad1a3",
"hash_type": "git_lfs_concat"
},
"torch25-cxx11-cu124-x86_64-linux": {
"hash": "sha256-15cd0a56311897b27ee50617491cf69e698053a9f9af7bd37937cbca8da9db13",
"hash_type": "git_lfs_concat"
},
"torch25-cxx98-cu118-x86_64-linux": {
"hash": "sha256-ca35ccbb193c795587f4a0ea072fda6f0a0ac7f745f7a68e35c35012098f0a57",
"hash_type": "git_lfs_concat"
},
"torch25-cxx98-cu121-x86_64-linux": {
"hash": "sha256-e7b12bd79163ee0f520b4a399f69c29e4a692667edf27f7d100f053434d8840c",
"hash_type": "git_lfs_concat"
},
"torch25-cxx98-cu124-x86_64-linux": {
"hash": "sha256-f08e850e856faa42c992188affa898a9b5a7be9d64980c4193871b0ad999da78",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu118-x86_64-linux": {
"hash": "sha256-9596f1c7cdbc7adf75898d18f370dc33ce0dfab2559301244411f5f4c4e581d4",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu124-x86_64-linux": {
"hash": "sha256-90002710f9e59d12bff260ce288c2b2b954f988f94ef920c8384c97946b7782b",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu126-aarch64-linux": {
"hash": "sha256-d230dd53423cf29387350d2e28cc691785135613408edb73c79f5d965dbb30e5",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu126-x86_64-linux": {
"hash": "sha256-fb95eb2faee971ebc0ede12678816c7796b64c723e4fd787aea97397f1c7f5cd",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu118-x86_64-linux": {
"hash": "sha256-027930f857347a4f1524fa37244c41c53ffb8c1ebd4eeb72fa32eea4a28b8787",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu124-x86_64-linux": {
"hash": "sha256-59ee042d58d57100c415f491a3db905671e094707f786f5f7e3260d5b827ad6a",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu126-aarch64-linux": {
"hash": "sha256-1f9d739bd8198c330b1f2893e0301740c54fa95272233fadb7a95c9b53a70383",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu126-x86_64-linux": {
"hash": "sha256-f56c5ea702982b9f75dedeb3a8998550b1b38bcacd77590926234e221fcc571f",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu118-x86_64-linux": {
"hash": "sha256-9c6f2b7fea5327abee2920da86dd57878d5f35aacacc886875050649073d1565",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu126-aarch64-linux": {
"hash": "sha256-fba9bd51e4aa5515ed81193743512dec2129f38555a16a54710e650a717259a8",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu126-x86_64-linux": {
"hash": "sha256-990b615c4b5d2f96874e7f88767681544d84771f3a11443cf0c994759f5e5f75",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu128-aarch64-linux": {
"hash": "sha256-6ad809543e1099f91b022f1393fe9a4527957b854cdfe6c8f4a0632c5497cb9d",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu128-x86_64-linux": {
"hash": "sha256-90aaa73d93db015c693a4089f2574c2ec2d4943bcee5c9b0ede2834a2c72c370",
"hash_type": "git_lfs_concat"
}
}
},
{
"repo_id": "kernels-community/rotary",
"sha": "804a326b61f181778b5eb4ebe27aecdb8fbcd845",
"variants": {
"torch25-cxx11-cu118-x86_64-linux": {
"hash": "sha256-198c67cc7330535da671086c3b6a0dd6189015381f25b409704b51224b25ae3c",
"hash_type": "git_lfs_concat"
},
"torch25-cxx11-cu121-x86_64-linux": {
"hash": "sha256-c2e8233d79dd36fc778502c0d44e7399907c2ef064981c7d122fb0652c71eca5",
"hash_type": "git_lfs_concat"
},
"torch25-cxx11-cu124-x86_64-linux": {
"hash": "sha256-452040cd5c335a3985da635a76db60a6fc0d9f8b1050fdf29f837d42ee2742ea",
"hash_type": "git_lfs_concat"
},
"torch25-cxx98-cu118-x86_64-linux": {
"hash": "sha256-b627ad5946713c8893f2847eb28f87203f3caaa84f2f35bb9f7b54ea9c3c8a5d",
"hash_type": "git_lfs_concat"
},
"torch25-cxx98-cu121-x86_64-linux": {
"hash": "sha256-30311ae1858e29754a4c69e081466e78202ffe8522d08afa46f06350f54cfcd1",
"hash_type": "git_lfs_concat"
},
"torch25-cxx98-cu124-x86_64-linux": {
"hash": "sha256-f988c59f5ac640c657f51c7a463f7bcc5ff789109275d8b14f524ad300f9ca55",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu118-x86_64-linux": {
"hash": "sha256-58998893b9992e3ede276388e09c1c31da0b6175d68cf37bcb75bd6f69dba240",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu124-x86_64-linux": {
"hash": "sha256-2fdc356b7a5ce2f090dead00253180a750ec9ff72c0afc5f3f07c96e2e603916",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu126-aarch64-linux": {
"hash": "sha256-d82cd995be25b4b88b0a4086269dcdeb400d0720141fbbfa47bf88cd639ae7e1",
"hash_type": "git_lfs_concat"
},
"torch26-cxx11-cu126-x86_64-linux": {
"hash": "sha256-a6cd702f278dcbd94f8412d51f79a2664844217b7344bdd24353760c72a789d5",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu118-x86_64-linux": {
"hash": "sha256-c759c2e38a17ea61446afb881cfa2a152d82350e6d38efecbec8ebe1e27cf81f",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu124-x86_64-linux": {
"hash": "sha256-d81512fa75acbe8a124b9890bb041fdd1e447794ee210bbb5d01343bd5033eec",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu126-aarch64-linux": {
"hash": "sha256-a81df695a1b980f899df3c05920a04ff15a89dd28c8cef4067e4e6579669292b",
"hash_type": "git_lfs_concat"
},
"torch26-cxx98-cu126-x86_64-linux": {
"hash": "sha256-868a4b47368a251018bf8f67f3effd8685fed6b01e64725da7e653d38831b166",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu118-x86_64-linux": {
"hash": "sha256-21ae5790dcf3936b66cd74641f815280ea648dffdc5259b7e1dba3fa5a8fc70d",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu126-aarch64-linux": {
"hash": "sha256-93466448e31897ef7db0e84e7d6d36824661b15a9841e2476ff181e1eab155c2",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu126-x86_64-linux": {
"hash": "sha256-e0ce52422c82c2ce966c44e61e0d65c789b36feaaeca818f88c2e746201cde9b",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu128-aarch64-linux": {
"hash": "sha256-eb155e56df00ad7d6455f1549d072c39f14c2b7e355f729bf35cb3e62d087df9",
"hash_type": "git_lfs_concat"
},
"torch27-cxx11-cu128-x86_64-linux": {
"hash": "sha256-63b3f8fc56c940d824cdf06d3cc5b504d82c14e005c7d2ca5360e384a2b16af2",
"hash_type": "git_lfs_concat"
}
}
}
]
| text-generation-inference/server/kernels.lock/0 | {
"file_path": "text-generation-inference/server/kernels.lock",
"repo_id": "text-generation-inference",
"token_count": 12258
} | 295 |
import torch
from text_generation_server.layers import (
TensorParallelEmbedding,
)
class ProcessGroup:
def __init__(self, rank: int, world_size: int):
self._rank = rank
self.world_size = world_size
def size(self) -> int:
return self.world_size
def rank(self) -> int:
return self._rank
class Weights:
def __init__(self, rank: int, world_size: int, vocab_size: int, hidden_dim: int):
self.weight = (
torch.arange(vocab_size * hidden_dim).float().view(vocab_size, hidden_dim)
)
self.process_group = ProcessGroup(rank, world_size)
def get_partial_sharded(self, name: str, dim: int):
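        # Each rank takes a contiguous block of ceil(size / world_size) rows;
        # Python slicing clamps the last rank's stop index at the tensor end.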
assert dim == 0
rank = self.process_group.rank()
world_size = self.process_group.size()
size = self.weight.shape[dim]
block_size = (size + world_size - 1) // world_size
start = rank * block_size
stop = (rank + 1) * block_size
return self.weight[start:stop]
def get_shape(self, name: str):
return self.weight.shape
def test_weight_hub_files_offline_error():
vocab_size = 17
weights = Weights(
rank=0,
world_size=1,
vocab_size=vocab_size,
hidden_dim=256,
)
embeddings = TensorParallelEmbedding("", weights)
input_ids = torch.arange(vocab_size)
output = embeddings.forward(input_ids)
assert embeddings.min_id == 0
assert embeddings.max_id == 17
torch.testing.assert_close(output, torch.arange(256 * 17).float().view(17, 256))
weights_0_2 = Weights(rank=0, world_size=2, vocab_size=vocab_size, hidden_dim=256)
weights_1_2 = Weights(rank=1, world_size=2, vocab_size=vocab_size, hidden_dim=256)
embeddings_0_2 = TensorParallelEmbedding("", weights_0_2, reduce=False)
assert embeddings_0_2.min_id == 0
assert embeddings_0_2.max_id == 9
torch.testing.assert_close(
embeddings_0_2.weight,
torch.cat([torch.arange(9 * 256), torch.zeros(256)], dim=0)
.view(10, 256)
.float(),
)
embeddings_1_2 = TensorParallelEmbedding("", weights_1_2, reduce=False)
assert embeddings_1_2.min_id == 9
assert embeddings_1_2.max_id == 17
torch.testing.assert_close(
embeddings_1_2.weight,
torch.cat([torch.arange(8 * 256) + 9 * 256, torch.zeros(256)], dim=0)
.view(9, 256)
.float(),
)
output_tp_0 = embeddings_0_2.forward(input_ids)
output_tp_1 = embeddings_1_2.forward(input_ids)
torch.testing.assert_close(output, output_tp_0 + output_tp_1)
| text-generation-inference/server/tests/utils/test_layers.py/0 | {
"file_path": "text-generation-inference/server/tests/utils/test_layers.py",
"repo_id": "text-generation-inference",
"token_count": 1146
} | 296 |
#!/usr/bin/env python
"""
Fused Attention
===============
This is a Triton implementation of the Flash Attention v2 algorithm from Tri Dao
(https://tridao.me/publications/flash2/flash2.pdf)
Credits: OpenAI kernel team, AMD ML Frameworks Triton team
Features supported:
1) Fwd with causal masking
2) Any sequence lengths without padding (currently fwd kernel only)
3) Support for different sequence lengths for q and k
4) Nested tensor API currently does not support dropout or bias.
Not currently supported:
1) Non power of two head dims
"""
import torch
import triton
import triton.language as tl
torch_dtype: tl.constexpr = torch.float16
@triton.jit
def cdiv_fn(x, y):
return (x + y - 1) // y
@triton.jit
def max_fn(x, y):
return tl.math.max(x, y)
@triton.jit
def dropout_offsets(philox_seed, philox_offset, dropout_p, m, n, stride):
ms = tl.arange(0, m)
ns = tl.arange(0, n)
return philox_offset + ms[:, None] * stride + ns[None, :]
@triton.jit
def dropout_rng(philox_seed, philox_offset, dropout_p, m, n, stride):
rng_offsets = dropout_offsets(
philox_seed, philox_offset, dropout_p, m, n, stride
).to(tl.uint32)
# TODO: use tl.randint for better performance
return tl.rand(philox_seed, rng_offsets)
@triton.jit
def dropout_mask(philox_seed, philox_offset, dropout_p, m, n, stride):
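    # Boolean keep-mask: True where the element survives dropout (rng_output > dropout_p).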
rng_output = dropout_rng(philox_seed, philox_offset, dropout_p, m, n, stride)
rng_keep = rng_output > dropout_p
return rng_keep
@triton.jit
def load_fn(block_ptr, first, second, pad):
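    # `first`/`second` flag whether dim 0 / dim 1 of the block may run out of
    # bounds and therefore needs a boundary check, padding with `pad`.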
if first and second:
tensor = tl.load(block_ptr, boundary_check=(0, 1), padding_option=pad)
elif first:
tensor = tl.load(block_ptr, boundary_check=(0,), padding_option=pad)
elif second:
tensor = tl.load(block_ptr, boundary_check=(1,), padding_option=pad)
else:
tensor = tl.load(block_ptr)
return tensor
@triton.jit
def _attn_fwd_inner(
acc,
l_i,
m_i,
q,
K_block_ptr,
V_block_ptr,
start_m,
actual_seqlen_k,
dropout_p,
philox_seed,
batch_philox_offset,
encoded_softmax_block_ptr,
block_min,
block_max,
offs_n_causal,
masked_blocks,
n_extra_tokens,
bias_ptr,
IS_CAUSAL: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
OFFS_M: tl.constexpr,
OFFS_N: tl.constexpr,
PRE_LOAD_V: tl.constexpr,
MASK_STEPS: tl.constexpr,
ENABLE_DROPOUT: tl.constexpr,
RETURN_ENCODED_SOFTMAX: tl.constexpr,
PADDED_HEAD: tl.constexpr,
):
# loop over k, v, and update accumulator
for start_n in range(block_min, block_max, BLOCK_N):
# For padded blocks, we will overrun the tensor size if
# we load all BLOCK_N. For others, the blocks are all within range.
k = load_fn(
K_block_ptr,
PADDED_HEAD,
MASK_STEPS and (n_extra_tokens != 0),
"zero",
)
if PRE_LOAD_V:
v = load_fn(
V_block_ptr,
MASK_STEPS and (n_extra_tokens != 0),
PADDED_HEAD,
"zero",
)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
# We start from end of seqlen_k so only the first iteration would need
# to be checked for padding if it is not a multiple of block_n
# TODO: This can be optimized to only be true for the padded block.
if MASK_STEPS: # noqa: SIM102
            # If this is the last block / iteration, we want to mask if the
            # sequence length is not a multiple of the block size. A solution
            # is to always do BLOCK_M // BLOCK_N + 1 steps if not is_modulo_mn;
            # the last step might be wasted, but that is acceptable.
            # TODO: check that this masking works for that case.
if (start_n + BLOCK_N == block_max) and (n_extra_tokens != 0):
boundary_m = tl.full([BLOCK_M], actual_seqlen_k, dtype=tl.int32)
size_n = start_n + OFFS_N[None, :]
mask = size_n < boundary_m[:, None]
qk = tl.where(mask, qk, float("-inf"))
if IS_CAUSAL:
causal_boundary = start_n + offs_n_causal
causal_mask = OFFS_M[:, None] >= causal_boundary[None, :]
qk = tl.where(causal_mask, qk, float("-inf"))
# -- compute qk ----
qk += tl.dot(q, k)
if bias_ptr is not None:
bias = load_fn(
bias_ptr, False, MASK_STEPS and (n_extra_tokens != 0), "zero"
)
# While bias is added after multiplying qk with sm_scale, our
# optimization to use 2^x instead of e^x results in an additional
# scale factor of log2(e) which we must also multiply the bias with.
qk += bias * 1.44269504089
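            # (Illustrative note, not part of the original kernel:) this works
            # because e^x == 2^(x * log2(e)) with log2(e) ~= 1.44269504089, so
            # every additive term inside the exponent, including the bias, must
            # be pre-scaled by log2(e) for exp2() to match exp().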
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk = qk - m_ij[:, None]
p = tl.math.exp2(qk)
# CAVEAT: Must update l_ij before applying dropout
l_ij = tl.sum(p, 1)
if ENABLE_DROPOUT:
philox_offset = (
batch_philox_offset
+ start_m * BLOCK_M * actual_seqlen_k
+ start_n
- BLOCK_N
)
keep = dropout_mask(
philox_seed,
philox_offset,
dropout_p,
BLOCK_M,
BLOCK_N,
actual_seqlen_k,
)
if RETURN_ENCODED_SOFTMAX:
tl.store(
encoded_softmax_block_ptr,
tl.where(keep, p, -p).to(encoded_softmax_block_ptr.type.element_ty),
)
p = tl.where(keep, p, 0.0)
elif RETURN_ENCODED_SOFTMAX:
tl.store(
encoded_softmax_block_ptr,
p.to(encoded_softmax_block_ptr.type.element_ty),
)
# -- update output accumulator --
alpha = tl.math.exp2(m_i - m_ij)
acc = acc * alpha[:, None]
if not PRE_LOAD_V:
v = load_fn(
V_block_ptr,
MASK_STEPS and (n_extra_tokens != 0),
PADDED_HEAD,
"zero",
)
# -- update m_i and l_i
l_i = l_i * alpha + l_ij
# update m_i and l_i
m_i = m_ij
acc += tl.dot(p.to(V_block_ptr.type.element_ty), v)
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
if bias_ptr is not None:
bias_ptr = tl.advance(bias_ptr, (0, BLOCK_N))
if RETURN_ENCODED_SOFTMAX:
encoded_softmax_block_ptr = tl.advance(
encoded_softmax_block_ptr, (0, BLOCK_N)
)
return acc, l_i, m_i
@triton.autotune(
configs=[
triton.Config(
{
"BLOCK_M": 256,
"BLOCK_N": 64,
"waves_per_eu": 2,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=8,
),
triton.Config(
{
"BLOCK_M": 128,
"BLOCK_N": 128,
"waves_per_eu": 2,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=4,
),
triton.Config(
{
"BLOCK_M": 256,
"BLOCK_N": 128,
"waves_per_eu": 2,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=8,
),
triton.Config(
{
"BLOCK_M": 128,
"BLOCK_N": 64,
"waves_per_eu": 3,
"PRE_LOAD_V": True,
},
num_stages=1,
num_warps=4,
),
triton.Config(
{
"BLOCK_M": 128,
"BLOCK_N": 64,
"waves_per_eu": 3,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=4,
),
triton.Config(
{
"BLOCK_M": 64,
"BLOCK_N": 64,
"waves_per_eu": 4,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=8,
),
triton.Config(
{
"BLOCK_M": 32,
"BLOCK_N": 32,
"waves_per_eu": 4,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=8,
),
# TODO: This config fails with head_size not pow2 with data mismatches.
# triton.Config({'BLOCK_M': 32, 'BLOCK_N': 16, 'waves_per_eu': 1,
# 'PRE_LOAD_V': False}, num_stages=1, num_warps=4),
triton.Config(
{
"BLOCK_M": 16,
"BLOCK_N": 16,
"waves_per_eu": 1,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=4,
),
triton.Config(
{
"BLOCK_M": 128,
"BLOCK_N": 64,
"waves_per_eu": 1,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=4,
),
],
key=["IS_CAUSAL", "dropout_p", "BLOCK_DMODEL"],
)
@triton.jit
def attn_fwd(
Q,
K,
V,
bias,
sm_scale,
L,
Out,
stride_qz,
stride_qh,
stride_qm,
stride_qk,
stride_kz,
stride_kh,
stride_kn,
stride_kk,
stride_vz,
stride_vh,
stride_vk,
stride_vn,
stride_oz,
stride_oh,
stride_om,
stride_on,
stride_bz,
stride_bh,
stride_bm,
stride_bn,
cu_seqlens_q,
cu_seqlens_k,
dropout_p,
philox_seed,
philox_offset_base,
encoded_softmax,
HQ: tl.constexpr,
HK: tl.constexpr,
ACTUAL_BLOCK_DMODEL: tl.constexpr,
MAX_SEQLENS_Q: tl.constexpr,
MAX_SEQLENS_K: tl.constexpr,
VARLEN: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
PRE_LOAD_V: tl.constexpr,
BIAS_TYPE: tl.constexpr,
ENABLE_DROPOUT: tl.constexpr,
RETURN_ENCODED_SOFTMAX: tl.constexpr,
):
start_m = tl.program_id(0)
off_h_q = tl.program_id(1)
off_z = tl.program_id(2)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
if VARLEN:
cu_seqlens_q_start = tl.load(cu_seqlens_q + off_z)
cu_seqlens_q_end = tl.load(cu_seqlens_q + off_z + 1)
seqlen_q = cu_seqlens_q_end - cu_seqlens_q_start
# We have a one-size-fits-all grid in id(0). Some seqlens might be too
# small for all start_m so for those we return early.
if start_m * BLOCK_M > seqlen_q:
return
cu_seqlens_k_start = tl.load(cu_seqlens_k + off_z)
cu_seqlens_k_end = tl.load(cu_seqlens_k + off_z + 1)
seqlen_k = cu_seqlens_k_end - cu_seqlens_k_start
else:
cu_seqlens_q_start = 0
cu_seqlens_k_start = 0
seqlen_q = MAX_SEQLENS_Q
seqlen_k = MAX_SEQLENS_K
# Now we compute whether we need to exit early due to causal masking.
# This is because for seqlen_q > seqlen_k, M rows of the attn scores
# are completely masked, resulting in 0s written to the output, and
# inf written to LSE. We don't need to do any GEMMs in this case.
# This block of code determines what N is, and if this WG is operating
# on those M rows.
n_blocks = cdiv_fn(seqlen_k, BLOCK_N)
if IS_CAUSAL:
# If seqlen_q == seqlen_k, the attn scores are a square matrix.
# If seqlen_q != seqlen_k, attn scores are rectangular which means
# the causal mask boundary is bottom right aligned, and ends at either
# the top edge (seqlen_q < seqlen_k) or left edge.
# This captures the decrease in n_blocks if we have a rectangular attn
# matrix
n_blocks_seqlen = cdiv_fn(
(start_m + 1) * BLOCK_M + seqlen_k - seqlen_q, BLOCK_N
)
# This is what adjusts the block_max for the current WG, only
# if IS_CAUSAL. Otherwise we want to always iterate through all n_blocks
n_blocks = min(n_blocks, n_blocks_seqlen)
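        # Worked example (illustrative): with seqlen_q=128, seqlen_k=64 and
        # BLOCK_M=BLOCK_N=64, the WG at start_m=0 gets
        # n_blocks_seqlen = cdiv_fn(64 + 64 - 128, 64) = 0 and exits early
        # below, while start_m=1 gets cdiv_fn(128 + 64 - 128, 64) = 1 block.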
# If we have no blocks after adjusting for seqlen deltas, this WG is
# part of the blocks that are all 0. We exit early.
if n_blocks <= 0:
o_offset = (
off_z * stride_oz + cu_seqlens_q_start * stride_om + off_h_q * stride_oh
)
O_block_ptr = tl.make_block_ptr(
base=Out + o_offset,
shape=(seqlen_q, BLOCK_DMODEL),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0),
)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=Out.type.element_ty)
# We still need to write 0s to the result
# tl.store(O_block_ptr,
# acc.to(Out.type.element_ty), boundary_check=(0,1))
# l_ptrs = L + off_z * hq * MAX_SEQLENS_Q + off_h_q * MAX_SEQLENS_Q
# + offs_m
# We store inf to LSE, not -inf because in the bwd pass,
# we subtract this
# from qk which makes it -inf, such that exp(qk - inf) = 0
# for these masked blocks.
# l = tl.full([BLOCK_M], value=float("inf"), dtype=tl.float32)
# tl.store(l_ptrs, l)
# TODO: Should dropout and return encoded softmax be handled here?
return
# If MQA / GQA, set the K and V head offsets appropriately.
GROUP_SIZE: tl.constexpr = HQ // HK
if GROUP_SIZE != 1:
off_h_k = off_h_q // GROUP_SIZE
else:
off_h_k = off_h_q
n_extra_tokens = 0
if seqlen_k < BLOCK_N:
n_extra_tokens = BLOCK_N - seqlen_k
elif seqlen_k % BLOCK_N:
n_extra_tokens = seqlen_k % BLOCK_N
PADDED_HEAD: tl.constexpr = ACTUAL_BLOCK_DMODEL != BLOCK_DMODEL
# Compute pointers for all the tensors used in this kernel.
q_offset = off_z * stride_qz + off_h_q * stride_qh + cu_seqlens_q_start * stride_qm
Q_block_ptr = tl.make_block_ptr(
base=Q + q_offset,
shape=(seqlen_q, ACTUAL_BLOCK_DMODEL),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0),
)
k_offset = off_z * stride_kz + off_h_k * stride_kh + cu_seqlens_k_start * stride_kn
K_block_ptr = tl.make_block_ptr(
base=K + k_offset,
shape=(ACTUAL_BLOCK_DMODEL, seqlen_k),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(BLOCK_DMODEL, BLOCK_N),
order=(0, 1),
)
v_offset = off_z * stride_vz + off_h_k * stride_vh + cu_seqlens_k_start * stride_vk
V_block_ptr = tl.make_block_ptr(
base=V + v_offset,
shape=(seqlen_k, ACTUAL_BLOCK_DMODEL),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, BLOCK_DMODEL),
order=(1, 0),
)
if BIAS_TYPE != 0:
bias_ptr = tl.make_block_ptr(
base=bias + off_h_q * stride_bh,
shape=(seqlen_q, seqlen_k),
strides=(stride_bm, stride_bn),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_N),
order=(1, 0),
)
else:
bias_ptr = None
if ENABLE_DROPOUT:
batch_philox_offset = (
philox_offset_base + (off_z * HQ + off_h_q) * seqlen_q * seqlen_k
)
else:
batch_philox_offset = 0
# We can ask to return the dropout mask without actually doing any dropout.
    # In this case, we return an invalid pointer to indicate that the mask is
    # not valid.
# TODO: Fix encoded softmax. It currently uses just h_q in the base offset.
if RETURN_ENCODED_SOFTMAX:
encoded_softmax_block_ptr = tl.make_block_ptr(
base=encoded_softmax + off_h_q * seqlen_q * seqlen_k,
shape=(seqlen_q, seqlen_k),
strides=(seqlen_k, 1),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_N),
order=(1, 0),
)
else:
encoded_softmax_block_ptr = 0
# initialize pointer to m and l
m_i = tl.full([BLOCK_M], float("-inf"), dtype=tl.float32)
l_i = tl.full([BLOCK_M], 1.0, dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# scale sm_scale by log_2(e) and use 2^x in the loop as we do not
# have native e^x support in HW.
qk_scale = sm_scale * 1.44269504089
# Q is loaded once at the beginning and shared by all N blocks.
q = load_fn(Q_block_ptr, True, PADDED_HEAD, "zero")
q = (q * qk_scale).to(Q_block_ptr.type.element_ty)
# Here we compute how many full and masked blocks we have.
padded_block_k = n_extra_tokens != 0
is_modulo_mn = not padded_block_k and (seqlen_q % BLOCK_M == 0)
if IS_CAUSAL:
# There are always at least BLOCK_M // BLOCK_N masked blocks.
# Additionally there might be one more due to dissimilar seqlens.
masked_blocks = BLOCK_M // BLOCK_N + (not is_modulo_mn)
else:
# Padding on Q does not need to be masked in the FA loop.
masked_blocks = padded_block_k
    # If IS_CAUSAL, `not is_modulo_mn` does not always result in an additional
    # block; in that case we might exceed n_blocks, so take the min.
masked_blocks = min(masked_blocks, n_blocks)
n_full_blocks = n_blocks - masked_blocks
block_min = 0
block_max = n_blocks * BLOCK_N
# Compute for full blocks. Here we set causal to false regardless of its
# value because there is no masking. Similarly we do not need padding.
if n_full_blocks > 0:
block_max = (n_blocks - masked_blocks) * BLOCK_N
acc, l_i, m_i = _attn_fwd_inner(
acc,
l_i,
m_i,
q,
K_block_ptr,
V_block_ptr,
start_m,
seqlen_k,
dropout_p,
philox_seed,
batch_philox_offset,
encoded_softmax_block_ptr,
# _, _, offs_n_causal, masked_blocks, n_extra_tokens, _
block_min,
block_max,
0,
0,
0,
bias_ptr,
# IS_CAUSAL, ....
False,
BLOCK_M,
BLOCK_DMODEL,
BLOCK_N,
offs_m,
offs_n,
# _, MASK_STEPS, ...
PRE_LOAD_V,
False,
ENABLE_DROPOUT,
RETURN_ENCODED_SOFTMAX,
PADDED_HEAD,
)
block_min = block_max
block_max = n_blocks * BLOCK_N
tl.debug_barrier()
    # Remaining blocks, if any, are masked and need boundary / causal checks.
if masked_blocks > 0:
offs_n_causal = offs_n + (seqlen_q - seqlen_k) if IS_CAUSAL else 0
K_block_ptr = tl.advance(K_block_ptr, (0, n_full_blocks * BLOCK_N))
V_block_ptr = tl.advance(V_block_ptr, (n_full_blocks * BLOCK_N, 0))
if bias_ptr is not None:
bias_ptr = tl.advance(bias_ptr, (0, n_full_blocks * BLOCK_N))
if RETURN_ENCODED_SOFTMAX:
encoded_softmax_block_ptr = tl.advance(
encoded_softmax_block_ptr, (0, n_full_blocks)
)
acc, l_i, m_i = _attn_fwd_inner(
acc,
l_i,
m_i,
q,
K_block_ptr,
V_block_ptr,
start_m,
seqlen_k,
dropout_p,
philox_seed,
batch_philox_offset,
encoded_softmax_block_ptr,
block_min,
block_max,
offs_n_causal,
masked_blocks,
n_extra_tokens,
bias_ptr,
IS_CAUSAL,
BLOCK_M,
BLOCK_DMODEL,
BLOCK_N,
offs_m,
offs_n,
# _, MASK_STEPS, ...
PRE_LOAD_V,
True,
ENABLE_DROPOUT,
RETURN_ENCODED_SOFTMAX,
PADDED_HEAD,
)
# epilogue
acc = acc / l_i[:, None]
if ENABLE_DROPOUT:
acc = acc / (1 - dropout_p)
# If seqlen_q > seqlen_k but the delta is not a multiple of BLOCK_M,
# then we have one block with a row of all NaNs which come from computing
# softmax over a row of all -infs (-inf - inf = NaN). We check for that here
# and store 0s where there are NaNs as these rows should've been zeroed out.
end_m_idx = (start_m + 1) * BLOCK_M
start_m_idx = start_m * BLOCK_M
causal_start_idx = seqlen_q - seqlen_k
acc = acc.to(Out.type.element_ty)
if IS_CAUSAL: # noqa: SIM102
if causal_start_idx > start_m_idx and causal_start_idx < end_m_idx:
out_mask_boundary = tl.full(
(BLOCK_DMODEL,), causal_start_idx, dtype=tl.int32
)
mask_m_offsets = start_m_idx + tl.arange(0, BLOCK_M)
out_ptrs_mask = mask_m_offsets[:, None] >= out_mask_boundary[None, :]
z = 0.0
acc = tl.where(out_ptrs_mask, acc, z.to(acc.type.element_ty))
# write back LSE
# l_ptrs = L + off_z * hq * MAX_SEQLENS_Q + off_h_q * MAX_SEQLENS_Q + offs_m
# If seqlen_q not multiple of BLOCK_M, we need to mask out the last
# few rows. This is only true for the last M block. For others,
    # overflow_size will be negative.
# overflow_size = end_m_idx - seqlen_q
# if overflow_size > 0:
# boundary = tl.full((BLOCK_M,), BLOCK_M - overflow_size, dtype=tl.int32)
# # This is a > check because mask being 0 blocks the store.
# l_ptrs_mask = boundary > tl.arange(0, BLOCK_M)
# tl.store(l_ptrs, m_i + tl.math.log2(l_i), mask=l_ptrs_mask)
# else:
# tl.store(l_ptrs, m_i + tl.math.log2(l_i))
# write back O
o_offset = off_z * stride_oz + cu_seqlens_q_start * stride_om + off_h_q * stride_oh
O_block_ptr = tl.make_block_ptr(
base=Out + o_offset,
shape=(seqlen_q, ACTUAL_BLOCK_DMODEL),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0),
)
# Need boundary check on this to make sure the padding from the
# Q and KV tensors in both dims are not part of what we store back.
# TODO: Do the boundary check optionally.
tl.store(O_block_ptr, acc, boundary_check=(0, 1))
def check_args(
q,
k,
v,
o,
varlen=True,
max_seqlens=None,
cu_seqlens_q=None,
cu_seqlens_k=None,
):
assert q.dim() == k.dim() and q.dim() == v.dim()
if varlen:
assert q.dim() == 3
total_q, nheads_q, head_size = q.shape
total_k, nheads_k, _ = k.shape
assert cu_seqlens_q is not None
assert cu_seqlens_k is not None
assert len(cu_seqlens_q) == len(cu_seqlens_k)
else:
assert q.dim() == 4
batch, nheads_q, seqlen_q, head_size = q.shape
_, nheads_k, seqlen_k, _ = k.shape
assert max_seqlens > 0
assert k.shape == v.shape
assert q.shape[-1] == k.shape[-1] and q.shape[-1] == v.shape[-1]
# TODO: Change assert if we support qkl f8 and v f16
assert q.dtype == k.dtype and q.dtype == v.dtype
# TODO: Fix assert to check head size <=256 once supported
assert head_size <= 128
assert o.shape == q.shape
assert (nheads_q % nheads_k) == 0
class _attention(torch.autograd.Function):
@staticmethod
def forward(
ctx,
q,
k,
v,
o,
cu_seqlens_q,
cu_seqlens_k,
max_seqlens_q,
max_seqlens_k,
causal=False,
sm_scale=1.0,
bias=None,
):
if o is None:
o = torch.empty_like(q, dtype=v.dtype)
check_args(
q,
k,
v,
o,
varlen=True,
cu_seqlens_q=cu_seqlens_q,
cu_seqlens_k=cu_seqlens_k,
)
if True: # varlen
total_q, nheads_q, head_size = q.shape
total_k, nheads_k, _ = k.shape
batch = len(cu_seqlens_q) - 1
q_strides = (0, q.stride(1), q.stride(0), q.stride(2))
k_strides = (0, k.stride(1), k.stride(0), k.stride(2))
v_strides = (0, v.stride(1), v.stride(0), v.stride(2))
o_strides = (0, o.stride(1), o.stride(0), o.stride(2))
else:
batch, seqlen_q, nheads_q, head_size = q.shape
_, seqlen_k, nheads_k, _ = k.shape
q_strides = (q.stride(0), q.stride(2), q.stride(1), q.stride(3))
k_strides = (k.stride(0), k.stride(2), k.stride(1), k.stride(3))
v_strides = (v.stride(0), v.stride(2), v.stride(1), v.stride(3))
o_strides = (o.stride(0), o.stride(2), o.stride(1), o.stride(3))
        # Round head_size up to the nearest power of 2, with a minimum of 16.
padded_d_model = 1 << (head_size - 1).bit_length()
padded_d_model = max(padded_d_model, 16)
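        # E.g. (illustrative): head_size 40 -> 64, 64 -> 64, 100 -> 128;
        # (head_size - 1).bit_length() yields the exponent of the next power
        # of two at or above head_size.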
def grid(META):
return triton.cdiv(max_seqlens_q, META["BLOCK_M"]), nheads_q, batch
encoded_softmax = None
# Seed the RNG so we get reproducible results for testing.
philox_seed = 0x1BF52
philox_offset = 0x1D4B42
if bias is not None:
bias_strides = (
bias.stride(0),
bias.stride(1),
bias.stride(2),
bias.stride(3),
)
else:
bias_strides = (0, 0, 0, 0)
attn_fwd[grid](
q,
k,
v,
bias,
sm_scale,
None,
o,
*q_strides,
*k_strides,
*v_strides,
*o_strides,
*bias_strides,
cu_seqlens_q,
cu_seqlens_k,
dropout_p=0.0,
philox_seed=philox_seed,
philox_offset_base=philox_offset,
encoded_softmax=encoded_softmax,
HQ=nheads_q,
HK=nheads_k,
ACTUAL_BLOCK_DMODEL=head_size,
MAX_SEQLENS_Q=max_seqlens_q,
MAX_SEQLENS_K=max_seqlens_k,
IS_CAUSAL=causal,
VARLEN=True,
BLOCK_DMODEL=padded_d_model,
BIAS_TYPE=0 if bias is None else 1,
ENABLE_DROPOUT=False,
RETURN_ENCODED_SOFTMAX=False,
)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.BLOCK_DMODEL = head_size
ctx.causal = causal
ctx.dropout_p = 0.0
ctx.philox_seed = philox_seed
ctx.philox_offset = philox_offset
ctx.encoded_softmax = encoded_softmax
ctx.return_encoded_softmax = False
return o, encoded_softmax
triton_attention = _attention.apply
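# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hypothetical varlen smoke test; the shapes, the cu_seqlens layout
# ([batch + 1] prefix sums over packed tokens), and the sm_scale value are
# assumptions mirroring the flash-attn varlen convention. Running it requires
# a CUDA device with Triton available.
if __name__ == "__main__":
    q = torch.randn(32, 16, 128, dtype=torch.float16, device="cuda")
    k = torch.randn_like(q)
    v = torch.randn_like(q)
    cu = torch.tensor([0, 16, 32], dtype=torch.int32, device="cuda")
    out, _ = triton_attention(q, k, v, None, cu, cu, 16, 16, True, 128**-0.5)
    print(out.shape)  # expected: torch.Size([32, 16, 128])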
| text-generation-inference/server/text_generation_server/layers/attention/flash_attn_triton.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/attention/flash_attn_triton.py",
"repo_id": "text-generation-inference",
"token_count": 14692
} | 297 |
from typing import Optional
import torch
import torch.nn as nn
from text_generation_server.layers.fp8 import fp8_quantize
from text_generation_server.layers.marlin.gptq import _check_valid_shape
from text_generation_server.layers.marlin.util import (
_check_marlin_kernels,
permute_scales,
)
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.utils.kernels import load_kernel
if SYSTEM == "cuda":
quantization = load_kernel(
module="quantization", repo_id="kernels-community/quantization"
)
else:
quantization = None
MARLIN_TILE_SIZE = 16
class GPTQMarlinFP8Linear(nn.Module):
"""
FP8 GPTQ-Marlin linear layer.
"""
def __init__(
self,
qweight: torch.Tensor,
scales: torch.Tensor,
bias: Optional[torch.Tensor],
) -> None:
super().__init__()
_check_marlin_kernels()
assert quantization is not None
scales = scales.unsqueeze(0)
if scales.shape[1] == 1:
out_features, in_features = qweight.shape
scales = scales.repeat(1, out_features)
qweight, scales = repack_fp8_for_marlin(qweight, scales)
in_features = qweight.shape[0] * MARLIN_TILE_SIZE
out_features = scales.shape[1]
_check_valid_shape(in_features=in_features, out_features=out_features)
self.qweight = qweight
self.scales = scales
        self.bias = bias
self.workspace = torch.zeros(
out_features // 64 * 16, dtype=torch.int, device=qweight.device
)
@classmethod
def from_unquant(cls, weight, bias, dtype):
qweight, scales = fp8_quantize(weight)
return cls(qweight=qweight, scales=scales.to(dtype), bias=bias)
@classmethod
def from_fp8(
cls,
weight: torch.Tensor,
scale: torch.Tensor,
bias: torch.Tensor,
dtype: torch.dtype,
**kwargs,
):
return cls(qweight=weight, scales=scale.to(dtype), bias=bias)
def forward(self, A: torch.Tensor) -> torch.Tensor:
assert quantization is not None
A_flat = A.view(-1, A.shape[-1])
C = quantization.fp8_marlin_gemm(
A_flat,
self.qweight,
self.scales,
self.workspace,
8,
A_flat.shape[0],
self.scales.shape[1],
A_flat.shape[1],
)
C = C.reshape(A.shape[:-1] + (self.scales.shape[1],))
if self.bias is not None:
C += self.bias
return C
def pack_fp8_as_int32(fp8_tensor: torch.Tensor) -> torch.Tensor:
"""
Repack FP8 weights to gptq format (packed int32 elements).
"""
assert fp8_tensor.dtype == torch.float8_e4m3fn
if fp8_tensor.shape[0] % 4 != 0:
raise ValueError(
f"Leading tensor dimension is not divisable by 4: {fp8_tensor.shape[0]}"
)
# Reshape to prepare for packing
reshaped = fp8_tensor.reshape(-1, 4, *fp8_tensor.shape[1:])
# Convert fp8 to uint8 (byte) representation
byte_tensor = reshaped.view(torch.uint8)
# Pack 4 uint8 values into one int32
packed = torch.zeros(
fp8_tensor.shape[0] // 4,
fp8_tensor.shape[1],
dtype=torch.int32,
device=fp8_tensor.device,
)
for i in range(4):
packed.bitwise_or_(byte_tensor[:, i].to(torch.int32) << i * 8)
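    # Example (illustrative): four consecutive FP8 bytes 0x01, 0x02, 0x03, 0x04
    # along the packed dimension become the int32 0x04030201, i.e. little-endian
    # byte order within each 32-bit word.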
return packed
def repack_fp8_for_marlin(weight: torch.Tensor, scales: torch.Tensor):
"""
Repack FP8 tensor for GPTQ-Marlin.
"""
out_features, in_features = weight.shape
    # Torch linear layer weights have shape [out_features, in_features];
    # GPTQ-quantized weights use [in_features / pack_factor, out_features],
    # so transpose before packing.
qweight = pack_fp8_as_int32(weight.t())
perm = torch.empty(0, dtype=torch.int, device=qweight.device)
repacked = quantization.gptq_marlin_repack(
qweight, perm, in_features, out_features, 8
)
scales = permute_scales(scales)
return repacked, scales
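# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hypothetical round trip through the FP8 Marlin linear layer; the
# shapes are assumptions, and it requires a CUDA device where the
# kernels-community/quantization kernels loaded above are available.
if __name__ == "__main__":
    w = torch.randn(4096, 4096, dtype=torch.float16, device="cuda")
    layer = GPTQMarlinFP8Linear.from_unquant(w, bias=None, dtype=torch.float16)
    x = torch.randn(8, 4096, dtype=torch.float16, device="cuda")
    print(layer(x).shape)  # expected: torch.Size([8, 4096])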
| text-generation-inference/server/text_generation_server/layers/marlin/fp8.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/marlin/fp8.py",
"repo_id": "text-generation-inference",
"token_count": 1856
} | 298 |
import torch
import time
import torch.distributed
from dataclasses import dataclass
from opentelemetry import trace
from transformers import (
AutoConfig,
AutoTokenizer,
AutoModelForCausalLM,
PreTrainedTokenizerBase,
)
from typing import Optional, Tuple, List, Type, Dict
from text_generation_server.utils import (
initialize_torch_distributed,
weight_files,
Weights,
)
from text_generation_server.models import Model
from text_generation_server.utils.chunks import concat_text_chunks
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.utils.quantization import get_loader
from text_generation_server.utils.tokens import batch_top_tokens
from text_generation_server.models.types import (
Batch,
Tokens,
Generation,
GeneratedText,
)
from text_generation_server.pb import generate_pb2
from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling
tracer = trace.get_tracer(__name__)
@dataclass
class CausalLMBatch(Batch):
batch_id: int
requests: List[generate_pb2.Request]
requests_idx_mapping: Dict[int, int]
# Decoder values
input_ids: torch.Tensor
attention_mask: torch.Tensor
position_ids: torch.Tensor
past_key_values: Optional[List[Tuple]]
# All tokens
all_input_ids: List[torch.Tensor]
# Lengths of all generations present in the batch
input_lengths: List[int]
prefix_offsets: List[int]
read_offsets: List[int]
# Generation helpers
next_token_choosers: List[NextTokenChooser]
stopping_criterias: List[StoppingCriteria]
top_n_tokens: List[int]
top_n_tokens_tensor: torch.Tensor
# Metadata used for padding
max_input_length: int
padding_right_offset: int
# Maximum number of tokens this batch will grow to
max_tokens: int
# Past metadata
keys_head_dim_last: bool = True
def to_pb(self) -> generate_pb2.CachedBatch:
return generate_pb2.CachedBatch(
id=self.batch_id,
request_ids=[r.id for r in self.requests],
size=len(self),
max_tokens=self.max_tokens,
current_tokens=len(self.input_ids),
)
@classmethod
def from_pb(
cls,
pb: generate_pb2.Batch,
tokenizer: PreTrainedTokenizerBase,
dtype: torch.dtype,
device: torch.device,
) -> "CausalLMBatch":
inputs = []
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
prefix_offsets = []
read_offsets = []
requests_idx_mapping = {}
# Parse batch
max_truncation = 0
padding_right_offset = 0
max_decode_tokens = 0
for i, r in enumerate(pb.requests):
requests_idx_mapping[r.id] = i
inputs.append(concat_text_chunks(r.input_chunks.chunks))
next_token_choosers.append(
NextTokenChooser.from_pb(r.parameters, device, tokenizer)
)
stopping_criteria = StoppingCriteria.from_pb(
r.stopping_parameters, tokenizer
)
stopping_criterias.append(stopping_criteria)
top_n_tokens.append(r.top_n_tokens)
max_truncation = max(max_truncation, r.truncate)
max_decode_tokens += stopping_criteria.max_new_tokens
padding_right_offset = max(
padding_right_offset, stopping_criteria.max_new_tokens
)
tokenized_inputs = tokenizer(
inputs,
return_tensors="pt",
padding=True,
return_token_type_ids=False,
truncation=True,
max_length=max_truncation,
).to(device)
for _ in pb.requests:
input_len = tokenized_inputs["input_ids"].shape[1]
prefix_offsets.append(input_len - 5)
read_offsets.append(input_len)
input_lengths = tokenized_inputs["attention_mask"].sum(1)
max_input_length = input_lengths.max()
input_ids = tokenized_inputs["input_ids"]
# Allocate maximum attention_mask
attention_mask = input_ids.new_zeros(
(pb.size, max_input_length + padding_right_offset)
)
# Copy tokenizer attention_mask into fully allocated attention_mask
attention_mask[:, :max_input_length] = tokenized_inputs["attention_mask"]
position_ids = tokenized_inputs["attention_mask"].long().cumsum(-1) - 1
position_ids.masked_fill_(tokenized_inputs["attention_mask"] == 0, 1)
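        # Worked example (illustrative): a left-padded mask [0, 0, 1, 1, 1]
        # gives cumsum(-1) - 1 = [-1, -1, 0, 1, 2]; masked_fill_ then rewrites
        # the padded positions to 1, yielding position_ids [1, 1, 0, 1, 2].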
all_input_ids = tokenized_inputs["input_ids"].T.split(1, dim=1)
top_n_tokens_tensor = torch.tensor(
top_n_tokens, device=device, dtype=torch.int64
)
max_tokens = len(inputs) * (max_input_length + max_decode_tokens)
return cls(
batch_id=pb.id,
requests=pb.requests,
requests_idx_mapping=requests_idx_mapping,
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=None,
all_input_ids=list(all_input_ids),
input_lengths=input_lengths.tolist(),
prefix_offsets=prefix_offsets,
read_offsets=read_offsets,
next_token_choosers=next_token_choosers,
stopping_criterias=stopping_criterias,
top_n_tokens=top_n_tokens,
top_n_tokens_tensor=top_n_tokens_tensor,
max_input_length=max_input_length.item(),
padding_right_offset=padding_right_offset,
max_tokens=max_tokens,
)
@tracer.start_as_current_span("filter")
def filter(self, request_ids: List[int]) -> Optional["CausalLMBatch"]:
if len(request_ids) == 0:
raise ValueError("Batch must have at least one request")
if len(request_ids) == len(self):
return self
keep_indices = []
# New values after filtering
requests_idx_mapping = {}
requests = []
input_lengths = []
prefix_offsets = []
read_offsets = []
all_input_ids = []
max_input_length = 0
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
total_remaining_decode_tokens = 0
new_padding_right_offset = 0
for i, request_id in enumerate(request_ids):
idx = self.requests_idx_mapping[request_id]
requests_idx_mapping[request_id] = i
keep_indices.append(idx)
requests.append(self.requests[idx])
prefix_offsets.append(self.prefix_offsets[idx])
read_offsets.append(self.read_offsets[idx])
all_input_ids.append(self.all_input_ids[idx])
request_input_length = self.input_lengths[idx]
input_lengths.append(request_input_length)
max_input_length = max(max_input_length, request_input_length)
next_token_choosers.append(self.next_token_choosers[idx])
stopping_criteria = self.stopping_criterias[idx]
stopping_criterias.append(stopping_criteria)
top_n_tokens.append(self.top_n_tokens[idx])
remaining_decode_tokens = (
stopping_criteria.max_new_tokens - stopping_criteria.current_tokens
)
total_remaining_decode_tokens += remaining_decode_tokens
new_padding_right_offset = max(
new_padding_right_offset, remaining_decode_tokens
)
# Apply indices to input_ids, attention mask, past key values and other items that need to be cached
input_ids = self.input_ids[keep_indices]
position_ids = self.position_ids[keep_indices]
self.attention_mask = self.attention_mask[
keep_indices,
-(self.padding_right_offset + max_input_length) : (
self.attention_mask.shape[1] - self.padding_right_offset
)
+ new_padding_right_offset,
]
# Ensure that past_key_values tensors can be updated in-place
if type(self.past_key_values[0]) is tuple:
self.past_key_values = [list(layer) for layer in self.past_key_values]
# Update tensors in-place to allow incremental garbage collection
past_kv_length = max_input_length - 1
for layer in self.past_key_values:
past_keys, past_values = layer
if len(past_keys.shape) == 3:
# Force past to be of dim [self_size, num_heads, ...] for easy indexing
past_keys = past_keys.view(len(self), -1, *past_keys.shape[-2:])
past_values = past_values.view(len(self), -1, *past_values.shape[-2:])
if self.keys_head_dim_last:
layer[0] = past_keys[keep_indices, :, -past_kv_length:, :]
else:
layer[0] = past_keys[keep_indices, :, :, -past_kv_length:]
del past_keys
layer[1] = past_values[keep_indices, :, -past_kv_length:, :]
del past_values
top_n_tokens_tensor = self.top_n_tokens_tensor[keep_indices]
max_tokens = len(request_ids) * max_input_length + total_remaining_decode_tokens
self.requests = requests
self.requests_idx_mapping = requests_idx_mapping
self.input_ids = input_ids
self.position_ids = position_ids
self.all_input_ids = all_input_ids
self.input_lengths = input_lengths
self.prefix_offsets = prefix_offsets
self.read_offsets = read_offsets
self.next_token_choosers = next_token_choosers
self.stopping_criterias = stopping_criterias
self.top_n_tokens = top_n_tokens
self.top_n_tokens_tensor = top_n_tokens_tensor
self.max_input_length = max_input_length
self.padding_right_offset = new_padding_right_offset
self.max_tokens = max_tokens
return self
@classmethod
@tracer.start_as_current_span("concatenate")
def concatenate(cls, batches: List["CausalLMBatch"]) -> "CausalLMBatch":
# Used for padding
total_batch_size = 0
max_input_length = 0
padding_right_offset = 0
for batch in batches:
total_batch_size += len(batch)
max_input_length = max(max_input_length, batch.max_input_length)
padding_right_offset = max(padding_right_offset, batch.padding_right_offset)
# Batch attributes
requests = []
requests_idx_mapping = {}
input_lengths = []
prefix_offsets = []
read_offsets = []
all_input_ids = []
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
max_tokens = 0
# Batch tensors
input_ids = None
attention_mask = None
position_ids = None
past_key_values = []
top_n_tokens_tensor = None
# Used for slicing correctly inside the tensors
# Equivalent to a cumsum on batch sizes
start_index = 0
for i, batch in enumerate(batches):
requests.extend(batch.requests)
input_lengths.extend(batch.input_lengths)
prefix_offsets.extend(batch.prefix_offsets)
read_offsets.extend(batch.read_offsets)
all_input_ids.extend(batch.all_input_ids)
next_token_choosers.extend(batch.next_token_choosers)
stopping_criterias.extend(batch.stopping_criterias)
top_n_tokens.extend(batch.top_n_tokens)
if i == 0:
requests_idx_mapping = batch.requests_idx_mapping
else:
# We need to offset the mapping for each batch by the cumulative batch size
for k, v in batch.requests_idx_mapping.items():
requests_idx_mapping[k] = v + start_index
# Slicing end index for this batch
end_index = start_index + len(batch)
# We only concatenate batches that did at least one step
if batch.past_key_values is None:
raise ValueError("only concatenate prefilled batches")
# Create empty tensor
# input_ids is always of shape [batch_size, 1]
# We do not need to pad it
if input_ids is None:
input_ids = batch.input_ids.new_empty((total_batch_size, 1))
# Copy to correct indices
input_ids[start_index:end_index] = batch.input_ids
# Create padded tensor
if attention_mask is None:
attention_mask = batch.attention_mask.new_zeros(
(total_batch_size, max_input_length + padding_right_offset),
)
if top_n_tokens_tensor is None:
top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros(
total_batch_size,
)
top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor
# We need to slice the attention mask to remove padding from previous steps
# and to remove unused allocated space
left_offset = max_input_length - batch.max_input_length
batch_left_offset = (
batch.attention_mask.shape[1]
- batch.max_input_length
- batch.padding_right_offset
)
attention_mask[
start_index:end_index,
left_offset:-padding_right_offset,
] = batch.attention_mask[
:,
batch_left_offset : -batch.padding_right_offset,
]
# Create empty tensor
# position_ids is always of shape [batch_size, 1]
if position_ids is None:
position_ids = batch.position_ids.new_empty((total_batch_size, 1))
position_ids[start_index:end_index] = batch.position_ids
# Shenanigans to get dimensions because BLOOM outputs a past with a different shape
# BLOOM Keys: [batch_size * num_heads, head_dim, seq_length]
# BLOOM Values: [batch_size * num_heads, seq_length, head_dim]
# And ensure that we can update tensors in-place
if isinstance(batch.past_key_values[0], tuple):
batch.past_key_values = [
[t.view(len(batch), -1, *t.shape[-2:]) for t in layer]
for layer in batch.past_key_values
]
elif len(batch.past_key_values[0][0].shape) == 3:
for layer in batch.past_key_values:
for k, t in enumerate(layer):
layer[k] = t.view(len(batch), -1, *t.shape[-2:])
# Add eventual padding tokens that were added while concatenating
max_tokens += batch.max_tokens + (
max_input_length - batch.max_input_length
) * len(batch)
start_index = end_index
first_past_kvs = batches[0].past_key_values
_, num_heads, padded_sequence_length, head_dim = first_past_kvs[0][1].shape
padded_past_values_shape = (
total_batch_size,
num_heads,
max_input_length - 1,
head_dim,
)
if batches[0].keys_head_dim_last:
padded_past_keys_shape = padded_past_values_shape
else:
# seq_length is last for BLOOM
padded_past_keys_shape = (
total_batch_size,
num_heads,
head_dim,
max_input_length - 1,
)
# Iterate over attention layers
# Concatenate past key values layer by layer to allow incremental garbage collection
for j in range(len(first_past_kvs)):
padded_past_keys = first_past_kvs[j][0].new_zeros(padded_past_keys_shape)
start_index = 0
for batch in batches:
past_keys = batch.past_key_values[j][0]
# Clear reference to the original tensor
batch.past_key_values[j][0] = None
# Slicing end index for this batch
end_index = start_index + len(batch)
# We slice the keys to remove the padding from previous batches
past_seq_len = batch.max_input_length - 1
if batch.keys_head_dim_last:
padded_past_keys[start_index:end_index, :, -past_seq_len:, :] = (
past_keys[:, :, -past_seq_len:, :]
)
else:
# BLOOM case
padded_past_keys[start_index:end_index, :, :, -past_seq_len:] = (
past_keys[:, :, :, -past_seq_len:]
)
del past_keys
start_index = end_index
padded_past_values = first_past_kvs[j][1].new_zeros(
padded_past_values_shape
)
start_index = 0
for batch in batches:
past_values = batch.past_key_values[j][1]
# Clear reference to the original tensor
batch.past_key_values[j][1] = None
# Slicing end index for this batch
end_index = start_index + len(batch)
# We slice the past values to remove the padding from previous batches
past_seq_len = batch.max_input_length - 1
padded_past_values[start_index:end_index, :, -past_seq_len:, :] = (
past_values[:, :, -past_seq_len:, :]
)
del past_values
# Update values
start_index = end_index
past_key_values.append([padded_past_keys, padded_past_values])
return cls(
batch_id=batches[0].batch_id,
requests=requests,
requests_idx_mapping=requests_idx_mapping,
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
all_input_ids=all_input_ids,
input_lengths=input_lengths,
prefix_offsets=prefix_offsets,
read_offsets=read_offsets,
next_token_choosers=next_token_choosers,
stopping_criterias=stopping_criterias,
top_n_tokens=top_n_tokens,
top_n_tokens_tensor=top_n_tokens_tensor,
max_input_length=max_input_length,
padding_right_offset=padding_right_offset,
keys_head_dim_last=batches[0].keys_head_dim_last,
max_tokens=max_tokens,
)
def __len__(self):
return len(self.requests)
@dataclass
class CausalLMBatchKeysLast(CausalLMBatch):
keys_head_dim_last: bool = False
class CausalLM(Model):
def __init__(
self,
model_id: str,
model_class,
revision: Optional[str] = None,
quantize: Optional[str] = None,
speculator: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
default_dtype=torch.float16,
trust_remote_code: bool = False,
tokenizer_class=AutoTokenizer,
config_class=AutoConfig,
batch_class=CausalLMBatch,
):
self.quantize = quantize
self.batch_class = batch_class
self.process_group, rank, world_size = initialize_torch_distributed()
if torch.cuda.is_available():
device = torch.device(f"cuda:{rank}")
dtype = default_dtype if dtype is None else dtype
elif hasattr(torch, "xpu") and torch.xpu.is_available():
device = torch.device(f"xpu:{rank}")
dtype = default_dtype if dtype is None else dtype
elif SYSTEM == "ipex":
device = torch.device("cpu")
            # Float16 is not supported on this target device.
dtype = torch.bfloat16 if dtype is None else dtype
else:
device = torch.device("cpu")
dtype = torch.float32 if dtype is None else dtype
tokenizer = tokenizer_class.from_pretrained(
model_id,
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
config = config_class.from_pretrained(
model_id,
revision=revision,
trust_remote_code=trust_remote_code,
)
config.quantize = quantize
config.speculator = speculator
if tokenizer.pad_token_id is None:
if config.pad_token_id is not None:
tokenizer.pad_token_id = config.pad_token_id
elif config.eos_token_id is not None:
tokenizer.pad_token_id = config.eos_token_id
elif tokenizer.eos_token_id is not None:
tokenizer.pad_token_id = tokenizer.eos_token_id
torch.distributed.barrier(group=self.process_group)
weights_loader = get_loader(
quantize=quantize, model_id=model_id, revision=revision
)
filenames = weight_files(model_id, revision=revision, extension=".safetensors")
weights = Weights(
filenames,
device=device,
dtype=dtype,
process_group=self.process_group,
weights_loader=weights_loader,
)
prefix = ""
model = model_class(prefix, config, weights)
torch.distributed.barrier(group=self.process_group)
super().__init__(
model_id=model_id,
model=model,
tokenizer=tokenizer,
requires_padding=True,
dtype=dtype,
device=device,
rank=rank,
world_size=world_size,
)
@classmethod
def fallback(
cls,
model_id: str,
revision: Optional[str] = None,
quantize: Optional[str] = None,
speculator: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
trust_remote_code: bool = False,
):
if speculator:
raise RuntimeError("Speculator decoding is not enabled for AutoModel")
device_count = 0
if torch.cuda.is_available():
device = torch.device("cuda")
device_count = torch.cuda.device_count()
dtype = torch.float16 if dtype is None else dtype
elif hasattr(torch, "xpu") and torch.xpu.is_available():
device = torch.device("xpu")
device_count = torch.xpu.device_count()
dtype = torch.float16 if dtype is None else dtype
else:
if quantize:
raise ValueError("quantization is not available on CPU")
device = torch.device("cpu")
dtype = torch.float32 if dtype is None else dtype
tokenizer = AutoTokenizer.from_pretrained(
model_id,
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
model = AutoModelForCausalLM.from_pretrained(
model_id,
revision=revision,
torch_dtype=dtype,
device_map=("auto" if device_count > 1 else None),
load_in_8bit=quantize == "bitsandbytes",
trust_remote_code=trust_remote_code,
)
if device_count == 1 and quantize != "bitsandbytes":
model = model.to(device)
if tokenizer.pad_token_id is None:
if model.config.pad_token_id is not None:
tokenizer.pad_token_id = model.config.pad_token_id
elif model.config.eos_token_id is not None and isinstance(
model.config.eos_token_id, int
):
tokenizer.pad_token_id = model.config.eos_token_id
elif tokenizer.eos_token_id is not None:
tokenizer.pad_token_id = tokenizer.eos_token_id
else:
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
self = cls.__new__(
cls,
)
self.batch_class = CausalLMBatch
super().__init__(
self,
model_id=model_id,
model=model,
tokenizer=tokenizer,
requires_padding=True,
dtype=dtype,
device=device,
)
self.quantize = quantize
return self
@property
def batch_type(self) -> Type[CausalLMBatch]:
return self.batch_class
def forward(
        self, input_ids, attention_mask, position_ids, past_key_values: Optional[List[Tuple]] = None
) -> Tuple[
torch.Tensor, Optional[torch.Tensor], List[Tuple[torch.Tensor, torch.Tensor]]
]:
# Model Forward
kwargs = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"past_key_values": past_key_values,
"use_cache": True,
"return_dict": True,
}
if self.has_position_ids:
kwargs["position_ids"] = position_ids
outputs = self.model.forward(**kwargs)
if isinstance(outputs, tuple):
outputs, speculative_logits = outputs
else:
speculative_logits = None
return outputs.logits, speculative_logits, outputs.past_key_values
@tracer.start_as_current_span("generate_token")
def generate_token(
self, batch: CausalLMBatch
) -> Tuple[List[Generation], Optional[CausalLMBatch], Tuple[int, int]]:
start = time.time_ns()
# slice the attention mask to the correct shape
attention_mask = batch.attention_mask[:, : -batch.padding_right_offset]
logits, speculative_logits, past = self.forward(
batch.input_ids,
attention_mask,
batch.position_ids,
batch.past_key_values,
)
# Results
generations: List[Generation] = []
stopped = True
# Speculation is not active for causal
accepted_ids = torch.ones_like(batch.input_ids)[:, 0]
batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens(
batch.top_n_tokens,
batch.top_n_tokens_tensor,
torch.log_softmax(logits[:, -1], -1),
accepted_ids,
)
start_decode = time.time_ns()
# Zipped iterator
iterator = zip(
batch.requests,
batch.input_lengths,
batch.prefix_offsets,
batch.read_offsets,
logits,
batch.next_token_choosers,
batch.stopping_criterias,
batch.all_input_ids,
batch.top_n_tokens,
batch_top_token_ids,
batch_top_token_logprobs,
)
# For each member of the batch
for i, (
request,
input_length,
prefix_offset,
read_offset,
logits,
next_token_chooser,
stopping_criteria,
all_input_ids,
top_n_tokens,
top_token_ids,
top_token_logprobs,
) in enumerate(iterator):
# Select next token
next_token_id, logprobs = next_token_chooser(
all_input_ids.view(1, -1), logits[-1:, :]
)
# Append next token to all tokens
all_input_ids = torch.cat([all_input_ids, next_token_id])
new_input_length = input_length + 1
# Generated token
next_token_logprob = logprobs[-1, next_token_id]
next_token_id_squeezed = next_token_id.squeeze()
next_token_text, prefix_offset, read_offset = self.decode_token(
all_input_ids[:, 0], prefix_offset, read_offset
)
# Evaluate stopping criteria
stop, reason = stopping_criteria(
next_token_id_squeezed,
next_token_text,
)
if not stop:
stopped = False
# Shard generations
# All generations will be appended in the rust sharded client
if i % self.world_size == self.rank:
if stop:
# Decode generated tokens
output_text, _, _ = self.decode_token(
all_input_ids[:, 0],
prefix_offset=len(all_input_ids)
- stopping_criteria.current_tokens
- 1,
read_offset=len(all_input_ids)
- stopping_criteria.current_tokens,
skip_special_tokens=True,
)
# Get seed
if isinstance(next_token_chooser.choice, Sampling):
seed = next_token_chooser.choice.seed
else:
seed = None
generated_text = GeneratedText(
output_text, stopping_criteria.current_tokens, reason, seed
)
else:
generated_text = None
# Prefill
if stopping_criteria.current_tokens == 1 and request.prefill_logprobs:
                # Remove the generated token so only prefill tokens remain, and add NaN for the first prompt token
prefill_logprobs = [float("nan")] + torch.log_softmax(
logits, -1
).gather(1, all_input_ids[1:]).squeeze(1)[
-new_input_length:-1
].tolist()
prefill_token_ids = all_input_ids[-new_input_length:-1]
prefill_texts = self.tokenizer.batch_decode(
prefill_token_ids,
clean_up_tokenization_spaces=False,
skip_special_tokens=False,
)
prefill_tokens = Tokens(
prefill_token_ids,
prefill_logprobs,
prefill_texts,
is_special=[],
)
else:
prefill_tokens = None
if top_n_tokens > 0:
all_top_tokens = []
for top_token_ids, top_token_logprobs in zip(
top_token_ids, top_token_logprobs
):
toptoken_texts = self.tokenizer.batch_decode(
top_token_ids,
clean_up_tokenization_spaces=False,
skip_special_tokens=False,
)
special_toptokens = [
token_id in self.all_special_ids
for token_id in top_token_ids
]
top_tokens = Tokens(
top_token_ids,
top_token_logprobs,
toptoken_texts,
special_toptokens,
)
all_top_tokens.append(top_tokens)
top_tokens = all_top_tokens
else:
top_tokens = None
generation = Generation(
request.id,
prefill_tokens,
Tokens(
[next_token_id_squeezed],
[next_token_logprob],
[next_token_text],
[next_token_id_squeezed.item() in self.all_special_ids],
),
generated_text,
top_tokens,
)
generations.append(generation)
# Update values
batch.next_token_choosers[i] = batch.next_token_choosers[i].advance_grammar(
next_token_id_squeezed.item()
)
batch.input_ids[i, 0] = next_token_id
batch.all_input_ids[i] = all_input_ids
batch.input_lengths[i] = new_input_length
batch.prefix_offsets[i] = prefix_offset
batch.read_offsets[i] = read_offset
batch.max_input_length = max(batch.max_input_length, new_input_length)
# We finished all generations in the batch; there is no next batch
if stopped:
forward_ns = start_decode - start
decode_ns = time.time_ns() - start_decode
return generations, None, (forward_ns, decode_ns)
# Slice unused values from prefill
batch.input_ids = batch.input_ids[:, :1]
# Update attention_mask as we added a new token to input_ids
batch.attention_mask[:, -batch.padding_right_offset] = 1
# Decrease right offset
batch.padding_right_offset -= 1
# Update position_ids
batch.position_ids = batch.position_ids[:, -1:] + 1
# Update past key values
batch.past_key_values = past
forward_ns = start_decode - start
decode_ns = time.time_ns() - start_decode
return generations, batch, (forward_ns, decode_ns)
| text-generation-inference/server/text_generation_server/models/causal_lm.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/causal_lm.py",
"repo_id": "text-generation-inference",
"token_count": 16985
} | 299 |
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Idefics model."""
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from transformers import PreTrainedModel
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
dataclass,
)
from text_generation_server.models.custom_modeling.idefics_config import IdeficsConfig
from text_generation_server.models.custom_modeling.idefics_vision import (
IdeficsVisionTransformer,
)
from text_generation_server.models.custom_modeling.idefics_perceiver import (
IdeficsPerceiverResampler,
)
from text_generation_server.layers import (
TensorParallelColumnLinear,
TensorParallelEmbedding,
TensorParallelRowLinear,
SpeculativeHead,
FastLinear,
)
from text_generation_server.layers.rotary import PositionRotaryEmbedding
from text_generation_server.utils.import_utils import SYSTEM
from loguru import logger
if SYSTEM == "cuda":
import dropout_layer_norm
elif SYSTEM == "rocm":
import vllm._custom_ops as ops
else:
dropout_layer_norm = None
@dataclass
class BaseModelOutputWithPastImage(BaseModelOutputWithPast):
image_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
class CausalLMOutputWithPastImage(CausalLMOutputWithPast):
image_hidden_states: Optional[torch.FloatTensor] = None
# logger = logging.get_logger(__name__)
# _CONFIG_FOR_DOC = "IdeficsConfig"
# IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST = [
# "HuggingFaceM4/idefics-9b",
# "HuggingFaceM4/idefics-80b",
# # See all Idefics models at https://huggingface.co/models?filter=idefics
# ]
def expand_inputs_for_generation(
input_ids,
expand_size=1,
is_encoder_decoder=False,
attention_mask=None,
encoder_outputs=None,
**model_kwargs,
):
expanded_return_idx = (
torch.arange(input_ids.shape[0])
.view(-1, 1)
.repeat(1, expand_size)
.view(-1)
.to(input_ids.device)
)
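    # Worked example (illustrative): batch_size=2 and expand_size=3 give
    # expanded_return_idx = [0, 0, 0, 1, 1, 1], i.e. each batch row is
    # repeated expand_size times in place before the index_select calls below.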
input_ids = input_ids.index_select(0, expanded_return_idx)
if "token_type_ids" in model_kwargs:
token_type_ids = model_kwargs["token_type_ids"]
model_kwargs["token_type_ids"] = token_type_ids.index_select(
0, expanded_return_idx
)
if attention_mask is not None:
model_kwargs["attention_mask"] = attention_mask.index_select(
0, expanded_return_idx
)
model_kwargs["image_attention_mask"] = model_kwargs[
"image_attention_mask"
].index_select(0, expanded_return_idx)
model_kwargs["pixel_values"] = model_kwargs["pixel_values"].index_select(
0, expanded_return_idx
)
if is_encoder_decoder:
if encoder_outputs is None:
raise ValueError(
"If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined."
)
encoder_outputs["last_hidden_state"] = (
encoder_outputs.last_hidden_state.index_select(
0, expanded_return_idx.to(encoder_outputs.last_hidden_state.device)
)
)
model_kwargs["encoder_outputs"] = encoder_outputs
return input_ids, model_kwargs
def update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder=False):
# must have this key set to at least None
model_kwargs["past_key_values"] = model_kwargs.get("past_key_values", None)
# update past
if "past_key_values" in outputs:
model_kwargs["past"] = outputs.past_key_values
elif "mems" in outputs:
model_kwargs["past"] = outputs.mems
elif "past_buckets_states" in outputs:
model_kwargs["past"] = outputs.past_buckets_states
else:
model_kwargs["past"] = None
# update token_type_ids with last value
if "token_type_ids" in model_kwargs:
token_type_ids = model_kwargs["token_type_ids"]
model_kwargs["token_type_ids"] = torch.cat(
[token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1
)
# update attention masks
if not is_encoder_decoder:
if "attention_mask" in model_kwargs:
attention_mask = model_kwargs["attention_mask"]
model_kwargs["attention_mask"] = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))],
dim=-1,
)
if "image_attention_mask" in model_kwargs:
image_attention_mask = model_kwargs["image_attention_mask"]
last_mask = image_attention_mask[:, -1, :].unsqueeze(1)
model_kwargs["image_attention_mask"] = last_mask
return model_kwargs
def prepare_inputs_for_generation(input_ids, past=None, **kwargs):
token_type_ids = kwargs.get("token_type_ids", None)
# only last token for inputs_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past:
position_ids = position_ids[:, -1].unsqueeze(-1)
pixel_values = kwargs.get("pixel_values", None)
image_attention_mask = kwargs.get("image_attention_mask", None)
# if pixel_values is None or image_attention_mask is None:
# raise ValueError("pixel values and image attention mask cannot be None")
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
"pixel_values": pixel_values,
"image_attention_mask": image_attention_mask,
}
def freeze_model(model, module_exceptions=[]):
mapping = {
"LayerNorm": nn.LayerNorm,
"Linear": nn.Linear,
"Embedding": nn.Embedding,
}
module_exceptions_mapped = [mapping[m] for m in module_exceptions]
for module in model.modules():
if module_exceptions and any(
[isinstance(module, t) for t in module_exceptions_mapped]
):
module.requires_grad_(
True
            )  # Explicitly setting it to True to avoid any mistakes
else:
module.requires_grad_(False)
return model
class IdeficsDecoupledPartialTPEmbedding(nn.Module):
def __init__(
self,
config,
weights,
):
super().__init__()
self.num_embeddings = config.vocab_size
self.weight = TensorParallelEmbedding(
prefix="model.embed_tokens", weights=weights
)
self.additional_weight = nn.Parameter(
weights.get_tensor("model.embed_tokens.additional_embedding.weight")
)
def forward(self, input_ids):
# Clone so that we don't modify the original input_ids later on
input_ids = input_ids.clone()
additional_vocab_indices = torch.where(input_ids >= self.num_embeddings)
input_ids_additional_vocab = input_ids[additional_vocab_indices]
additional_embeddings = torch.nn.functional.embedding(
input_ids_additional_vocab - self.num_embeddings, self.additional_weight
)
        # For a successful lookup, replace these input_ids with 0; the results will be discarded anyway
input_ids[additional_vocab_indices] = 0
full_vector = self.weight(input_ids)
# overwrite the records with high indices
full_vector[additional_vocab_indices] = additional_embeddings
return full_vector
class IdeficsDecoupledTensorParallelLinear(nn.Module):
# Derived from https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear
"""
    Implements a decoupling of parameters to allow freezing (or not) a subset of the parameters. In practice, the
regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `out_additional_features` > 0,
then it will create `out_additional_features * in_features` additional parameters that are always trained. If
`out_additional_features=0`, then the module defaults back to the regular behavior of `nn.Linear`.
"""
def __init__(
self,
config,
weights,
) -> None:
super().__init__()
self.fc = SpeculativeHead.load(config=config, prefix="lm_head", weights=weights)
self.additional_fc = FastLinear.load(
config=config,
prefix="lm_head.additional_fc",
weights=weights,
bias=False,
)
def forward(self, input: torch.Tensor) -> torch.Tensor:
output, speculative_logits = self.fc(input)
additional_features = self.additional_fc(input)
output = torch.cat((output, additional_features), -1)
return output, speculative_logits
def extra_repr(self) -> str:
"""Overwriting `nn.Linear.extra_repr` to include new parameters."""
return "in_features={}, out_features={}, out_additional_features={}, bias={}, partially_freeze={}".format(
self.in_features,
self.out_features,
self.out_additional_features,
self.bias is not None,
self.partially_freeze,
)
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
input_ids_shape: torch.Size,
dtype: torch.dtype,
device: torch.device,
past_key_values_length: int = 0,
):
"""
    Make the causal mask used for uni-directional (autoregressive) self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat(
[
torch.zeros(
tgt_len, past_key_values_length, dtype=dtype, device=device
),
mask,
],
dim=-1,
)
return mask[None, None, :, :].expand(
bsz, 1, tgt_len, tgt_len + past_key_values_length
)
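# Worked example (illustrative): for tgt_len=3 and past_key_values_length=0,
# row i of the mask is 0 for positions j <= i and dtype-min elsewhere:
#   [[0, min, min],
#    [0,   0, min],
#    [0,   0,   0]]
# so attention to future positions is suppressed before the softmax.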
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(
inverted_mask.to(torch.bool), torch.finfo(dtype).min
)
class IdeficsRMSNorm(nn.Module):
def __init__(self, prefix, weights, eps=1e-6):
"""
LlamaRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
weight = weights.get_tensor(f"{prefix}.weight")
self.weight = nn.Parameter(weight)
self.variance_epsilon = eps
def forward(self, hidden_states, residual=None):
if SYSTEM == "ipex":
import intel_extension_for_pytorch as ipex
out = ipex.llm.functional.add_rms_norm(
residual,
hidden_states,
self.weight,
None,
self.variance_epsilon,
residual is not None,
)
return out
elif hidden_states.shape[-1] > 8192:
if residual is not None:
hidden_states += residual
residual = hidden_states
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(
variance + self.variance_epsilon
)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
elif SYSTEM == "cuda":
# faster post attention rms norm
unwrap = False
if len(hidden_states.shape) > 2:
unwrap = True
shape = hidden_states.shape
hidden_states = hidden_states.reshape(-1, shape[-1])
normed_hidden_states, res, *rest = dropout_layer_norm.dropout_add_ln_fwd(
hidden_states,
residual,
self.weight,
None,
None,
None,
None,
None,
0.0,
self.variance_epsilon,
1.0,
0,
None,
False,
True, # Activate RMSNorm
)
if res is None:
res = hidden_states
if unwrap:
normed_hidden_states = normed_hidden_states.view(*shape)
return normed_hidden_states
elif SYSTEM == "rocm":
            # We use the vLLM RMSNorm kernel, which can be compiled for ROCm, instead of the Flash Attention kernels, which cannot.
if residual is not None:
hidden_states += residual
residual = hidden_states
unwrap = False
if len(hidden_states.shape) > 2:
unwrap = True
shape = hidden_states.shape
hidden_states = hidden_states.reshape(-1, shape[-1])
out = torch.empty_like(hidden_states)
ops.rms_norm(
out,
hidden_states,
self.weight.data,
self.variance_epsilon,
)
if unwrap:
out = out.view(*shape)
return out
else:
raise ValueError(
"Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction."
)
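# NOTE (illustrative): a minimal, unfused reference for the RMSNorm computed by
# the pure-PyTorch branch above; the CUDA/ROCm/IPEX branches fuse the same math
# (optionally adding a residual first). Not used by the model.
def _sketch_rms_norm(
    x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6
) -> torch.Tensor:
    x32 = x.to(torch.float32)
    variance = x32.pow(2).mean(-1, keepdim=True)
    normed = x32 * torch.rsqrt(variance + eps)
    return weight * normed.to(x.dtype)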
# this was adapted from LlamaMLP
class IdeficsMLP(nn.Module):
def __init__(
self,
config,
prefix,
weights,
):
super().__init__()
self.gate_up_proj = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"],
weights=weights,
dim=0,
bias=False,
)
self.down_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.down_proj",
weights=weights,
bias=False,
)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, hidden_states):
gate_up_states = self.gate_up_proj(hidden_states)
shape = gate_up_states.shape
gate_up_states = gate_up_states.view(*shape[:-1], 2, shape[-1] // 2)
return self.down_proj(
self.act_fn(gate_up_states[:, :, 0]) * gate_up_states[:, :, 1]
)
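# NOTE (illustrative): with `gate_proj` and `up_proj` loaded as one fused
# column-parallel matmul, the forward above is equivalent to this unfused sketch
# (weights as plain [out, in] matrices; SiLU assumed for `config.hidden_act`,
# as in Llama). Not used by the model.
def _sketch_gated_mlp(
    x: torch.Tensor,
    gate_w: torch.Tensor,
    up_w: torch.Tensor,
    down_w: torch.Tensor,
) -> torch.Tensor:
    hidden = torch.nn.functional.silu(x @ gate_w.T) * (x @ up_w.T)
    return hidden @ down_w.T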
# this was adapted from LlamaAttention
class IdeficsAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
config,
prefix,
weights,
qk_layer_norms: bool = False,
is_cross_attention: bool = False,
):
super().__init__()
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.hidden_size // self.num_heads
self.dropout = config.dropout
if (self.head_dim * self.num_heads) != self.hidden_size:
raise ValueError(
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
f" and `num_heads`: {self.num_heads})."
)
self.is_cross_attention = is_cross_attention
# if not hasattr(nn.functional, "scaled_dot_product_attention"):
# raise ValueError("this model requires pytorch 2.0 or higher")
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads //= weights.process_group.size()
if self.is_cross_attention:
# kv_input_dim = (
# self.hidden_size if not hasattr(config.vision_config, "embed_dim") else config.vision_config.embed_dim
# )
self.q_proj = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.q_proj", weights=weights, bias=False
)
self.k_proj = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.k_proj", weights=weights, bias=False
)
self.v_proj = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.v_proj", weights=weights, bias=False
)
else:
self.qkv = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=False,
)
self.o_proj = TensorParallelRowLinear.load(
config, prefix=f"{prefix}.o_proj", weights=weights, bias=False
)
self.rotary_emb = PositionRotaryEmbedding.static(
config=config, dim=self.head_dim, base=10000.0, device=weights.device
)
self.qk_layer_norms = qk_layer_norms
if self.qk_layer_norms:
self.q_layer_norm = IdeficsRMSNorm(
prefix=f"{prefix}.q_layer_norm",
weights=weights,
eps=config.rms_norm_eps,
)
self.k_layer_norm = IdeficsRMSNorm(
prefix=f"{prefix}.q_layer_norm",
weights=weights,
eps=config.rms_norm_eps,
)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return (
tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
.transpose(1, 2)
.contiguous()
)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: bool = False,
use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
# if key_value_states are provided this layer is used as a cross-attention layer
is_cross_attention = self.is_cross_attention or key_value_states is not None
bsz, q_len, _ = hidden_states.size()
if is_cross_attention:
query_states = self.q_proj(hidden_states).view(
bsz, q_len, self.num_heads, self.head_dim
) # .transpose(1, 2)
query_states = query_states.transpose(1, 2)
(
_,
kv_len,
_,
) = (
key_value_states.size()
) # Note that, in this case, `kv_len` == `kv_seq_len`
key_states = (
self.k_proj(key_value_states)
.view(bsz, kv_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
value_states = (
self.v_proj(key_value_states)
.view(bsz, kv_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
else:
qkv = self.qkv(hidden_states)
query_states, key_states, value_states = qkv.split(
self.num_heads * self.head_dim, dim=2
)
query_states = query_states.view(
bsz, q_len, self.num_heads, self.head_dim
) # .transpose(1, 2)
key_states = key_states.view(
bsz, q_len, self.num_heads, self.head_dim
            )  # .transpose(1, 2)
value_states = value_states.view(
bsz, q_len, self.num_heads, self.head_dim
) # .transpose(1, 2)
kv_seq_len = q_len
if past_key_value is not None:
kv_seq_len += past_key_value[0].shape[-2]
max_s = max(kv_seq_len, q_len)
cos, sin = self.rotary_emb.get_cos_sin(
position_ids.view(-1), max_s, hidden_states.dtype
)
query_shape = query_states.shape
key_shape = key_states.shape
self.rotary_emb(
query_states.view(-1, *query_shape[2:]),
key_states.reshape(-1, *key_shape[2:]),
cos,
sin,
)
query_states = query_states.view(query_shape)
key_states = key_states.view(key_shape)
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
kv_seq_len = key_states.shape[-2]
if past_key_value is not None:
kv_seq_len += past_key_value[0].shape[-2]
# [bsz, nh, t, hd]
if past_key_value is not None:
# reuse k, v, self_attention
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
past_key_value = (key_states, value_states) if use_cache else None
if self.qk_layer_norms:
query_states = self.q_layer_norm(query_states)
key_states = self.k_layer_norm(key_states)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
)
attn_output = nn.functional.scaled_dot_product_attention(
query_states,
key_states,
value_states,
attn_mask=attention_mask,
dropout_p=self.dropout,
)
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, q_len, -1)
attn_output = self.o_proj(attn_output)
attn_weights = None
if output_attentions:
logger.warning_once(
"attn_weights are not extracted in scaled_dot_product_attention. The model returns None instead"
)
return attn_output, attn_weights, past_key_value
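# NOTE (illustrative): at the `scaled_dot_product_attention` call above,
# query/key/value are all laid out as [bsz, num_heads, seq, head_dim] and the
# additive attention mask as [bsz, 1, q_len, kv_seq_len]; the kernel's default
# 1/sqrt(head_dim) scaling is relied upon, and causality comes from the mask
# rather than `is_causal`.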
# this was adapted from LlamaDecoderLayer
class IdeficsDecoderLayer(nn.Module):
def __init__(self, layer_id: int, config: IdeficsConfig, weights):
super().__init__()
self.process_group = weights.process_group
self.hidden_size = config.hidden_size
prefix = f"model.layers.{layer_id}"
self.self_attn = IdeficsAttention(
config=config,
prefix=f"{prefix}.self_attn",
weights=weights,
qk_layer_norms=False,
is_cross_attention=False,
)
self.mlp = IdeficsMLP(
config=config,
prefix=f"{prefix}.mlp",
weights=weights,
)
self.input_layernorm = IdeficsRMSNorm(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps
)
self.post_attention_layernorm = IdeficsRMSNorm(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
self.dropout = config.dropout
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
) -> Tuple[
torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
# hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
# hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs
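# NOTE (illustrative): the decoder layer above is a standard pre-norm residual
# block, i.e. h = h + attn(rms_norm(h)) followed by h = h + mlp(rms_norm(h));
# the dropout calls are intentionally commented out for inference.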
class IdeficsGatedCrossAttentionLayer(nn.Module):
def __init__(self, layer_id, config: IdeficsConfig, weights):
super().__init__()
self.process_group = weights.process_group
self.hidden_size = config.hidden_size
prefix = f"model.gated_cross_attn_layers.{layer_id}"
self.cross_attn = IdeficsAttention(
config=config,
prefix=f"{prefix}.cross_attn",
weights=weights,
qk_layer_norms=True,
is_cross_attention=True,
)
self.mlp = IdeficsMLP(
config=config,
prefix=f"{prefix}.mlp",
weights=weights,
)
self.input_layernorm = IdeficsRMSNorm(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps
)
self.post_attention_layernorm = IdeficsRMSNorm(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
        self.dropout = config.dropout
self.act_cross_attn = nn.Tanh()
self.act_dense = nn.Tanh()
self.alpha_cross_attn = nn.Parameter(
weights.get_tensor(f"{prefix}.alpha_cross_attn")
)
self.alpha_dense = nn.Parameter(weights.get_tensor(f"{prefix}.alpha_dense"))
if not (hasattr(self, "alpha_cross_attn") and hasattr(self, "alpha_dense")):
raise ValueError("Alpha parameters not initialized correctly!")
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
image_hidden_states: Optional[torch.Tensor] = None,
image_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
no_images: Optional[bool] = False,
) -> Tuple[
torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
no_images (`bool`, *optional*, defaults to `False`): If `True` the vision part is ignored
"""
if image_hidden_states is None:
            raise ValueError(
                "`image_hidden_states` (the visual features to be conditioned on) is"
                " required by the Idefics cross-attention module."
            )
if past_key_value is not None:
raise NotImplementedError(
"Past key value states are not implemented for Idefics cross attention module."
)
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights, present_key_value = self.cross_attn(
hidden_states=hidden_states,
key_value_states=image_hidden_states,
attention_mask=image_attention_mask,
output_attentions=output_attentions,
)
        # hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# when there are no images the model is used in pure language mode
gate = 0 if no_images else 1
hidden_states = (
residual + gate * self.act_cross_attn(self.alpha_cross_attn) * hidden_states
)
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
        # hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs
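# NOTE (illustrative): the gating above follows the Flamingo-style recipe: each
# residual branch is scaled by a learned tanh gate, so with the `alpha_*`
# parameters near zero the cross-attention block is close to the identity:
#
#     h = h + gate * tanh(alpha_cross_attn) * cross_attn(norm(h), image_states)
#     h = h + tanh(alpha_dense) * mlp(norm(h))
#
# where `gate` is 0 in pure-text mode (`no_images=True`) and 1 otherwise.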
LLAMA_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`IdeficsConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
# @add_start_docstrings(
# "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
# LLAMA_START_DOCSTRING,
# )
class IdeficsPreTrainedModel(PreTrainedModel):
config_class = IdeficsConfig
# base_model_prefix = "model"
# supports_gradient_checkpointing = True
# _no_split_modules = ["IdeficsDecoderLayer", "IdeficsGatedCrossAttentionLayer"]
# def _init_weights(self, module):
# # important: this ported version of Idefics isn't meant for training from scratch - only
# # inference and fine-tuning - so the proper init weights code has been removed - the m4 code
# # base should be used for training from scratch and it contains the correct code.
# std = self.config.initializer_range
# if isinstance(module, nn.Linear):
# module.weight.data.normal_(mean=0.0, std=std)
# if module.bias is not None:
# module.bias.data.zero_()
# elif isinstance(module, nn.Embedding):
# module.weight.data.normal_(mean=0.0, std=std)
# if module.padding_idx is not None:
# module.weight.data[module.padding_idx].zero_()
# def _set_gradient_checkpointing(self, module, value=False):
# if isinstance(module, IdeficsModel):
# module.gradient_checkpointing = value
# LLAMA_INPUTS_DOCSTRING = r"""
# Args:
# input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
# Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
# it.
# Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
# [`PreTrainedTokenizer.__call__`] for details.
# [What are input IDs?](../glossary#input-ids)
# attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
# Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
# - 1 for tokens that are **not masked**,
# - 0 for tokens that are **masked**.
# [What are attention masks?](../glossary#attention-mask)
# Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
# [`PreTrainedTokenizer.__call__`] for details.
# If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
# `past_key_values`).
# If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
# and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
# information on the default strategy.
# - 1 indicates the head is **not masked**,
# - 0 indicates the head is **masked**.
# position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
# Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
# config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
# past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
# Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
# `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
# `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
# Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
# blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
# If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
# don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
# `decoder_input_ids` of shape `(batch_size, sequence_length)`.
# inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
# Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
# is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
# model's internal embedding lookup matrix.
# use_cache (`bool`, *optional*):
# If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
# `past_key_values`).
# output_attentions (`bool`, *optional*):
# Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
# tensors for more detail.
# output_hidden_states (`bool`, *optional*):
# Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
# more detail.
# return_dict (`bool`, *optional*):
# Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
# """
# @add_start_docstrings(
# "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
# LLAMA_START_DOCSTRING,
# )
class IdeficsModel(IdeficsPreTrainedModel):
# """
# Transformer decoder consisting of `config.num_hidden_layers` layers. Each layer is a [`IdeficsDecoderLayer`]
# Args:
# config: IdeficsConfig
# """
def __init__(self, config: IdeficsConfig, weights):
super().__init__(config)
self.config = config
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = IdeficsDecoupledPartialTPEmbedding(
config=config,
weights=weights,
)
self.image_size = config.vision_config.image_size
self.vision_config = config.vision_config
self.vision_model = IdeficsVisionTransformer(
prefix="model.vision_model",
config=config.vision_config,
weights=weights,
)
# Perceiver Resampler
if config.use_resampler:
perceiver_config = config.perceiver_config
self.perceiver_resampler = IdeficsPerceiverResampler(
prefix="model.perceiver_resampler",
config=config,
embed_dim=config.vision_config.embed_dim,
depth=perceiver_config.resampler_depth,
n_heads=perceiver_config.resampler_n_heads,
head_dim=perceiver_config.resampler_head_dim,
n_latents=perceiver_config.resampler_n_latents,
weights=weights,
)
self.layers = nn.ModuleList(
[
IdeficsDecoderLayer(layer_id, config, weights)
for layer_id in range(config.num_hidden_layers)
]
)
self.cross_layer_interval = config.cross_layer_interval
num_cross_layers = config.num_hidden_layers // self.cross_layer_interval
self.gated_cross_attn_layers = nn.ModuleList(
[
IdeficsGatedCrossAttentionLayer(layer_id, config, weights)
for layer_id in range(num_cross_layers)
]
)
# self.gradient_checkpointing = False
self.norm = IdeficsRMSNorm(
prefix="model.norm", weights=weights, eps=config.rms_norm_eps
)
# self.gradient_checkpointing = False
# Initialize weights and apply final processing
# self.post_init()
# self.freeze_relevant_params(config)
# def freeze_relevant_params(self, config=None):
# if config is None:
# config = self.config
# if config.freeze_text_layers:
# self.freeze_text_layers(config.freeze_text_module_exceptions)
# if config.freeze_vision_layers:
# freeze_model(self.vision_model, module_exceptions=config.freeze_vision_module_exceptions)
# def freeze_text_layers(self, module_exceptions=[]):
# for module in [self.layers, self.norm]:
# freeze_model(module, module_exceptions=module_exceptions)
# def freeze_vision_layers(self, module_exceptions=[]):
# freeze_model(self.vision_model, module_exceptions=module_exceptions)
# def get_input_embeddings(self):
# return self.embed_tokens
# def set_input_embeddings(self, value):
# self.embed_tokens = value
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(
self, attention_mask, input_shape, inputs_embeds, past_key_values_length
):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape,
inputs_embeds.dtype,
device=inputs_embeds.device,
past_key_values_length=past_key_values_length,
)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(
attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
).to(inputs_embeds.device)
combined_attention_mask = (
expanded_attn_mask
if combined_attention_mask is None
else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
# @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
image_hidden_states: Optional[torch.FloatTensor] = None,
image_embeddings: Optional[torch.FloatTensor] = None,
image_attention_mask: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPastImage]:
device = input_ids.device if input_ids is not None else inputs_embeds.device
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time"
)
elif input_ids is not None:
batch_size, seq_length = input_ids.shape
elif inputs_embeds is not None:
batch_size, seq_length, _ = inputs_embeds.shape
else:
raise ValueError(
"You have to specify either decoder_input_ids or decoder_inputs_embeds"
)
seq_length_with_past = seq_length
past_key_values_length = 0
if past_key_values is not None:
past_key_values_length = past_key_values[0][0].shape[2]
seq_length_with_past = seq_length_with_past + past_key_values_length
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
elif position_ids is None:
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(
past_key_values_length,
seq_length + past_key_values_length,
dtype=torch.long,
device=device,
)
position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
else:
position_ids = position_ids.view(-1, seq_length).long()
no_images = False
if image_hidden_states is None:
if pixel_values is None and image_embeddings is None:
raise ValueError(
"Either pixel_values and image_embeddings have to be not-None."
)
elif pixel_values is not None and image_embeddings is not None:
raise ValueError(
"You cannot specify both pixel_values and image_embeddings at the same time"
)
elif pixel_values is not None:
no_images = len(torch.nonzero(pixel_values)) == 0
pixel_values = pixel_values.to(
dtype=self.dtype, device=device
) # fp16 compatibility
batch_size, num_images = pixel_values.shape[:2]
pixel_values = pixel_values.contiguous().view(
batch_size * num_images, *pixel_values.shape[2:]
)
# Get sequence from the vision encoder
image_hidden_states = self.vision_model(
pixel_values=pixel_values
).last_hidden_state
elif image_embeddings is not None:
(
batch_size,
num_images,
image_seq_len,
image_hidden_size,
) = image_embeddings.size()
image_hidden_states = image_embeddings.to(
dtype=self.dtype, device=input_ids.device
)
image_hidden_states = image_hidden_states.view(
batch_size * num_images, image_seq_len, image_hidden_size
)
if self.config.use_resampler:
image_hidden_states = self.perceiver_resampler(image_hidden_states)
image_seq_len, image_hidden_size = image_hidden_states.size(
1
), image_hidden_states.size(2)
image_hidden_states = image_hidden_states.view(
batch_size, num_images * image_seq_len, image_hidden_size
)
else:
no_images = False
num_images = pixel_values.shape[1]
image_seq_len = image_hidden_states.shape[1] // num_images
# # Hack to use the model in full language modeling mode
# image_attention_mask = torch.zeros(batch_size, seq_length, 1, dtype=torch.long, device=image_hidden_states.device)
# Make image_attention_mask compatible with hidden states
text_seq_len = image_attention_mask.size(1)
image_attention_mask = image_attention_mask.unsqueeze(-1)
image_attention_mask = image_attention_mask.repeat(1, 1, 1, image_seq_len)
image_attention_mask = image_attention_mask.view(
batch_size, text_seq_len, num_images * image_seq_len
)
image_batch_size, image_sequence_length, _ = image_hidden_states.size()
image_hidden_shape = (image_batch_size, image_sequence_length)
if image_attention_mask is None:
image_attention_mask = torch.ones(image_hidden_shape, device=device)
image_attention_mask = self.invert_attention_mask(image_attention_mask)
# if list(image_attention_mask.shape) != [4, 1, 1024, 64]:
# raise ValueError(f"Image hidden_states {image_hidden_states.shape} - mask {image_attention_mask.shape} {num_images} {image_seq_len} {text_seq_len}")
# if image_hidden_states is not None:
# else:
# image_attention_mask = None
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
# embed positions
if attention_mask is None:
attention_mask = torch.ones(
(batch_size, seq_length_with_past),
dtype=torch.bool,
device=inputs_embeds.device,
)
attention_mask = self._prepare_decoder_attention_mask(
attention_mask,
(batch_size, seq_length),
inputs_embeds,
past_key_values_length,
)
hidden_states = inputs_embeds
# if self.gradient_checkpointing and self.training:
# if use_cache:
# logger.warning_once(
# "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
# )
# use_cache = False
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
next_decoder_cache = () if use_cache else None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
past_key_value = (
past_key_values[idx] if past_key_values is not None else None
)
def vblock(
main_block,
hidden_states,
attention_mask,
position_ids,
past_key_value,
image_hidden_states,
image_attention_mask,
output_attentions,
use_cache,
no_images,
layer_idx,
cross_layer_interval,
gated_cross_attn_layers,
):
# TODO(ls): Add cross attention values to respective lists
if layer_idx % cross_layer_interval == 0:
xblock = gated_cross_attn_layers[layer_idx // cross_layer_interval]
outputs = xblock(
hidden_states,
attention_mask=attention_mask,
image_hidden_states=image_hidden_states,
image_attention_mask=image_attention_mask,
output_attentions=output_attentions,
use_cache=use_cache,
past_key_value=None, # not implemented
no_images=no_images,
)
hidden_states = outputs[0]
layer_outputs = main_block(
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
return layer_outputs
# if self.gradient_checkpointing and self.training:
# past_key_value = None
# if use_cache:
# logger.warning_once(
# "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
# )
# use_cache = False
# layer_outputs = torch.utils.checkpoint.checkpoint(
# vblock,
# decoder_layer,
# hidden_states,
# attention_mask,
# position_ids,
# past_key_value,
# image_hidden_states,
# image_attention_mask,
# output_attentions,
# use_cache,
# no_images,
# idx,
# self.cross_layer_interval,
# self.gated_cross_attn_layers,
# )
# else:
layer_outputs = vblock(
decoder_layer,
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
image_hidden_states=image_hidden_states,
image_attention_mask=image_attention_mask,
output_attentions=output_attentions,
use_cache=use_cache,
no_images=no_images,
layer_idx=idx,
cross_layer_interval=self.cross_layer_interval,
gated_cross_attn_layers=self.gated_cross_attn_layers,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
hidden_states = self.norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]
if v is not None
)
return BaseModelOutputWithPastImage(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
image_hidden_states=image_hidden_states,
)
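# NOTE (illustrative): the `vblock` closure above interleaves gated cross-attention
# with the decoder layers according to `cross_layer_interval`. For example, with
# interval=4, a cross-attention block runs before decoder layers 0, 4, 8, ...:
#
#     for idx, layer in enumerate(self.layers):
#         if idx % self.cross_layer_interval == 0:
#             h = self.gated_cross_attn_layers[idx // self.cross_layer_interval](h, ...)[0]
#         h = layer(h, ...)[0]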
class IdeficsForVisionText2Text(IdeficsPreTrainedModel):
def __init__(
self,
config,
weights,
):
super().__init__(config)
self.model = IdeficsModel(
config=config,
weights=weights,
)
self.lm_head = IdeficsDecoupledTensorParallelLinear(
config=config,
weights=weights,
)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
image_embeddings: Optional[torch.FloatTensor] = None,
image_hidden_states: Optional[torch.FloatTensor] = None,
image_attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPastImage]:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, LlamaForCausalLM
>>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
>>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
>>> prompt = "Hey, are you consciours? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
```"""
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
pixel_values=pixel_values,
image_embeddings=image_embeddings,
image_hidden_states=image_hidden_states,
image_attention_mask=image_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
logits, speculative_logits = self.lm_head(hidden_states)
loss = None
return (
CausalLMOutputWithPastImage(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
),
speculative_logits,
)
def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
inputs = prepare_inputs_for_generation(input_ids, past=past, **kwargs)
unwanted_kwargs = ["token_type_ids"]
for kwarg in unwanted_kwargs:
inputs.pop(kwarg, None)
return inputs
@staticmethod
def _expand_inputs_for_generation(
*args,
**model_kwargs,
):
return expand_inputs_for_generation(*args, **model_kwargs)
@staticmethod
def _update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=False
):
return update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=is_encoder_decoder
)
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (
tuple(
past_state.index_select(0, beam_idx) for past_state in layer_past
),
)
return reordered_past
| text-generation-inference/server/text_generation_server/models/custom_modeling/idefics_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/idefics_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 28598
} | 300 |
from contextlib import nullcontext
import math
import os
import time
import torch
import torch.distributed
import numpy as np
from loguru import logger
from dataclasses import dataclass
from opentelemetry import trace
from transformers import (
PreTrainedTokenizerBase,
AutoConfig,
AutoTokenizer,
GenerationConfig,
)
from typing import (
Any,
ContextManager,
Iterable,
Optional,
Tuple,
List,
Type,
Dict,
Union,
)
from text_generation_server.adapters import AdapterBatchData, AdapterBatchMetadata
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
from text_generation_server.utils.chunks import concat_text_chunks
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.models import Model
from text_generation_server.utils.log import log_master
from text_generation_server.utils.prefill_chunking import (
get_support_chunking,
get_max_prefill_tokens,
)
from text_generation_server.utils.tokens import batch_top_tokens
from text_generation_server.utils.speculate import get_speculate
from text_generation_server.utils import (
initialize_torch_distributed,
weight_files,
Weights,
)
from text_generation_server.models.types import (
Batch,
Tokens,
Generation,
GeneratedText,
)
from text_generation_server.pb import generate_pb2
from text_generation_server.models.globals import (
MEM_POOL,
ATTENTION,
BLOCK_SIZE,
CUDA_GRAPHS,
REQUEST_LOGPROBS,
TGI_WIGGLE_ROOM,
get_adapter_to_index,
)
from text_generation_server.layers.attention import KVCache, Seqlen
from text_generation_server.utils import StoppingCriteria, HeterogeneousNextTokenChooser
from text_generation_server.utils.dist import MEMORY_FRACTION
from text_generation_server.utils.quantization import get_loader
from text_generation_server.utils.segments import SegmentConcatBuilder, find_segments
from text_generation_server.utils.import_utils import (
empty_cache,
synchronize,
get_free_memory,
)
from text_generation_server.models.metadata_kernels import (
has_triton,
copy_next_input_ids_inplace,
block_tables_to_ragged,
block_tables_to_padded,
prepare_position_slot_ids,
slots_filtering,
)
tracer = trace.get_tracer(__name__)
def small_power_of_2(n: int):
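    # Returns the largest power of two strictly smaller than `n`, e.g. 5 -> 4, 8 -> 4, 9 -> 8.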
return 1 << ((n - 1).bit_length() - 1)
def init_cpu_threads_env(rank_id: int, world_size: int):
import importlib.util
if importlib.util.find_spec("numa") is not None:
import numa
import psutil
nodes = numa.info.get_max_node() + 1
rank_per_node = math.ceil(world_size / nodes)
num_cpus_per_nodes = int(psutil.cpu_count(logical=False) / nodes)
node_id = int(rank_id / rank_per_node)
rank_offset_per_node = rank_id % rank_per_node
if os.getenv("OMP_NUM_THREADS") is None:
num_cpus_per_rank = max(int(num_cpus_per_nodes / rank_per_node), 1)
else:
num_cpus_per_rank = int(os.getenv("OMP_NUM_THREADS"))
if len(numa.memory.get_membind_nodes()) == nodes:
            numa.memory.set_membind_nodes(node_id)
torch.set_num_threads(num_cpus_per_rank)
if len(numa.schedule.get_affinitive_cpus(0)) == psutil.cpu_count(logical=True):
cpu_start = num_cpus_per_rank * rank_offset_per_node
numa.schedule.run_on_cpus(
0,
*(
numa.info.node_to_cpus(node_id)[
cpu_start : cpu_start + num_cpus_per_rank
]
),
)
logger.info(
f"affinity={numa.schedule.get_affinitive_cpus(0)}, membind = {numa.memory.get_membind_nodes()}"
)
@dataclass
class FlashCausalLMBatch(Batch):
batch_id: int
requests: List[generate_pb2.Request]
# request id -> idx in list mapping
requests_idx_mapping: Dict[int, int]
# Decoder values
# Can be a list for easy filtering
# If `input_ids` is a list, it needs to be materialized to a tensor first
input_ids: Union[torch.Tensor, List[List[int]]]
# Will be set by `generate_token` and reset after each prefill forward before staying set in decode
position_ids: Optional[torch.Tensor]
speculative_ids: Optional[torch.Tensor]
# Set when creating the batch
# tensor of indices of the currently used slots, length = \sum_{i=0}^{b} s_i in prefill, length = b in decode
# Will be set by `generate_token` and reset after each prefill forward before staying set in decode
slot_indices: Optional[torch.Tensor]
# list of length b of list of length s_i // block_size
block_tables: List[List[int]]
# tensor of size [b, max_total_seqlen // block_size] holding the paged attention block tables for all sequences
block_tables_tensor: torch.Tensor
# tensor of length \sum_{i=0}^{b} max_s_i holding the paged attention slots for all sequences
slots: torch.Tensor
# list of length b + 1 containing the cumulative sequence slot lengths of the sequences in the batch
# used for filtering
cu_slots: torch.Tensor
max_input_length: int
max_current_length: int
# Whether this batch contains at least one request that is prefilling
prefilling: bool
# Whether each request is prefilling
prefilling_mask: List[bool]
# Prefill metadata tensors to efficiently compute logprobs
# tensor of length b + 1 containing the cumulative sequence lengths of the sequences in the batch, only used in prefill
cu_seqlen_prefill: Optional[torch.Tensor]
# Prefill cache indices is used to slice into the kv tensor before caching it into the paged attention buffers
# as we only keep SLIDING_WINDOW values instead of the whole tensor
prefill_cache_indices: Optional[torch.Tensor]
# Will be set by `generate_token` and reset after each prefill forward
prefill_head_indices: Optional[torch.Tensor]
# Will be set by `generate_token` and reset after each prefill forward
    prefill_next_token_indices: Optional[torch.Tensor]
# Will be set by `generate_token` and reset after each prefill forward
prefill_cu_outlens: Optional[List[int]]
# Will be set by `generate_token` and reset after each prefill forward
prefill_logprob_tokens: List[Optional[Tokens]]
# All tokens
all_input_ids: List[List[int]]
all_input_ids_tensor: torch.Tensor
# Lengths of all generations present in the batch
input_lengths: List[int]
# size [b], containing the number of blocks that can be retrieved from the cache
cache_lengths: List[int]
prompt_lengths: List[int]
# Will be set by `generate_token` and reset after each prefill forward before staying set in decode
input_lengths_tensor: Optional[torch.Tensor]
cache_lengths_tensor: Optional[torch.Tensor]
prompt_lengths_tensor: torch.Tensor
prefix_offsets: List[Optional[int]]
read_offsets: List[Optional[int]]
# Generation helpers
next_token_chooser: HeterogeneousNextTokenChooser
stopping_criterias: List[StoppingCriteria]
top_n_tokens: List[int]
top_n_tokens_tensor: torch.Tensor
# Adapter metadata for each request
# Will be set by `generate_token` and reset after each prefill forward before staying set in decode
adapter_meta: Optional[AdapterBatchMetadata]
# Number of blocks in this batch
num_blocks: int
# Maximum number of blocks
max_blocks: int
def to_pb(self) -> generate_pb2.CachedBatch:
return generate_pb2.CachedBatch(
id=self.batch_id,
request_ids=[r.id for r in self.requests],
size=len(self),
max_tokens=self.num_blocks * BLOCK_SIZE,
current_tokens=(
sum([len(i) for i in self.input_ids])
if isinstance(self.input_ids, list)
else len(self.input_ids)
),
)
@classmethod
def batch_tokenized_inputs(
cls, requests: Iterable[generate_pb2.Request], tokenizer
):
        all_input_ids = []
        for r in requests:
            inputs = concat_text_chunks(r.input_chunks.chunks)
            input_ids = tokenizer(
                inputs,
                truncation=True,
                max_length=r.truncate,
                add_special_tokens=r.add_special_tokens,
            )["input_ids"]
            all_input_ids.append(input_ids)
return all_input_ids
@classmethod
def from_tokenized(
cls,
pb: generate_pb2.Batch,
tokenizer: PreTrainedTokenizerBase,
batch_tokenized_inputs,
dtype: torch.dtype,
device: torch.device,
) -> "FlashCausalLMBatch":
speculate = get_speculate()
cache_lengths = []
input_lengths = []
prompt_lengths = []
prefix_offsets = []
read_offsets = []
all_input_ids = []
all_postfix_ids = []
requests_idx_mapping = {}
slots = []
cu_slots = [0]
next_token_chooser_parameters = []
stopping_criterias = []
top_n_tokens = []
num_blocks = 0
max_input_length = 0
max_current_length = 0
max_length = 0
max_blocks = 0
cu_blocks = [0]
block_tables = []
block_tables_ragged = []
# Parse batch
for i, (r, tokenized_input) in enumerate(
zip(pb.requests, batch_tokenized_inputs)
):
### XXX: This consumes so much memory on long requests
### Deactivating it by default seems like the best course.
if not REQUEST_LOGPROBS:
r.prefill_logprobs = False
# request id -> idx in list mapping
requests_idx_mapping[r.id] = i
prompt_length = len(tokenized_input)
prompt_lengths.append(prompt_length)
cache_length = r.cache_len
assert (
cache_length <= prompt_length
), f"Prefix {cache_length} vs input {prompt_length}"
if cache_length == prompt_length:
assert False, "unreachable"
# `chunk_len` is an optional field in the protobuf
# It is only set if the model support chunking
if r.HasField("chunk_len"):
input_length = r.chunk_len
if cache_length + input_length < prompt_length:
# FIXME: speculate is not supported for context chunking at the moment
assert speculate == 0
assert get_support_chunking()
assert input_length > 0
postfix_ids = tokenized_input[
cache_length : cache_length + input_length
]
assert (
len(postfix_ids) == input_length
), "Rust and Python tokenizers are not aligned"
else:
# Use all the remaining ids
postfix_ids = tokenized_input[cache_length:]
input_length = len(postfix_ids)
input_lengths.append(input_length)
prefix_offsets.append(prompt_length - 5)
read_offsets.append(prompt_length)
all_postfix_ids.append(postfix_ids)
all_input_ids.append(tokenized_input)
next_token_chooser_parameters.append(r.parameters)
stopping_criteria = StoppingCriteria.from_pb(
r.stopping_parameters, tokenizer
)
max_new_tokens = stopping_criteria.max_new_tokens
stopping_criterias.append(stopping_criteria)
top_n_tokens.append(r.top_n_tokens)
# Paged attention
            # Remove one as the first token does not have a past
speculative_length = get_speculate()
speculative_length = 0 if speculative_length is None else speculative_length
# Tokens that need to be mapped to blocks.
block_tokens = prompt_length + max_new_tokens - 1 + speculative_length
# blocks and slots can be empty (for example in warmup)
if not r.blocks:
needed_blocks = math.ceil(block_tokens / BLOCK_SIZE)
request_blocks = [
b for b in range(num_blocks, num_blocks + needed_blocks)
]
request_slots = [
s
for b in request_blocks
for s in range(b * BLOCK_SIZE, (b + 1) * BLOCK_SIZE)
]
else:
request_blocks = r.blocks
request_slots = r.slots
block_tables.append(request_blocks)
block_tables_ragged.extend(request_blocks)
cu_blocks.append(len(block_tables_ragged))
slots.extend(request_slots)
cu_slots.append(len(slots))
cache_lengths.append(cache_length)
num_blocks += len(request_blocks)
# Update
max_blocks = max(max_blocks, len(request_blocks))
max_input_length = max(max_input_length, input_length)
max_current_length = max(max_current_length, cache_length + input_length)
max_length = max(
max_length,
prompt_length + max_new_tokens + speculative_length,
)
next_token_chooser = HeterogeneousNextTokenChooser.from_pb(
next_token_chooser_parameters, dtype, device, tokenizer
)
# Padded all_input_ids_tensor
all_input_ids_tensor = np.zeros(
(len(all_input_ids), max_length), dtype=np.int64
)
for i, input_ids in enumerate(all_input_ids):
all_input_ids_tensor[i, : len(input_ids)] = input_ids
# Create tensors on device
all_input_ids_tensor = torch.tensor(
all_input_ids_tensor, dtype=torch.int64, device=device
)
top_n_tokens_tensor = torch.tensor(
top_n_tokens, device=device, dtype=torch.int64
)
block_tables_ragged = torch.tensor(
block_tables_ragged, device=device, dtype=torch.int32
)
cu_blocks = torch.tensor(cu_blocks, device=device, dtype=torch.int64)
block_tables_tensor = torch.empty(
(len(block_tables), max_blocks),
device=device,
dtype=torch.int32,
)
# If the device supports Triton, we can use a fused kernel
if has_triton():
block_tables_to_padded(
max_blocks, cu_blocks, block_tables_tensor, block_tables_ragged
)
else:
for i, request_blocks in enumerate(block_tables):
block_tables_tensor[i, : len(request_blocks)] = torch.tensor(
request_blocks
)
prompt_lengths_tensor = torch.tensor(
prompt_lengths, dtype=torch.int32, device=device
)
slots = torch.tensor(slots, dtype=torch.int64, device=device)
cu_slots = torch.tensor(cu_slots, dtype=torch.int64)
return cls(
batch_id=pb.id,
requests=pb.requests,
requests_idx_mapping=requests_idx_mapping,
input_ids=all_postfix_ids,
block_tables=block_tables,
block_tables_tensor=block_tables_tensor,
cache_lengths=cache_lengths,
max_input_length=max_input_length,
max_current_length=max_current_length,
prefilling=True,
prefilling_mask=[True] * len(pb.requests),
prefill_logprob_tokens=[None] * len(pb.requests),
input_lengths=input_lengths,
prompt_lengths=prompt_lengths,
prefix_offsets=prefix_offsets,
read_offsets=read_offsets,
all_input_ids=all_input_ids,
all_input_ids_tensor=all_input_ids_tensor,
next_token_chooser=next_token_chooser,
stopping_criterias=stopping_criterias,
top_n_tokens=top_n_tokens,
top_n_tokens_tensor=top_n_tokens_tensor,
num_blocks=num_blocks,
max_blocks=max_blocks,
speculative_ids=None,
prompt_lengths_tensor=prompt_lengths_tensor,
# These values will be set by `FlashCausalLMBatch.prepare_for_prefill`
position_ids=None,
cu_seqlen_prefill=None,
prefill_cache_indices=None,
slot_indices=None,
slots=slots,
cu_slots=cu_slots,
prefill_head_indices=None,
prefill_next_token_indices=None,
prefill_cu_outlens=None,
cache_lengths_tensor=None,
input_lengths_tensor=None,
adapter_meta=None,
)
@classmethod
def from_pb(
cls,
pb: generate_pb2.Batch,
tokenizer: PreTrainedTokenizerBase,
dtype: torch.dtype,
device: torch.device,
) -> "FlashCausalLMBatch":
assert len(pb.requests) > 0
batch_tokenized_inputs = cls.batch_tokenized_inputs(pb.requests, tokenizer)
return cls.from_tokenized(pb, tokenizer, batch_tokenized_inputs, dtype, device)
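    # NOTE (illustrative): when the router did not pre-allocate blocks, the
    # arithmetic in `from_tokenized` above gives each request room for
    #   t = prompt_length + max_new_tokens - 1 + speculative_length
    # tokens, i.e. ceil(t / BLOCK_SIZE) blocks, where block `b` contributes the
    # contiguous slot range [b * BLOCK_SIZE, (b + 1) * BLOCK_SIZE).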
@tracer.start_as_current_span("filter")
def filter(self, request_ids: List[int]) -> "FlashCausalLMBatch":
if len(request_ids) == 0:
raise ValueError("Batch must have at least one request")
# We assume that if len(requests) == len(self) then the requests are the same
if len(request_ids) == len(self):
return self
device = self.block_tables_tensor.device
# New values after filtering
requests_idx_mapping = {}
# Used to index into tensors
indices = []
if not has_triton():
# slots to keep after filtering
slot_filtering_indices = torch.zeros(
self.slots.shape[0], dtype=torch.bool, device=device
)
# Create on CPU to only move to GPU once instead of at every copy
slot_indices = torch.empty(len(request_ids), dtype=torch.int64)
max_input_length = 0
max_current_length = 0
requests = []
block_tables = []
all_input_ids = []
input_ids = []
prompt_lengths = []
input_lengths = []
cache_lengths = []
prefix_offsets = []
read_offsets = []
cu_slots = [0]
prefilling_mask = []
prefill_logprob_tokens = []
stopping_criterias = []
top_n_tokens = []
adapter_set = set()
num_blocks = 0
max_blocks = 0
max_slots = 0
cumulative_slot_tokens = 0
for i, request_id in enumerate(request_ids):
idx = self.requests_idx_mapping[request_id]
indices.append(idx)
requests_idx_mapping[request_id] = i
requests.append(self.requests[idx])
# Prefilling
request_prefilling = self.prefilling_mask[idx]
prefilling_mask.append(request_prefilling)
# Get length
request_input_length = self.input_lengths[idx]
request_cache_length = self.cache_lengths[idx]
max_input_length = max(max_input_length, request_input_length)
max_current_length = max(
max_current_length, request_cache_length + request_input_length
)
all_input_ids.append(self.all_input_ids[idx])
prompt_lengths.append(self.prompt_lengths[idx])
input_lengths.append(request_input_length)
cache_lengths.append(request_cache_length)
prefix_offsets.append(self.prefix_offsets[idx])
read_offsets.append(self.read_offsets[idx])
stopping_criteria = self.stopping_criterias[idx]
stopping_criterias.append(stopping_criteria)
top_n_tokens.append(self.top_n_tokens[idx])
prefill_logprob_tokens.append(self.prefill_logprob_tokens[idx])
ADAPTER_TO_INDEX = get_adapter_to_index()
adapter_index = ADAPTER_TO_INDEX.get(self.requests[idx].adapter_id, 0)
adapter_set.add(adapter_index)
request_block_table = self.block_tables[idx]
num_blocks += len(request_block_table)
block_tables.append(request_block_table)
start_slot = self.cu_slots[idx]
end_slot = self.cu_slots[idx + 1]
slot_length = end_slot - start_slot
if not has_triton():
# Set slice
slot_filtering_indices[start_slot:end_slot] = True
cu_slots.append(cumulative_slot_tokens + slot_length)
# Input ids if the request was part of a prefilling batch
# If the batch was decoding we can index into the tensor directly later
if self.prefilling:
input_ids.append(self.input_ids[idx])
else:
# Copy to tensor (CPU)
slot_indices[i] = cumulative_slot_tokens + request_cache_length
cumulative_slot_tokens += slot_length
max_blocks = max(max_blocks, len(request_block_table))
max_slots = max(max_slots, slot_length)
all_input_ids_tensor = self.all_input_ids_tensor[indices]
block_tables_tensor = self.block_tables_tensor[indices]
next_token_chooser = self.next_token_chooser.filter(indices)
top_n_tokens_tensor = self.top_n_tokens_tensor[indices]
speculative_ids = (
self.speculative_ids[indices] if self.speculative_ids is not None else None
)
prompt_lengths_tensor = self.prompt_lengths_tensor[indices]
cu_slots = torch.tensor(cu_slots, dtype=torch.int64)
if not has_triton():
slots = self.slots[slot_filtering_indices]
else:
slots = self.slots.new_empty(cumulative_slot_tokens)
gpu_cu_slots = cu_slots.to(device)
slots_indexing_start = self.cu_slots.to(device)[indices]
slots_filtering(
max_slots, self.slots, slots, gpu_cu_slots, slots_indexing_start
)
if self.prefilling:
# These values will be set by `FlashCausalLMBatch.prepare_for_prefill`
position_ids = None
slot_indices = None
cache_lengths_tensor = None
input_lengths_tensor = None
adapter_meta = None
else:
# Index into tensors
input_ids = self.input_ids[indices]
position_ids = self.position_ids[indices]
adapter_indices = self.adapter_meta.adapter_indices[indices]
input_lengths_tensor = self.input_lengths_tensor[indices]
cache_lengths_tensor = self.cache_lengths_tensor[indices]
# Move to GPU now that we have the whole tensor
slot_indices = slot_indices.to(device)
adapter_segments, adapter_segment_indices = find_segments(adapter_indices)
adapter_segments = torch.tensor(
adapter_segments, dtype=torch.int32, device=device
)
adapter_meta = AdapterBatchMetadata(
adapter_indices=adapter_indices,
adapter_set=adapter_set,
adapter_segments=adapter_segments,
segment_indices=adapter_segment_indices,
)
return type(self)(
batch_id=self.batch_id,
requests=requests,
requests_idx_mapping=requests_idx_mapping,
input_ids=input_ids,
position_ids=position_ids,
cu_seqlen_prefill=None,
prefill_cache_indices=None,
slot_indices=slot_indices,
block_tables=block_tables,
block_tables_tensor=block_tables_tensor,
slots=slots,
cu_slots=cu_slots,
max_input_length=max_input_length,
max_current_length=max_current_length,
prefilling=self.prefilling,
prefilling_mask=prefilling_mask,
prefill_head_indices=None,
prefill_next_token_indices=None,
prefill_cu_outlens=None,
prefill_logprob_tokens=prefill_logprob_tokens,
prompt_lengths=prompt_lengths,
prompt_lengths_tensor=prompt_lengths_tensor,
input_lengths=input_lengths,
input_lengths_tensor=input_lengths_tensor,
cache_lengths=cache_lengths,
cache_lengths_tensor=cache_lengths_tensor,
prefix_offsets=prefix_offsets,
read_offsets=read_offsets,
all_input_ids=all_input_ids,
all_input_ids_tensor=all_input_ids_tensor,
next_token_chooser=next_token_chooser,
stopping_criterias=stopping_criterias,
top_n_tokens=top_n_tokens,
top_n_tokens_tensor=top_n_tokens_tensor,
num_blocks=num_blocks,
max_blocks=max_blocks,
speculative_ids=speculative_ids,
adapter_meta=adapter_meta,
)
@classmethod
@tracer.start_as_current_span("concatenate")
def concatenate(cls, batches: List["FlashCausalLMBatch"]) -> "FlashCausalLMBatch":
# Batch attributes
requests = []
requests_idx_mapping = {}
prefilling = False
num_blocks = 0
total_batch_size = 0
total_slots = 0
max_blocks = 0
max_length = 0
max_input_length = 0
max_current_length = 0
for b in batches:
total_batch_size += len(b)
max_blocks = max(max_blocks, b.max_blocks)
total_slots += len(b.slots)
num_blocks += b.num_blocks
speculative_length = (
b.speculative_ids.shape[1] if b.speculative_ids is not None else 0
)
max_input_length = max(max_input_length, b.max_input_length)
max_current_length = max(max_current_length, b.max_current_length)
max_length = max(
max_length,
max(
prompt_length
+ stopping_criteria.max_new_tokens
+ speculative_length
for prompt_length, stopping_criteria in zip(
b.prompt_lengths, b.stopping_criterias
)
),
)
prefilling = prefilling or b.prefilling
slots = batches[0].slots.new_empty(total_slots)
cu_slots = torch.zeros(total_batch_size + 1, dtype=torch.int64)
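# cu_slots[i] holds the cumulative slot offset of request i inside the flattened slots tensor.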
if prefilling:
input_ids = []
# These values will be set by `FlashCausalLMBatch.prepare_for_prefill`
position_ids = None
slot_indices = None
cache_lengths_tensor = None
input_lengths_tensor = None
adapter_meta = None
adapter_segment_builder = None
else:
input_ids = batches[0].input_ids.new_empty(total_batch_size)
if (
batches[0].position_ids is not None
and batches[0].position_ids.dim() == 2
):
# Qwen2_vl case:
position_ids = batches[0].position_ids.new_empty(
(total_batch_size, batches[0].position_ids.shape[-1])
)
else:
position_ids = batches[0].position_ids.new_empty(total_batch_size)
slot_indices = batches[0].slot_indices.new_empty(total_batch_size)
input_lengths_tensor = batches[0].input_lengths_tensor.new_empty(
total_batch_size
)
cache_lengths_tensor = batches[0].cache_lengths_tensor.new_empty(
total_batch_size
)
total_indices_size = sum(
b.adapter_meta.adapter_indices.shape[0] for b in batches
)
adapter_indices = batches[0].adapter_meta.adapter_indices.new_empty(
total_indices_size
)
adapter_segment_builder = SegmentConcatBuilder()
adapter_set = set()
prompt_lengths_tensor = batches[0].prompt_lengths_tensor.new_empty(
total_batch_size
)
block_tables_tensor = batches[0].block_tables_tensor.new_zeros(
(total_batch_size, max_blocks)
)
all_input_ids_tensor = batches[0].all_input_ids_tensor.new_zeros(
(total_batch_size, max_length)
)
top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros(
total_batch_size,
)
block_tables = []
cache_lengths = []
all_input_ids = []
prompt_lengths = []
input_lengths = []
prefix_offsets = []
read_offsets = []
prefill_logprob_tokens = []
next_token_chooser_parameters = []
fsm_grammar_states = []
stopping_criterias = []
top_n_tokens = []
prefilling_mask = []
# Cumulative length
cumulative_batch_size = 0
cumulative_slots = 0
cumulative_adapter_indices_size = 0
for i, batch in enumerate(batches):
requests.extend(batch.requests)
if i == 0:
requests_idx_mapping = batch.requests_idx_mapping
else:
# We need to offset the mapping for each batch by the cumulative batch size
for k, v in batch.requests_idx_mapping.items():
requests_idx_mapping[k] = v + cumulative_batch_size
start_index = cumulative_batch_size
end_index = cumulative_batch_size + len(batch)
# Copy tensors (GPU)
top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor
all_input_ids_tensor[
start_index:end_index, : batch.all_input_ids_tensor.shape[1]
] = batch.all_input_ids_tensor[:, :max_length]
block_tables_tensor[
start_index:end_index, : batch.block_tables_tensor.shape[1]
] = batch.block_tables_tensor[:, :max_blocks]
prompt_lengths_tensor[start_index:end_index] = batch.prompt_lengths_tensor
slots_start_index = cumulative_slots
slots_end_index = cumulative_slots + len(batch.slots)
slots[slots_start_index:slots_end_index] = batch.slots
cu_slots[start_index + 1 : end_index + 1] = (
batch.cu_slots[1:] + cumulative_slots
)
if not prefilling:
input_ids[start_index:end_index] = batch.input_ids
position_ids[start_index:end_index] = batch.position_ids
slot_indices[start_index:end_index] = (
batch.slot_indices + cumulative_slots
)
input_lengths_tensor[start_index:end_index] = batch.input_lengths_tensor
cache_lengths_tensor[start_index:end_index] = batch.cache_lengths_tensor
# Copy over adapter indices
adapter_start_index = cumulative_adapter_indices_size
adapter_end_index = (
cumulative_adapter_indices_size
+ batch.adapter_meta.adapter_indices.shape[0]
)
adapter_indices[adapter_start_index:adapter_end_index] = (
batch.adapter_meta.adapter_indices
)
cumulative_adapter_indices_size = adapter_end_index
adapter_set.update(batch.adapter_meta.adapter_set)
adapter_segment_builder.concat(
batch.adapter_meta.adapter_segments,
batch.adapter_meta.segment_indices,
)
else:
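# Mixed concatenation: a batch that already decoded stores input_ids as a tensor; convert it back to per-request lists before extending.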
if isinstance(batch.input_ids, torch.Tensor):
batch.input_ids = batch.input_ids.view(-1, 1).tolist()
input_ids.extend(batch.input_ids)
prefilling_mask.extend(batch.prefilling_mask)
block_tables.extend(batch.block_tables)
cache_lengths.extend(batch.cache_lengths)
all_input_ids.extend(batch.all_input_ids)
prompt_lengths.extend(batch.prompt_lengths)
input_lengths.extend(batch.input_lengths)
prefix_offsets.extend(batch.prefix_offsets)
read_offsets.extend(batch.read_offsets)
prefill_logprob_tokens.extend(batch.prefill_logprob_tokens)
next_token_chooser_parameters.extend([r.parameters for r in batch.requests])
fsm_grammar_states.extend(batch.next_token_chooser.fsm_grammar_states)
stopping_criterias.extend(batch.stopping_criterias)
top_n_tokens.extend(batch.top_n_tokens)
# Update
cumulative_slots += len(batch.slots)
cumulative_batch_size += len(batch)
next_token_chooser = HeterogeneousNextTokenChooser.from_pb(
next_token_chooser_parameters,
dtype=batches[0].next_token_chooser.dtype,
device=batches[0].next_token_chooser.device,
tokenizer=batches[0].next_token_chooser.tokenizer,
fsm_grammar_states=fsm_grammar_states,
)
# We skip computing the speculative_ids when the batch size is too large, so
# we must check that all batches have them, otherwise they must be discarded
if get_speculate() > 0 and all(b.speculative_ids is not None for b in batches):
speculative_ids = torch.cat([b.speculative_ids for b in batches], dim=0)
else:
speculative_ids = None
if adapter_segment_builder is not None:
adapter_segments, adapter_segment_indices = adapter_segment_builder.build()
adapter_meta = AdapterBatchMetadata(
adapter_indices=adapter_indices,
adapter_set=adapter_set,
adapter_segments=adapter_segments,
segment_indices=adapter_segment_indices,
)
return cls(
batch_id=batches[0].batch_id,
requests=requests,
requests_idx_mapping=requests_idx_mapping,
input_ids=input_ids,
position_ids=position_ids,
cu_seqlen_prefill=None,
prefill_cache_indices=None,
slot_indices=slot_indices,
block_tables=block_tables,
block_tables_tensor=block_tables_tensor,
cache_lengths=cache_lengths,
cache_lengths_tensor=cache_lengths_tensor,
slots=slots,
cu_slots=cu_slots,
max_input_length=max_input_length,
max_current_length=max_current_length,
prefilling=prefilling,
prefilling_mask=prefilling_mask,
prefill_head_indices=None,
prefill_next_token_indices=None,
prefill_cu_outlens=None,
prefill_logprob_tokens=prefill_logprob_tokens,
prompt_lengths=prompt_lengths,
prompt_lengths_tensor=prompt_lengths_tensor,
input_lengths=input_lengths,
input_lengths_tensor=input_lengths_tensor,
prefix_offsets=prefix_offsets,
read_offsets=read_offsets,
all_input_ids=all_input_ids,
all_input_ids_tensor=all_input_ids_tensor,
next_token_chooser=next_token_chooser,
stopping_criterias=stopping_criterias,
top_n_tokens=top_n_tokens,
top_n_tokens_tensor=top_n_tokens_tensor,
num_blocks=num_blocks,
max_blocks=max_blocks,
speculative_ids=speculative_ids,
adapter_meta=adapter_meta,
)
def prepare_for_prefill(self):
# Prepare values if we need to continue prefilling
# Speculation must be ignored while we prefill, even with chunking;
# it simplifies everything
assert self.speculative_ids is None
device = self.block_tables_tensor.device
if isinstance(self.input_ids, list):
if len(self) > 1:
input_ids = np.concatenate(self.input_ids, dtype=np.int64)
else:
input_ids = self.input_ids[0]
self.input_ids = torch.tensor(input_ids, dtype=torch.int64, device=device)
self.input_lengths_tensor = torch.tensor(
self.input_lengths, dtype=torch.int32, device=device
)
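# Exclusive prefix sum of the input lengths; the varlen attention kernels index sequences through cu_seqlen_prefill.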
cu_seqlen_prefill = self.input_lengths_tensor.new_zeros(len(self) + 1)
torch.cumsum(self.input_lengths_tensor, out=cu_seqlen_prefill[1:], dim=0)
self.cu_seqlen_prefill = cu_seqlen_prefill.to(torch.int32)
self.cache_lengths_tensor = torch.tensor(
self.cache_lengths, dtype=torch.int32, device=device
)
# If the device supports Triton, we can use a fused kernel
if has_triton():
self.position_ids = torch.empty(
len(self.input_ids), dtype=torch.int32, device=device
)
self.slot_indices = torch.empty(
len(self.input_ids), dtype=torch.int64, device=device
)
cu_slots_gpu = self.cu_slots.to(device)
prepare_position_slot_ids(
self.max_input_length,
self.cache_lengths_tensor,
self.cu_seqlen_prefill,
cu_slots_gpu,
self.position_ids,
self.slot_indices,
)
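# Fallback accumulators for the non-Triton path below (they stay empty when the fused kernel above already filled position_ids and slot_indices).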
position_ids = []
slot_indices = []
all_prefill_logprobs = True
no_prefill_logprobs = True
prefill_cu_outlens = [0]
# Cumulative length
cumulative_length = 0
cumulative_slot_tokens = 0
prefill_out_cumulative_length = 0
adapter_indices_list = []
adapter_set = set()
for i, (
r,
cache_length,
input_length,
prompt_length,
request_prefilling,
blocks,
) in enumerate(
zip(
self.requests,
self.cache_lengths,
self.input_lengths,
self.prompt_lengths,
self.prefilling_mask,
self.block_tables,
)
):
next_chunk_length = input_length
if not has_triton():
# Position ids
request_position_ids = torch.arange(
cache_length, cache_length + input_length, dtype=torch.int32
)
position_ids.append(request_position_ids)
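# Derive the slots from the block ids when the router did not provide them explicitly.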
if not r.slots:
request_slots = [
s
for b in blocks
for s in range(b * BLOCK_SIZE, (b + 1) * BLOCK_SIZE)
]
else:
request_slots = r.slots
request_slot_indices = torch.arange(
cache_length + cumulative_slot_tokens,
cache_length + cumulative_slot_tokens + input_length,
dtype=torch.int64,
)
slot_indices.append(request_slot_indices)
# Update
cumulative_slot_tokens += len(request_slots)
# Prefill logprobs is ignored if the request is done prefilling
prefill_logprobs = r.prefill_logprobs and request_prefilling
all_prefill_logprobs = all_prefill_logprobs and prefill_logprobs
no_prefill_logprobs = no_prefill_logprobs and not prefill_logprobs
if prefill_logprobs:
prefill_cu_outlens.append(prefill_out_cumulative_length + input_length)
prefill_out_cumulative_length += input_length
else:
prefill_cu_outlens.append(prefill_out_cumulative_length + 1)
prefill_out_cumulative_length += 1
ADAPTER_TO_INDEX = get_adapter_to_index()
if ADAPTER_TO_INDEX:
adapter_index = ADAPTER_TO_INDEX.get(r.adapter_id, 0)
adapter_indices_list.append(
torch.full((next_chunk_length,), adapter_index)
)
adapter_set.add(adapter_index)
# Update
cumulative_length += next_chunk_length
if not all_prefill_logprobs and not no_prefill_logprobs:
prefill_head_indices = []
prefill_next_token_indices = []
# Cumulative length
cumulative_length = 0
prefill_out_cumulative_length = 0
for i, (
r,
input_length,
request_prefilling,
) in enumerate(
zip(
self.requests,
self.input_lengths,
self.prefilling_mask,
)
):
# Prefill logprobs is ignored if the request is done prefilling
prefill_logprobs = r.prefill_logprobs and request_prefilling
if prefill_logprobs:
prefill_head_indices.append(
torch.arange(
cumulative_length,
cumulative_length + input_length,
dtype=torch.int64,
)
)
prefill_next_token_indices.append(
prefill_out_cumulative_length + input_length - 1
)
prefill_out_cumulative_length += input_length
else:
prefill_head_indices.append(
torch.tensor(
[cumulative_length + input_length - 1],
dtype=torch.int64,
)
)
prefill_next_token_indices.append(prefill_out_cumulative_length)
prefill_out_cumulative_length += 1
# Update
cumulative_length += input_length
if len(self) > 1:
if position_ids:
position_ids = torch.cat(position_ids)
if slot_indices:
slot_indices = torch.cat(slot_indices)
else:
if position_ids:
position_ids = position_ids[0]
if slot_indices:
slot_indices = slot_indices[0]
if not has_triton():
self.position_ids = position_ids.to(device)
self.slot_indices = slot_indices.to(device)
self.prefill_cu_outlens = prefill_cu_outlens
self.prefill_cache_indices = None
if all_prefill_logprobs:
prefill_head_indices = None
prefill_next_token_indices = self.cu_seqlen_prefill[1:] - 1
elif no_prefill_logprobs:
prefill_head_indices = self.cu_seqlen_prefill[1:] - 1
prefill_next_token_indices = None
else:
prefill_head_indices = torch.cat(prefill_head_indices).to(device)
prefill_next_token_indices = torch.tensor(
prefill_next_token_indices, dtype=torch.int64, device=device
)
self.prefill_head_indices = prefill_head_indices
self.prefill_next_token_indices = prefill_next_token_indices
if adapter_set:
adapter_indices = torch.cat(adapter_indices_list).to(
dtype=torch.int64, device=device
)
adapter_segments, adapter_segment_indices = find_segments(adapter_indices)
else:
adapter_indices = torch.zeros_like(self.input_ids)
adapter_segments = [0, len(adapter_indices)]
adapter_segment_indices = [len(adapter_indices) - 1]
adapter_segments = torch.tensor(
adapter_segments, dtype=torch.int32, device=device
)
self.adapter_meta = AdapterBatchMetadata(
adapter_indices=adapter_indices,
adapter_set=adapter_set,
adapter_segments=adapter_segments,
segment_indices=adapter_segment_indices,
)
def __len__(self):
return len(self.requests)
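# Projection layers that LoRA adapters can target; ROW_PARALLEL marks the layers sharded row-wise under tensor parallelism.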
ADAPTER_LAYERS = [
"q_proj",
"k_proj",
"v_proj",
"o_proj",
"gate_proj",
"up_proj",
"down_proj",
]
ROW_PARALLEL = {"o_proj", "down_proj", "lm_head"}
class FlashCausalLM(Model):
def __init__(
self,
model_id: str,
model_class,
revision: Optional[str] = None,
quantize: Optional[str] = None,
speculator: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
trust_remote_code: bool = False,
lora_adapter_ids: Optional[list] = [],
tokenizer_class=AutoTokenizer,
config_class=AutoConfig,
default_dtype=torch.float16,
aliases=None,
# Used for Santacoder override of config
num_kv_heads: Optional[int] = None,
# Deepseek V2 uses different QK and V dims.
head_size: Optional[int] = None,
skip_special_tokens: bool = True,
kv_cache_dtype: Optional[torch.dtype] = None,
support_chunking: bool = True,
):
self.quantize = quantize
self.process_group, rank, world_size = initialize_torch_distributed()
if torch.cuda.is_available():
device = torch.device(f"cuda:{rank}")
dtype = default_dtype if dtype is None else dtype
elif SYSTEM == "ipex":
if hasattr(torch, "xpu") and torch.xpu.is_available():
device = torch.device(f"xpu:{rank}")
dtype = default_dtype if dtype is None else dtype
else:
device = torch.device("cpu")
dtype = torch.bfloat16 if dtype is None else dtype
init_cpu_threads_env(rank_id=rank, world_size=world_size)
else:
raise NotImplementedError(f"{model_class} is only available on GPU")
tokenizer = tokenizer_class.from_pretrained(
model_id,
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
try:
generation_config = GenerationConfig.from_pretrained(
model_id, revision=revision, trust_remote_code=trust_remote_code
)
if isinstance(generation_config.eos_token_id, (list, set)):
# TODO Huge hack
tokenizer._eos_token_ids = set(generation_config.eos_token_id)
except Exception:
pass
config = config_class.from_pretrained(
model_id, revision=revision, trust_remote_code=trust_remote_code
)
config.quantize = quantize
config.speculator = speculator
torch.distributed.barrier(group=self.process_group)
weights_loader = get_loader(quantize, model_id, revision)
filenames = weight_files(model_id, revision=revision, extension=".safetensors")
weights = Weights(
filenames,
device,
dtype,
process_group=self.process_group,
aliases=aliases,
weights_loader=weights_loader,
)
prefix = None
model = model_class(prefix, config, weights)
torch.distributed.barrier(group=self.process_group)
# VLM models define the config we care about in their text_config
text_config = getattr(config, "text_config", None)
if text_config is not None:
config = text_config
if getattr(config, "sliding_window", None) is None:
config.sliding_window = None
self.num_layers = config.num_hidden_layers
self.num_heads = config.num_attention_heads // self.process_group.size()
self.config = config
# Validation is done in the model itself
if num_kv_heads is None:
num_kv_heads = getattr(config, "num_key_value_heads", None)
# GPT-2 workaround
if num_kv_heads is None:
num_kv_heads = getattr(config, "n_head", None)
if num_kv_heads is None:
raise ValueError("Cannot get the number of key/value heads")
self.num_kv_heads = (
num_kv_heads // self.process_group.size()
if num_kv_heads > 1
else num_kv_heads
)
assert self.num_kv_heads > 0
if head_size is None:
# Some models use GQA with different sizes for o_proj
# and q_proj; `head_dim` accounts for that.
if getattr(config, "head_dim", None) is not None:
self.head_size = config.head_dim
else:
self.head_size = config.hidden_size // config.num_attention_heads
else:
self.head_size = head_size
self.cuda_graphs = {}
self.kv_cache = []
self.kv_cache_dtype = dtype if kv_cache_dtype is None else kv_cache_dtype
if ATTENTION == "flashinfer":
from text_generation_server.layers.attention.flashinfer import (
create_prefill_state,
create_decode_state,
create_prefill_with_paged_kv_state,
)
self.prefill_state = create_prefill_state(device=device)
self.prefill_with_paged_kv_state = create_prefill_with_paged_kv_state(
device=device
)
self.decode_state = create_decode_state(
device=device,
num_heads=self.num_heads,
num_kv_heads=self.num_kv_heads,
)
super().__init__(
model_id=model_id,
model=model,
tokenizer=tokenizer,
requires_padding=False,
dtype=dtype,
device=device,
rank=rank,
world_size=world_size,
sliding_window=config.sliding_window,
support_chunking=support_chunking,
)
@property
def batch_type(self) -> Type[FlashCausalLMBatch]:
return FlashCausalLMBatch
def init_kv_cache(
self,
num_blocks: int,
num_layers: int,
num_heads: int,
head_size: int,
dtype: torch.dtype,
device: torch.device,
):
self.kv_cache = []
empty_cache()
self.kv_cache = [
KVCache(
num_blocks=num_blocks,
num_heads=num_heads,
head_size=head_size,
dtype=dtype,
device=device,
)
for _ in range(num_layers)
]
def cuda_graph_warmup(self, bs: int, max_s: int, max_bt: int):
max_bs = max(self.cuda_graphs.keys()) if self.cuda_graphs else None
input_lengths = [max_s] * bs
cache_lengths = [0] * bs
if max_bs is None:
input_ids = torch.zeros(bs, dtype=torch.int64, device=self.device)
position_ids = torch.zeros(bs, dtype=torch.int32, device=self.device)
config = getattr(self.model, "config", None)
rope_scaling = getattr(config, "rope_scaling", None) if config else None
if (  # mrope has position_ids per section; if so, repeat them n times
isinstance(rope_scaling, dict) and rope_scaling["rope_type"] == "mrope"
):
n_sections = len(self.model.config.rope_scaling["mrope_section"])
position_ids = position_ids.unsqueeze(1).repeat(1, n_sections)
slots = torch.arange(bs, dtype=torch.int64, device=self.device)
input_lengths_tensor = (
torch.ones(bs, dtype=torch.int32, device=self.device) * max_s
)
cache_lengths_tensor = torch.zeros(
bs, dtype=torch.int32, device=self.device
)
block_tables = torch.arange(
max_bt, dtype=torch.int32, device=self.device
).repeat(bs)
block_tables = block_tables.reshape((bs, max_bt))
if ATTENTION == "flashinfer":
block_tables = block_tables_to_ragged(
block_tables=block_tables,
input_lengths=input_lengths,
cache_lengths=cache_lengths,
input_lengths_tensor=input_lengths_tensor,
cache_lengths_tensor=cache_lengths_tensor,
max_current_length=max_s,
)
else:
if bs > max_bs:
raise RuntimeError(
"Cuda graphs should be generated in decreasing order size to reduce VRAM usage"
)
input_ids = self.cuda_graphs[max_bs]["input_ids"][:bs]
position_ids = self.cuda_graphs[max_bs]["position_ids"][:bs]
if ATTENTION == "flashinfer":
block_tables = self.cuda_graphs[max_bs]["block_tables"][: bs * max_bt]
else:
block_tables = self.cuda_graphs[max_bs]["block_tables"][:bs]
slots = self.cuda_graphs[max_bs]["slots"][:bs]
input_lengths_tensor = self.cuda_graphs[max_bs]["input_lengths"][:bs]
cache_lengths_tensor = self.cuda_graphs[max_bs]["cache_lengths"][:bs]
if ATTENTION == "flashinfer":
from text_generation_server.layers.attention.flashinfer import (
create_decode_state_cuda_graphs,
)
block_tables_ptr = torch.zeros(
bs + 1, dtype=torch.int32, device=self.device
)
last_page_len = torch.ones(bs, dtype=torch.int32, device=self.device)
state = create_decode_state_cuda_graphs(
device=input_ids.device,
block_tables=block_tables,
block_tables_ptr=block_tables_ptr,
last_page_len=last_page_len,
num_heads=self.num_heads,
num_kv_heads=self.num_kv_heads,
)
else:
state = None
graph = torch.cuda.CUDAGraph()
self.cuda_graphs[bs] = {
"input_ids": input_ids,
"position_ids": position_ids,
"kv_cache": self.kv_cache,
"block_tables": block_tables,
"slots": slots,
"input_lengths": input_lengths_tensor,
"cache_lengths": cache_lengths_tensor,
"state": state,
"graph": graph,
}
torch.cuda.synchronize()
# Run once outside to warmup
with self._forward_context(
block_tables=block_tables,
cu_seqlen_prefill=None,
input_lengths_tensor=input_lengths_tensor,
state=state,
cache_lengths_tensor=cache_lengths_tensor,
):
seqlen = Seqlen(
input_lengths=input_lengths_tensor,
cache_lengths=cache_lengths_tensor,
cu_seqlen_q=None,
max_q=1,
max_k=max_s,
)
self.model.forward(
input_ids=input_ids,
position_ids=position_ids,
cu_seqlen_prefill=None,
kv_cache=self.kv_cache,
block_tables=block_tables,
slots=slots,
seqlen=seqlen,
max_s=max_s,
prefill_cache_indices=None,
lm_head_indices=None,
)
del seqlen
torch.cuda.synchronize()
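# Capture the decode forward pass into a CUDA graph; later decodes replay it after refreshing the static input tensors.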
with torch.cuda.graph(graph, pool=MEM_POOL):
seqlen = Seqlen(
input_lengths=input_lengths_tensor,
cache_lengths=cache_lengths_tensor,
cu_seqlen_q=None,
max_q=1,
max_k=max_s,
)
logits, speculative_logits = self.model.forward(
input_ids=input_ids,
position_ids=position_ids,
cu_seqlen_prefill=None,
kv_cache=self.kv_cache,
block_tables=block_tables,
slots=slots,
seqlen=seqlen,
max_s=max_s,
prefill_cache_indices=None,
lm_head_indices=None,
)
self.cuda_graphs[bs]["logits"] = logits
self.cuda_graphs[bs]["speculative_logits"] = speculative_logits
torch.cuda.synchronize()
def warmup(
self,
batch: FlashCausalLMBatch,
max_input_tokens: Optional[int],
max_total_tokens: Optional[int],
):
# The warmup batch is the biggest batch we could ever receive
self.kv_cache = []
empty_cache()
# Inspired by the original implementation in [vllm](https://github.com/vllm-project/vllm)
# Calculate the number of blocks that can be allocated with the free memory
dtype_size = torch.tensor([], dtype=self.kv_cache_dtype).element_size()
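# Bytes per KV-cache block: BLOCK_SIZE tokens x kv_heads x head_size per layer, doubled for keys and values.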
cache_block_size = BLOCK_SIZE * self.num_kv_heads * self.head_size
total_cache_size = self.num_layers * cache_block_size * 2 * dtype_size
try:
self.init_kv_cache(
batch.num_blocks,
self.num_layers,
self.num_kv_heads,
self.head_size,
self.kv_cache_dtype,
self.device,
)
batch_num_blocks = batch.num_blocks
num_tokens = batch.to_pb().current_tokens
if SYSTEM == "rocm" and os.environ.get("PYTORCH_TUNABLEOP_ENABLED", False):
torch.cuda.tunable.tuning_enable(False)
synchronize(self.device)
free_memory = get_free_memory(
self.device, MEMORY_FRACTION * TGI_WIGGLE_ROOM
)
real_free_memory = get_free_memory(self.device, MEMORY_FRACTION)
log_master(
logger.debug,
f"Free memory {free_memory / 1e9:.2f}GB , (real: {real_free_memory / 1e9:.2f}GB",
)
_, _batch, _ = self.generate_token(batch)
except torch.cuda.OutOfMemoryError as e:
raise RuntimeError(
f"Not enough memory to handle {num_tokens} prefill tokens. "
f"You need to decrease `--max-batch-prefill-tokens`"
) from e
synchronize(self.device)
free_memory = get_free_memory(self.device, MEMORY_FRACTION * TGI_WIGGLE_ROOM)
kv_memory = free_memory
num_blocks = (
# Leave 5% for some wiggle room
int(kv_memory // total_cache_size)
# Add batch.num_blocks as we allocated it above, so it is included in the peak memory.
+ batch_num_blocks
)
log_master(logger.info, f"KV-cache blocks: {num_blocks}, size: {BLOCK_SIZE}")
if max_total_tokens is None:
if get_support_chunking():
model_max_length = self.tokenizer.model_max_length
max_position_embeddings = getattr(
self.config, "max_position_embeddings", model_max_length
)
max_total_tokens = min(
num_blocks * BLOCK_SIZE, model_max_length, max_position_embeddings
)
else:
max_total_tokens = sum(batch.cache_lengths)
if max_input_tokens is None:
max_input_tokens = max_total_tokens - 1
del _batch, batch
self.kv_cache = []
empty_cache()
self.init_kv_cache(
num_blocks,
self.num_layers,
self.num_kv_heads,
self.head_size,
self.kv_cache_dtype,
self.device,
)
if SYSTEM == "rocm":
if (
os.environ.get("PYTORCH_TUNABLEOP_ENABLED") is None
or os.environ.get("PYTORCH_TUNABLEOP_ENABLED") == "1"
):
torch.cuda.tunable.enable()
if os.environ.get("PYTORCH_TUNABLEOP_TUNING") != "0":
torch.cuda.tunable.tuning_enable(True)
if os.environ.get("PYTORCH_TUNABLEOP_SEQLENS") is not None:
tuning_sequences = [
int(val)
for val in os.environ["PYTORCH_TUNABLEOP_SEQLENS"].split(",")
]
elif CUDA_GRAPHS is not None:
tuning_sequences = CUDA_GRAPHS
else:
tuning_sequences = [1, 2, 3, 4, 5, 6, 7]
tunableop_filepath = os.path.join(
HUGGINGFACE_HUB_CACHE,
f"tunableop_{self.model_id.replace('/', '-')}_tp{self.world_size}_rank{self.rank}.csv",
)
log_master(
logger.info,
f"PyTorch TunableOp is enabled. The warmup may take several minutes, picking the ROCm optimal matrix multiplication kernel for the target lengths {', '.join([str(seqlen) for seqlen in tuning_sequences])}, with typical 5-8% latency improvement for small sequence lengths. The picked GEMMs are saved in the file {tunableop_filepath}. To disable TunableOp, please launch TGI with `PYTORCH_TUNABLEOP_ENABLED=0`.",
)
torch.cuda.tunable.set_filename(
tunableop_filepath, insert_device_ordinal=False
)
if os.path.isfile(tunableop_filepath):
log_master(
logger.info,
f"The file {tunableop_filepath} already exists and will be reused.",
)
torch.cuda.tunable.read_file(tunableop_filepath)
os.makedirs(HUGGINGFACE_HUB_CACHE, exist_ok=True)
for seqlen in tuning_sequences:
log_master(logger.info, f"Warming up TunableOp for seqlen={seqlen}")
self.tunableop_warmup(seqlen, max_total_tokens)
torch.cuda.tunable.write_file(tunableop_filepath)
if os.environ.get("PYTORCH_TUNABLEOP_TUNING_AFTER_WARMUP") != "1":
torch.cuda.tunable.tuning_enable(False)
else:
log_master(
logger.info,
"PyTorch ROCm TunableOp (https://github.com/pytorch/pytorch/tree/main/aten/src/ATen/cuda/tunable) is disabled. TunableOp brings an additional 5-8% latency improvement for small sequence lengths but requires a warmup. If necessary, please use the environment variable PYTORCH_TUNABLEOP_ENABLED=1 to enable TunableOp.",
)
if CUDA_GRAPHS:
try:
log_master(
logger.info, f"Cuda Graphs are enabled for sizes {CUDA_GRAPHS}"
)
# Warmup cuda graphs
for bs in CUDA_GRAPHS:
synchronize(self.device)
free_memory = get_free_memory(
self.device, MEMORY_FRACTION * TGI_WIGGLE_ROOM
)
log_master(
logger.debug,
f"Free RAM before cuda graph {bs} {free_memory / 1e9:.2f}GB",
)
if self.speculate is None or self.speculate + 1 <= bs:
self.cuda_graph_warmup(bs, max_total_tokens, max_total_tokens)
empty_cache()
synchronize(self.device)
free_memory = get_free_memory(
self.device, MEMORY_FRACTION * TGI_WIGGLE_ROOM
)
log_master(
logger.debug,
f"Free RAM after cuda graphs {free_memory / 1e9:.2f}GB",
)
except torch.cuda.OutOfMemoryError:
logger.exception("Decode cuda graph warmup failed")
else:
log_master(
logger.info, f"Cuda Graphs are disabled (CUDA_GRAPHS={CUDA_GRAPHS})."
)
assert max_input_tokens is not None
assert max_total_tokens is not None
return int(num_blocks * BLOCK_SIZE), max_input_tokens, max_total_tokens
def tunableop_warmup(self, seqlen: int, max_bt: int):
input_ids = torch.zeros(seqlen, dtype=torch.int64, device=self.device)
position_ids = torch.zeros(seqlen, dtype=torch.int32, device=self.device)
slots = torch.arange(seqlen, dtype=torch.int64, device=self.device)
# Dummy value, some models (starcoder2) don't accept `None`.
input_lengths = torch.ones(seqlen, dtype=torch.int32, device=self.device)
cache_lengths_tensor = torch.zeros(
seqlen, dtype=torch.int32, device=self.device
)
cu_seqlen_prefill = torch.tensor(
[0, seqlen], device=self.device, dtype=torch.int32
)
max_s = seqlen
block_tables = torch.arange(
max_bt, dtype=torch.int32, device=self.device
).repeat(seqlen)
block_tables = block_tables.reshape((seqlen, max_bt))
seqlen = Seqlen(
input_lengths=input_lengths,
cache_lengths=cache_lengths_tensor,
max_k=seqlen,
)
# We pass a `cu_seqlen_prefill` in order not to have to deal with paged attention cache allocation/deallocation.
self.model.forward(
input_ids=input_ids,
position_ids=position_ids,
cu_seqlen_prefill=cu_seqlen_prefill,
kv_cache=self.kv_cache,
block_tables=block_tables,
seqlen=seqlen,
slots=slots,
max_s=max_s,
lm_head_indices=None,
prefill_cache_indices=None,
)
def forward(
self, batch: FlashCausalLMBatch, adapter_data: AdapterBatchData
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
# Model Forward
if batch.speculative_ids is not None:
input_ids = batch.input_ids
position_ids = batch.position_ids
cu_seqlen_prefill = batch.cu_seqlen_prefill
kv_cache = self.kv_cache
block_tables = batch.block_tables_tensor
slots = batch.slots[batch.slot_indices]
input_lengths = batch.input_lengths_tensor
max_s = batch.max_current_length
lm_head_indices = batch.prefill_head_indices
speculative_ids = batch.speculative_ids
B, speculative_length = speculative_ids.shape
new_length = speculative_length + 1
new_input_ids = torch.cat(
[input_ids.unsqueeze(-1), speculative_ids], dim=1
).reshape(-1)
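# Interleave each request's token with its speculative tokens: the flat batch becomes B * (speculative_length + 1) rows.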
arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0)
arange_int = arange.to(dtype=torch.int32)
new_position_ids = (
position_ids.unsqueeze(-1).expand(B, new_length) + arange
).view(-1)
# Slots can be discontiguous when prefix caching is enabled, so we need to expand the slot_indices,
# then update the slots with the additional indices to ensure we're grabbing the ones that have been
# allocated
slot_indices = (
batch.slot_indices.unsqueeze(-1).expand(B, new_length) + arange_int
).view(-1)
slots = batch.slots[slot_indices]
input_lengths = (
input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int
).view(-1)
cache_lengths_tensor = (
batch.cache_lengths_tensor.unsqueeze(-1).expand(B, new_length)
).reshape(-1)
# Copy the block tables for all members
block_tables = (
block_tables.unsqueeze(1)
.expand(B, new_length, -1)
.reshape(B * new_length, -1)
.contiguous()
)
max_s = max_s + speculative_length
input_ids = new_input_ids
position_ids = new_position_ids
else:
input_ids = batch.input_ids
position_ids = batch.position_ids
cu_seqlen_prefill = batch.cu_seqlen_prefill
kv_cache = self.kv_cache
block_tables = batch.block_tables_tensor
slots = batch.slots[batch.slot_indices]
input_lengths = batch.input_lengths_tensor
cache_lengths_tensor = batch.cache_lengths_tensor
max_s = batch.max_current_length
lm_head_indices = batch.prefill_head_indices
bs = input_ids.shape[0]
sorted_padded_bs = sorted([k for k in self.cuda_graphs.keys() if k >= bs])
if sorted_padded_bs:
# Get associated cuda graph
cuda_graph = self.cuda_graphs[sorted_padded_bs[0]]
else:
cuda_graph = None
if cu_seqlen_prefill is not None or cuda_graph is None:
if ATTENTION == "flashinfer":
block_tables = block_tables_to_ragged(
block_tables=block_tables,
input_lengths=batch.input_lengths,
cache_lengths=batch.cache_lengths,
input_lengths_tensor=batch.input_lengths_tensor,
cache_lengths_tensor=batch.cache_lengths_tensor,
max_current_length=batch.max_current_length,
)
with self._forward_context(
block_tables=block_tables,
cu_seqlen_prefill=cu_seqlen_prefill,
input_lengths_tensor=input_lengths,
cache_lengths_tensor=cache_lengths_tensor,
):
seqlen = Seqlen(
input_lengths=input_lengths,
cache_lengths=cache_lengths_tensor,
cu_seqlen_q=cu_seqlen_prefill,
max_q=batch.max_input_length,
max_k=batch.max_current_length,
)
logits, speculative_logits = self.model.forward(
input_ids=input_ids,
position_ids=position_ids,
cu_seqlen_prefill=cu_seqlen_prefill,
kv_cache=kv_cache,
block_tables=block_tables,
slots=slots,
seqlen=seqlen,
max_s=max_s,
prefill_cache_indices=batch.prefill_cache_indices,
lm_head_indices=lm_head_indices,
adapter_data=adapter_data,
)
if batch.prefill_cache_indices is not None:
batch.prefill_cache_indices = None
return logits, speculative_logits
# Copy inputs to the static inputs of the cuda graph
# Static inputs are potentially padded
cuda_graph["input_ids"][: input_ids.shape[0]] = input_ids
cuda_graph["position_ids"][: position_ids.shape[-1]] = position_ids
if ATTENTION == "flashinfer":
block_tables = block_tables_to_ragged(
block_tables=block_tables,
input_lengths=batch.input_lengths,
cache_lengths=batch.cache_lengths,
input_lengths_tensor=batch.input_lengths_tensor,
cache_lengths_tensor=batch.cache_lengths_tensor,
max_current_length=batch.max_current_length,
)
# assert block_tables.shape[0] >= slots.shape[0]
cuda_graph["block_tables"][: block_tables.shape[0]] = block_tables
else:
cuda_graph["block_tables"][
: block_tables.shape[0], : block_tables.shape[1]
] = block_tables
# XXX: This is working only because block 0 is reserved for the healthcheck
# so it doesn't matter if we override it with bogus values.
cuda_graph["slots"].fill_(0)
cuda_graph["slots"][: slots.shape[0]] = slots
cuda_graph["input_lengths"].zero_()
cuda_graph["input_lengths"][: input_lengths.shape[0]] = input_lengths
cuda_graph["cache_lengths"].zero_()
cuda_graph["cache_lengths"][
: cache_lengths_tensor.shape[0]
] = cache_lengths_tensor
with self._forward_context(
block_tables=cuda_graph["block_tables"],
cu_seqlen_prefill=None,
input_lengths_tensor=cuda_graph["input_lengths"],
cache_lengths_tensor=cuda_graph["cache_lengths"],
state=cuda_graph["state"],
):
# Replay the graph
cuda_graph["graph"].replay()
# Slice output to the correct shape
speculative_logits = (
cuda_graph["speculative_logits"][:bs]
if cuda_graph["speculative_logits"] is not None
else None
)
logits = cuda_graph["logits"][:bs]
return logits, speculative_logits
@tracer.start_as_current_span("generate_token")
def generate_token(
self, batch: FlashCausalLMBatch
) -> Tuple[List[Generation], Optional[FlashCausalLMBatch], Tuple[int, int]]:
start = time.time_ns()
prefill = batch.prefilling
if prefill:
batch.prepare_for_prefill()
if hasattr(self, "set_inputs_embeds") and callable(self.set_inputs_embeds):
self.set_inputs_embeds(batch)
prefill_logprobs = batch.prefill_next_token_indices is not None
# Update adapter indices for speculative tokens (if present)
adapter_meta = batch.adapter_meta
if batch.speculative_ids is not None:
B, speculative_length = batch.speculative_ids.shape
new_length = speculative_length + 1
adapter_indices = (
adapter_meta.adapter_indices.unsqueeze(-1)
.expand(B, new_length)
.reshape(-1)
)
adapter_segments = adapter_meta.adapter_segments * new_length
adapter_meta = AdapterBatchMetadata(
adapter_indices=adapter_indices,
adapter_set=adapter_meta.adapter_set,
adapter_segments=adapter_segments,
segment_indices=adapter_meta.segment_indices,
)
# Assign pointers to adapter weights
# TODO(travis): don't update this if indices haven't changed
adapter_data = AdapterBatchData.from_meta(
adapter_meta,
self.layer_to_adapter_weights,
prefill,
batch.prefill_head_indices,
)
out, speculative_logits = self.forward(batch, adapter_data)
if prefill:
next_token_logits = (
out[batch.prefill_next_token_indices] if prefill_logprobs else out
)
if speculative_logits is not None:
speculative_logits = (
speculative_logits[batch.prefill_next_token_indices]
if prefill_logprobs
else speculative_logits
)
if len(batch) > 1 and prefill_logprobs:
# We create the prefill_tokens_indices tensor that will be used to gather prefill logprobs
# When batch == 1, we will just use the batch.input_ids values directly
prefill_tokens_indices = batch.input_ids.new_zeros(len(out))
else:
prefill_logprobs = None
next_token_logits = out
finished_prefilling = True
next_chunk_lengths = []
current_prefilling_mask = batch.prefilling_mask
if prefill:
if get_support_chunking():
next_prefilling_mask = []
# Budget in tokens for the next batch
# We remove (len(batch) - 1) to always have enough space for at least a single decode
# for the remaining requests -1 because the first request does not need to be removed from the budget
# (ex: you have one request in the batch, you want it to take the full budget not budget -1)
batch_budget = get_max_prefill_tokens() - (len(batch) - 1)
# We reverse to prioritize older requests
# zip() is not reversible so reverse the underlying lists instead
for cache_length, input_length, prompt_length in zip(
reversed(batch.cache_lengths),
reversed(batch.input_lengths),
reversed(batch.prompt_lengths),
):
remaining_prefill_tokens = max(
prompt_length - cache_length - input_length, 0
)
if remaining_prefill_tokens > 0:
next_chunk_length = max(
min(remaining_prefill_tokens, batch_budget), 1
)
batch_budget -= next_chunk_length
finished_prefilling = False
next_prefilling_mask.append(True)
else:
# FIXME: use true number of accepted tokens instead of 1
# Since speculation will be turned off, this is always true
next_chunk_length = 1
next_prefilling_mask.append(False)
next_chunk_lengths.append(next_chunk_length)
# Reverse back the obtained values
next_chunk_lengths.reverse()
next_prefilling_mask.reverse()
else:
# The model does not support chunking
# We know we only do a single prefill
finished_prefilling = True
next_prefilling_mask = [False] * len(batch)
batch.prefilling = not finished_prefilling
batch.prefilling_mask = next_prefilling_mask
speculate = get_speculate()
(
next_input_ids,
next_token_logprobs,
logprobs,
accepted_ids,
speculative_ids,
) = batch.next_token_chooser(
batch.all_input_ids_tensor[:, : batch.max_current_length],
next_token_logits,
speculate,
batch.speculative_ids,
speculative_logits,
)
batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens(
batch.top_n_tokens, batch.top_n_tokens_tensor, logprobs, accepted_ids
)
# Since we are done prefilling, all the tensors that were concatenating values for all the requests
# instantly become of shape [BATCH_SIZE]
if prefill and finished_prefilling:
indices = batch.cu_seqlen_prefill[1:] - 1
batch.position_ids = batch.position_ids[indices]
batch.slot_indices = batch.slot_indices[indices]
batch.adapter_meta.adapter_indices = batch.adapter_meta.adapter_indices[
indices
]
# Zipped iterator
iterator = zip(
batch.requests,
batch.prompt_lengths,
batch.cache_lengths,
batch.input_lengths,
batch.all_input_ids,
accepted_ids,
current_prefilling_mask,
batch.prefilling_mask,
)
# We do two for loops as the first one can run completely asynchronously from the GPU while the second
# one needs a GPU <-> CPU sync first
# It is faster if we delay this sync for the maximum amount of time
# For each member of the batch
# Cumulative length
cu_accepted_ids = accepted_ids.new_zeros(accepted_ids.shape[0] + 1)
torch.cumsum(accepted_ids, dim=0, out=cu_accepted_ids[1:])
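# cu_accepted_ids[i]:cu_accepted_ids[i + 1] spans request i's accepted tokens inside next_input_ids.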
cumulative_length = 0
for i, (
request,
prompt_length,
cache_length,
input_length,
all_input_ids,
n_accepted_ids,
request_was_prefilling,
request_is_prefilling,
) in enumerate(iterator):
# Used to gather prefill logprobs
# Copy batch.all_input_ids_tensor to prefill_tokens_indices
if request.prefill_logprobs and request_was_prefilling:
# Indexing metadata
out_start_index = batch.prefill_cu_outlens[i]
out_end_index = batch.prefill_cu_outlens[i + 1]
# Logprobs generated by the model are for the next token
# So we need to translate the id tensor by 1
ids = batch.all_input_ids_tensor[
i, cache_length + 1 : cache_length + input_length + 1
]
if len(batch) > 1:
prefill_tokens_indices[out_start_index:out_end_index] = ids
else:
# Set prefill_tokens_indices to the correct slice
prefill_tokens_indices = ids
# If the device does not support triton, we copy one by one
if not request_is_prefilling and not has_triton():
# Only save tokens if we are done prefilling for this request
batch.all_input_ids_tensor[
i,
batch.cache_lengths_tensor[i]
+ batch.input_lengths[i] : batch.cache_lengths_tensor[i]
+ batch.input_lengths[i]
+ accepted_ids[i],
] = next_input_ids[cu_accepted_ids[i] : cu_accepted_ids[i + 1]]
cumulative_length += input_length
# If the device support triton, we can use a fused kernel
if has_triton():
copy_next_input_ids_inplace(
speculate + 1,
batch.all_input_ids_tensor,
batch.cache_lengths_tensor,
batch.input_lengths_tensor,
batch.prompt_lengths_tensor,
next_input_ids,
cu_accepted_ids,
)
# Update values
# These values can be updated without a GPU -> CPU sync
if not prefill or (prefill and finished_prefilling):
batch.input_ids = next_input_ids[cu_accepted_ids[1:] - 1]
batch.speculative_ids = speculative_ids
if batch.position_ids.dim() == 2:
# Qwen2_vl case:
batch.position_ids += accepted_ids.unsqueeze(-1)
else:
batch.position_ids += accepted_ids
batch.cache_lengths_tensor += batch.input_lengths_tensor + accepted_ids - 1
batch.input_lengths_tensor = torch.ones_like(batch.input_lengths_tensor)
batch.slot_indices += accepted_ids
if prefill and prefill_logprobs:
# Get prefill logprobs with inplace softmax (avoid copying the `out` tensor (max_batch_prefill_tokens * vocab_size))
torch.log_softmax(out, -1, out=out)
prefill_logprobs_tensor = out
prefill_logprobs = torch.gather(
prefill_logprobs_tensor, 1, prefill_tokens_indices.view(-1, 1)
)
# GPU <-> CPU sync
prefill_logprobs = prefill_logprobs.view(-1).tolist()
# Does a GPU <-> CPU sync internally
if prefill and finished_prefilling:
# adjust segment lengths to account for all request lengths being 1 during decoding
adapter_segments, _ = find_segments(batch.adapter_meta.adapter_indices)
batch.adapter_meta.adapter_segments = torch.tensor(
adapter_segments,
dtype=torch.int32,
device=batch.adapter_meta.adapter_segments.device,
)
# GPU <-> CPU sync
next_token_logprobs = next_token_logprobs.tolist()
next_token_ids = next_input_ids.tolist()
accepted_ids = accepted_ids.tolist()
# Update values if we need to continue prefilling
# This represents the `else` case of the `Update values` if above
# but since this require the `next_token_ids` to be on CPU, it is better to do it here
if prefill and not finished_prefilling:
# Speculation must be ignored while we prefill, even with chunking;
# it simplifies everything
assert batch.speculative_ids is None
all_postfix_ids = []
for i, (
request_prefilling,
next_token_id,
all_input_ids,
cache_length,
input_length,
next_chunk_length,
) in enumerate(
zip(
batch.prefilling_mask,
next_token_ids,
batch.all_input_ids,
batch.cache_lengths,
batch.input_lengths,
next_chunk_lengths,
)
):
if request_prefilling:
next_cache_length = cache_length + input_length
# Get new prompt IDs to prefill
postfix_ids = all_input_ids[
next_cache_length : next_cache_length + next_chunk_length
]
else:
# This request is done prefilling; the new id is the one selected by the sampling method
postfix_ids = [next_token_id]
all_postfix_ids.append(postfix_ids)
batch.input_ids = all_postfix_ids
start_decode = time.time_ns()
# Results
generations: List[Generation] = []
stopped = True
# Zipped iterator
iterator = zip(
batch.requests,
batch.prompt_lengths,
batch.cache_lengths,
batch.input_lengths,
batch.prefix_offsets,
batch.read_offsets,
batch.stopping_criterias,
batch.all_input_ids,
batch.next_token_chooser.do_sample,
batch.next_token_chooser.seeds,
batch.top_n_tokens,
current_prefilling_mask,
batch.prefilling_mask,
accepted_ids,
batch_top_token_ids,
batch_top_token_logprobs,
)
# Reset max_input_length
batch.max_input_length = 0
# For each member of the batch
index = 0
for i, (
request,
prompt_length,
cache_length,
input_length,
prefix_offset,
read_offset,
stopping_criteria,
all_input_ids,
do_sample,
seed,
top_n_tokens,
request_was_prefilling,
request_is_prefilling,
n_accepted_ids,
top_token_ids,
top_token_logprobs,
) in enumerate(iterator):
# Compute logprobs first as, even though we might skip the token,
# it can still be required to compute the logprobs
# we take request.id modulo world_size as it is robust to batch.filter, whereas the index in the batch is not, and we need
# this state to be stable
if request.id % self.world_size == self.rank:
# Prefill
if request_was_prefilling and request.prefill_logprobs:
out_start_index = batch.prefill_cu_outlens[i]
out_end_index = batch.prefill_cu_outlens[i + 1]
if not request_is_prefilling:
# The request is done prefilling, meaning that we started generating new tokens
# The last logprob is a logprob for a generated token that was not part of the prompt
# We need to remove it
out_end_index -= 1
request_prefill_logprobs = prefill_logprobs[
out_start_index:out_end_index
]
# Logprobs generated by the model are for the next token
# So we need to translate the id tensor by 1
prefill_token_ids = all_input_ids[
cache_length + 1 : cache_length + input_length + 1
]
past_prefill_logprob_tokens = batch.prefill_logprob_tokens[i]
if past_prefill_logprob_tokens is None:
# add nan for cached prompt tokens/first token
request_prefill_logprobs = [float("nan")] * (
cache_length + 1
) + request_prefill_logprobs
prefill_token_ids = (
all_input_ids[: cache_length + 1] + prefill_token_ids
)
prefill_texts = self.tokenizer.batch_decode(
prefill_token_ids,
clean_up_tokenization_spaces=False,
skip_special_tokens=False,
)
prefill_logprob_tokens = Tokens(
prefill_token_ids,
request_prefill_logprobs,
prefill_texts,
is_special=[],
)
if past_prefill_logprob_tokens is not None:
prefill_logprob_tokens = (
past_prefill_logprob_tokens + prefill_logprob_tokens
)
batch.prefill_logprob_tokens[i] = prefill_logprob_tokens
else:
batch.prefill_logprob_tokens[i] = None
# If it is, the tokens we decoded should be ignored
if request_is_prefilling:
# Make sure that we do not stop as even though this request did not create a token, it is still
# processing
stopped = False
new_input_length = next_chunk_lengths[i]
new_cache_length = cache_length + input_length
else:
new_input_length = 1
new_cache_length = cache_length + input_length + n_accepted_ids - 1
# Append next token to all tokens
next_token_texts = []
left = 0
if n_accepted_ids > 1:
log_master(logger.debug, f"speculated ids {n_accepted_ids - 1}")
current_stopped = False
for j in range(index, index + n_accepted_ids):
# Generated token
next_token_id = next_token_ids[j]
all_input_ids.append(next_token_id)
next_token_text, prefix_offset, read_offset = self.decode_token(
all_input_ids,
prefix_offset,
read_offset,
)
next_token_texts.append(next_token_text)
stop, reason = stopping_criteria(
next_token_id,
next_token_text,
)
if stop:
left = index + n_accepted_ids - j - 1
current_stopped = True
break
else:
current_stopped = False
stopped = stopped and current_stopped
_next_token_ids = next_token_ids[index : index + n_accepted_ids - left]
_next_token_logprobs = next_token_logprobs[
index : index + n_accepted_ids - left
]
# Shard generations
# All generations will be appended in the rust sharded client
if request.id % self.world_size == self.rank:
if stop:
# Decode generated tokens
output_text, _, _ = self.decode_token(
all_input_ids,
prefix_offset=len(all_input_ids)
- stopping_criteria.current_tokens
- 1,
read_offset=len(all_input_ids)
- stopping_criteria.current_tokens,
skip_special_tokens=True,
)
generated_text = GeneratedText(
output_text,
stopping_criteria.current_tokens,
reason,
seed if do_sample else None,
)
else:
generated_text = None
if top_n_tokens > 0:
all_top_tokens = []
for top_token_ids, top_token_logprobs in zip(
top_token_ids, top_token_logprobs
):
toptoken_texts = self.tokenizer.batch_decode(
top_token_ids,
clean_up_tokenization_spaces=False,
skip_special_tokens=False,
)
special_toptokens = [
token_id in self.all_special_ids
for token_id in top_token_ids
]
top_tokens = Tokens(
top_token_ids,
top_token_logprobs,
toptoken_texts,
special_toptokens,
)
all_top_tokens.append(top_tokens)
top_tokens = all_top_tokens
else:
top_tokens = None
generation = Generation(
request.id,
batch.prefill_logprob_tokens[i],
Tokens(
_next_token_ids,
_next_token_logprobs,
next_token_texts,
[nid in self.all_special_ids for nid in _next_token_ids],
),
generated_text,
top_tokens,
)
generations.append(generation)
# accept each new token for this specific request since we may
# have more than one new token per request with speculative decoding
for next_token_id in _next_token_ids:
batch.next_token_chooser = (
batch.next_token_chooser.advance_grammar_single(
i, next_token_id
)
)
# Update values
index += n_accepted_ids
batch.cache_lengths[i] = new_cache_length
batch.max_input_length = max(batch.max_input_length, new_input_length)
batch.input_lengths[i] = new_input_length
current_length = new_cache_length + new_input_length
batch.max_current_length = max(batch.max_current_length, current_length)
batch.prefix_offsets[i] = prefix_offset
batch.read_offsets[i] = read_offset
batch.all_input_ids[i] = all_input_ids
if stopped:
# No need to return a batch if we know that all requests stopped
forward_ns = start_decode - start
decode_ns = time.time_ns() - start_decode
return generations, None, (forward_ns, decode_ns)
if prefill and finished_prefilling:
# We do not need prefill tensors anymore
batch.cu_seqlen_prefill = None
batch.prefill_cache_indices = None
batch.prefill_cu_outlens = None
batch.prefill_head_indices = None
batch.prefill_next_token_indices = None
forward_ns = start_decode - start
decode_ns = time.time_ns() - start_decode
return generations, batch, (forward_ns, decode_ns)
def _forward_context(
self,
*,
block_tables: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
input_lengths_tensor: torch.Tensor,
cache_lengths_tensor: torch.Tensor,
state: Optional[Any] = None,
attention_mask: Optional[torch.Tensor] = None,
) -> ContextManager:
if ATTENTION != "flashinfer":
return nullcontext()
from text_generation_server.layers.attention.flashinfer import (
use_decode_state,
use_prefill_with_paged_kv_state,
)
if cu_seqlen_prefill is not None:
return use_prefill_with_paged_kv_state(
state=(
state if state is not None else self.prefill_with_paged_kv_state
),
block_tables=block_tables,
cu_seqlens=cu_seqlen_prefill,
custom_mask=attention_mask,
input_lengths=input_lengths_tensor + cache_lengths_tensor,
num_heads=self.num_heads,
num_kv_heads=self.num_kv_heads,
head_size=self.head_size,
page_size=BLOCK_SIZE,
kv_dtype=self.kv_cache_dtype,
q_dtype=self.dtype,
)
else:
assert input_lengths_tensor is not None
return use_decode_state(
state=state if state is not None else self.decode_state,
input_lengths=input_lengths_tensor + cache_lengths_tensor,
block_tables=block_tables,
num_heads=self.num_heads,
num_kv_heads=self.num_kv_heads,
head_size=self.head_size,
page_size=BLOCK_SIZE,
kv_cache_dtype=self.kv_cache_dtype,
q_dtype=self.dtype,
)
| text-generation-inference/server/text_generation_server/models/flash_causal_lm.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/flash_causal_lm.py",
"repo_id": "text-generation-inference",
"token_count": 51272
} | 301 |
from text_generation_server.utils.convert import convert_file, convert_files
from text_generation_server.utils.dist import initialize_torch_distributed
from text_generation_server.utils.weights import Weights
from text_generation_server.utils.peft import download_and_unload_peft
from text_generation_server.utils.hub import (
weight_files,
weight_hub_files,
download_weights,
EntryNotFoundError,
LocalEntryNotFoundError,
RevisionNotFoundError,
)
from text_generation_server.utils.tokens import (
NextTokenChooser,
HeterogeneousNextTokenChooser,
StoppingCriteria,
StopSequenceCriteria,
FinishReason,
Sampling,
Greedy,
)
__all__ = [
"convert_file",
"convert_files",
"initialize_torch_distributed",
"weight_files",
"weight_hub_files",
"download_weights",
"download_and_unload_peft",
"EntryNotFoundError",
"HeterogeneousNextTokenChooser",
"LocalEntryNotFoundError",
"RevisionNotFoundError",
"Greedy",
"NextTokenChooser",
"Sampling",
"StoppingCriteria",
"StopSequenceCriteria",
"FinishReason",
"Weights",
]
| text-generation-inference/server/text_generation_server/utils/__init__.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/__init__.py",
"repo_id": "text-generation-inference",
"token_count": 417
} | 302 |
target
.yarn | tokenizers/bindings/node/.prettierignore/0 | {
"file_path": "tokenizers/bindings/node/.prettierignore",
"repo_id": "tokenizers",
"token_count": 5
} | 303 |
{
"name": "tokenizers-win32-ia32-msvc",
"version": "0.13.4-rc1",
"os": [
"win32"
],
"cpu": [
"ia32"
],
"main": "tokenizers.win32-ia32-msvc.node",
"files": [
"tokenizers.win32-ia32-msvc.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers"
} | tokenizers/bindings/node/npm/win32-ia32-msvc/package.json/0 | {
"file_path": "tokenizers/bindings/node/npm/win32-ia32-msvc/package.json",
"repo_id": "tokenizers",
"token_count": 277
} | 304 |
use crate::decoders::Decoder;
use crate::encoding::{JsEncoding, JsTruncationDirection, JsTruncationStrategy};
use crate::models::Model;
use crate::normalizers::Normalizer;
use crate::pre_tokenizers::PreTokenizer;
use crate::processors::Processor;
use crate::tasks::tokenizer::{DecodeBatchTask, DecodeTask, EncodeBatchTask, EncodeTask};
use crate::trainers::Trainer;
use std::collections::HashMap;
use tokenizers::Model as ModelTrait;
use napi::bindgen_prelude::*;
use napi_derive::napi;
use std::sync::{Arc, RwLock};
use tokenizers as tk;
#[napi]
#[derive(Default)]
pub enum PaddingDirection {
#[default]
Left,
Right,
}
impl From<PaddingDirection> for tk::PaddingDirection {
fn from(w: PaddingDirection) -> Self {
match w {
PaddingDirection::Left => tk::PaddingDirection::Left,
PaddingDirection::Right => tk::PaddingDirection::Right,
}
}
}
impl TryFrom<String> for PaddingDirection {
type Error = Error;
fn try_from(w: String) -> Result<Self> {
match w.as_str() {
"left" => Ok(PaddingDirection::Left),
"right" => Ok(PaddingDirection::Right),
s => Err(Error::from_reason(format!(
"{s:?} is not a valid direction"
))),
}
}
}
#[napi(object)]
#[derive(Default)]
pub struct PaddingOptions {
pub max_length: Option<u32>,
pub direction: Option<Either<String, PaddingDirection>>,
pub pad_to_multiple_of: Option<u32>,
pub pad_id: Option<u32>,
pub pad_type_id: Option<u32>,
pub pad_token: Option<String>,
}
impl TryFrom<PaddingOptions> for tk::PaddingParams {
type Error = Error;
fn try_from(value: PaddingOptions) -> Result<Self> {
let direction = match value.direction {
Some(either) => match either {
Either::A(string) => {
let direction: PaddingDirection = string.try_into()?;
direction.into()
}
Either::B(direction) => direction.into(),
},
None => tk::PaddingDirection::Right,
};
Ok(Self {
pad_to_multiple_of: value.pad_to_multiple_of.map(|s| s as usize),
pad_id: value.pad_id.unwrap_or_default(),
pad_type_id: value.pad_type_id.unwrap_or_default(),
pad_token: value.pad_token.unwrap_or("[PAD]".to_string()),
direction,
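// Pad to a fixed length when max_length is set; otherwise pad to the longest sequence in the batch.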
strategy: match value.max_length {
Some(length) => tk::PaddingStrategy::Fixed(length as usize),
None => tk::PaddingStrategy::BatchLongest,
},
})
}
}
#[napi(object)]
#[derive(Default)]
pub struct EncodeOptions {
pub is_pretokenized: Option<bool>,
pub add_special_tokens: Option<bool>,
}
#[derive(Default)]
struct EncodeOptionsDef {
// TODO
// is_pretokenized: bool,
add_special_tokens: bool,
}
impl From<EncodeOptions> for EncodeOptionsDef {
fn from(value: EncodeOptions) -> Self {
EncodeOptionsDef {
// TODO
// is_pretokenized: value.is_pretokenized.unwrap_or(false),
add_special_tokens: value.add_special_tokens.unwrap_or(true),
}
}
}
#[napi(object)]
#[derive(Default)]
pub struct TruncationOptions {
pub max_length: Option<u32>,
pub strategy: Option<JsTruncationStrategy>,
pub direction: Option<Either<String, JsTruncationDirection>>,
pub stride: Option<u32>,
}
impl TryFrom<TruncationOptions> for tk::TruncationParams {
type Error = Error;
fn try_from(value: TruncationOptions) -> Result<Self> {
let direction = match value.direction {
Some(either) => match either {
Either::A(string) => {
let direction: JsTruncationDirection = string.try_into()?;
direction.into()
}
Either::B(direction) => direction.into(),
},
None => Default::default(),
};
Ok(Self {
max_length: value.max_length.unwrap_or(0) as usize,
strategy: value.strategy.map(|s| s.into()).unwrap_or_default(),
direction,
stride: value.stride.unwrap_or_default() as usize,
})
}
}
#[napi(object)]
pub struct AddedTokenOptions {
pub single_word: Option<bool>,
pub left_strip: Option<bool>,
pub right_strip: Option<bool>,
pub normalized: Option<bool>,
}
#[napi]
#[derive(Clone)]
pub struct AddedToken {
token: tk::AddedToken,
}
#[napi]
impl AddedToken {
#[napi(constructor)]
pub fn from(token: String, is_special: bool, options: Option<AddedTokenOptions>) -> Self {
let mut token = tk::AddedToken::from(token, is_special);
if let Some(options) = options {
if let Some(sw) = options.single_word {
token = token.single_word(sw);
}
if let Some(ls) = options.left_strip {
token = token.lstrip(ls);
}
if let Some(rs) = options.right_strip {
token = token.rstrip(rs);
}
if let Some(n) = options.normalized {
token = token.normalized(n);
}
}
Self { token }
}
#[napi]
pub fn get_content(&self) -> String {
self.token.content.clone()
}
}
impl From<AddedToken> for tk::AddedToken {
fn from(v: AddedToken) -> Self {
v.token
}
}
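// Illustrative usage sketch (not part of the original file): building a
// special token that only matches whole words, then reading it back.
//
//     let mask = AddedToken::from(
//         "[MASK]".to_string(),
//         true,
//         Some(AddedTokenOptions {
//             single_word: Some(true),
//             left_strip: None,
//             right_strip: None,
//             normalized: Some(false),
//         }),
//     );
//     assert_eq!(mask.get_content(), "[MASK]");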
type RsTokenizer = tk::TokenizerImpl<Model, Normalizer, PreTokenizer, Processor, Decoder>;
#[napi]
#[derive(Clone)]
pub struct Tokenizer {
pub(crate) tokenizer: Arc<RwLock<RsTokenizer>>,
}
#[napi]
impl Tokenizer {
#[napi(constructor)]
pub fn new(model: &Model) -> Self {
Self {
tokenizer: Arc::new(RwLock::new(tk::TokenizerImpl::new((*model).clone()))),
}
}
#[napi]
pub fn set_pre_tokenizer(&mut self, pre_tokenizer: &PreTokenizer) {
self
.tokenizer
.write()
.unwrap()
.with_pre_tokenizer(Some((*pre_tokenizer).clone()));
}
#[napi]
pub fn set_decoder(&mut self, decoder: &Decoder) {
self
.tokenizer
.write()
.unwrap()
.with_decoder(Some((*decoder).clone()));
}
#[napi]
pub fn set_model(&mut self, model: &Model) {
self.tokenizer.write().unwrap().with_model((*model).clone());
}
#[napi]
pub fn set_post_processor(&mut self, post_processor: &Processor) {
self
.tokenizer
.write()
.unwrap()
.with_post_processor(Some((*post_processor).clone()));
}
#[napi]
pub fn set_normalizer(&mut self, normalizer: &Normalizer) {
self
.tokenizer
.write()
.unwrap()
.with_normalizer(Some((*normalizer).clone()));
}
#[napi]
pub fn save(&self, path: String, pretty: Option<bool>) -> Result<()> {
let pretty = pretty.unwrap_or(false);
self
.tokenizer
.read()
.unwrap()
.save(path, pretty)
.map_err(|e| Error::from_reason(format!("{e}")))
}
#[napi]
pub fn add_added_tokens(&mut self, tokens: Vec<&AddedToken>) -> u32 {
let tokens: Vec<_> = tokens
.into_iter()
.map(|tok| (*tok).clone().into())
.collect();
self.tokenizer.write().unwrap().add_tokens(&tokens) as u32
}
#[napi]
pub fn add_tokens(&mut self, tokens: Vec<String>) -> u32 {
let tokens: Vec<_> = tokens
.into_iter()
.map(|tok| tk::AddedToken::from(tok, false))
.collect();
self.tokenizer.write().unwrap().add_tokens(&tokens) as u32
}
#[napi(ts_return_type = "Promise<JsEncoding>")]
pub fn encode(
&self,
#[napi(ts_arg_type = "InputSequence")] sentence: String,
#[napi(ts_arg_type = "InputSequence | null")] pair: Option<String>,
encode_options: Option<EncodeOptions>,
) -> AsyncTask<EncodeTask<'static>> {
let options: EncodeOptionsDef = encode_options.unwrap_or_default().into();
let input: tk::EncodeInput = match pair {
Some(pair) => (sentence, pair).into(),
None => sentence.into(),
};
AsyncTask::new(EncodeTask {
tokenizer: (*self).clone(),
input: Some(input),
add_special_tokens: options.add_special_tokens,
})
}
#[napi(ts_return_type = "Promise<JsEncoding[]>")]
pub fn encode_batch(
&self,
#[napi(ts_arg_type = "EncodeInput[]")] sentences: Vec<String>,
encode_options: Option<EncodeOptions>,
) -> AsyncTask<EncodeBatchTask<'static>> {
let options: EncodeOptionsDef = encode_options.unwrap_or_default().into();
let inputs: Vec<tk::EncodeInput> = sentences
.into_iter()
.map(|sentence| sentence.into())
.collect();
AsyncTask::new(EncodeBatchTask {
tokenizer: (*self).clone(),
inputs: Some(inputs),
add_special_tokens: options.add_special_tokens,
})
}
#[napi(ts_return_type = "Promise<string>")]
pub fn decode(&self, ids: Vec<u32>, skip_special_tokens: bool) -> AsyncTask<DecodeTask> {
AsyncTask::new(DecodeTask {
tokenizer: (*self).clone(),
ids,
skip_special_tokens,
})
}
#[napi(ts_return_type = "Promise<string[]>")]
pub fn decode_batch(
&self,
ids: Vec<Vec<u32>>,
skip_special_tokens: bool,
) -> AsyncTask<DecodeBatchTask> {
AsyncTask::new(DecodeBatchTask {
tokenizer: (*self).clone(),
ids,
skip_special_tokens,
})
}
#[napi(factory)]
pub fn from_string(s: String) -> Result<Self> {
let tokenizer: tk::tokenizer::TokenizerImpl<
Model,
Normalizer,
PreTokenizer,
Processor,
Decoder,
> = s.parse().map_err(|e| Error::from_reason(format!("{e}")))?;
Ok(Self {
tokenizer: Arc::new(RwLock::new(tokenizer)),
})
}
#[napi(factory)]
pub fn from_file(file: String) -> Result<Self> {
let tokenizer = tk::tokenizer::TokenizerImpl::from_file(file)
      .map_err(|e| Error::from_reason(format!("Error loading from file: {e}")))?;
Ok(Self {
tokenizer: Arc::new(RwLock::new(tokenizer)),
})
}
#[napi]
pub fn add_special_tokens(&mut self, tokens: Vec<String>) {
let tokens: Vec<_> = tokens
.into_iter()
.map(|s| tk::AddedToken::from(s, true))
.collect();
self.tokenizer.write().unwrap().add_special_tokens(&tokens);
}
#[napi]
pub fn set_truncation(
&mut self,
max_length: u32,
options: Option<TruncationOptions>,
) -> Result<()> {
let mut options: tk::TruncationParams = if let Some(options) = options {
options.try_into()?
} else {
Default::default()
};
options.max_length = max_length as usize;
self
.tokenizer
.write()
.unwrap()
.with_truncation(Some(options))
.unwrap();
Ok(())
}
#[napi]
pub fn disable_truncation(&mut self) {
self
.tokenizer
.write()
.unwrap()
.with_truncation(None)
.unwrap();
}
#[napi]
pub fn set_padding(&mut self, options: Option<PaddingOptions>) -> Result<()> {
let options = if let Some(options) = options {
Some(options.try_into()?)
} else {
None
};
self.tokenizer.write().unwrap().with_padding(options);
Ok(())
}
#[napi]
pub fn disable_padding(&mut self) {
self.tokenizer.write().unwrap().with_padding(None);
}
#[napi]
pub fn get_decoder(&self) -> Option<Decoder> {
self.tokenizer.read().unwrap().get_decoder().cloned()
}
#[napi]
pub fn get_normalizer(&self) -> Option<Normalizer> {
self.tokenizer.read().unwrap().get_normalizer().cloned()
}
#[napi]
pub fn get_pre_tokenizer(&self) -> Option<PreTokenizer> {
self.tokenizer.read().unwrap().get_pre_tokenizer().cloned()
}
#[napi]
pub fn get_post_processor(&self) -> Option<Processor> {
self.tokenizer.read().unwrap().get_post_processor().cloned()
}
#[napi]
pub fn get_vocab(&self, with_added_tokens: Option<bool>) -> HashMap<String, u32> {
let with_added_tokens = with_added_tokens.unwrap_or(true);
self.tokenizer.read().unwrap().get_vocab(with_added_tokens)
}
#[napi]
pub fn get_vocab_size(&self, with_added_tokens: Option<bool>) -> u32 {
self.get_vocab(with_added_tokens).len() as u32
}
#[napi]
pub fn id_to_token(&self, id: u32) -> Option<String> {
self.tokenizer.read().unwrap().id_to_token(id)
}
#[napi]
pub fn token_to_id(&self, token: String) -> Option<u32> {
self.tokenizer.read().unwrap().token_to_id(&token)
}
#[napi]
pub fn train(&mut self, files: Vec<String>) -> Result<()> {
let mut trainer: Trainer = self
.tokenizer
.read()
.unwrap()
.get_model()
.model
.as_ref()
.unwrap()
.read()
.unwrap()
.get_trainer()
.into();
self
.tokenizer
.write()
.unwrap()
.train_from_files(&mut trainer, files)
.map_err(|e| Error::from_reason(format!("{e}")))?;
Ok(())
}
#[napi]
pub fn running_tasks(&self) -> u32 {
std::sync::Arc::strong_count(&self.tokenizer) as u32
}
#[napi]
pub fn post_process(
&self,
encoding: &JsEncoding,
pair: Option<&JsEncoding>,
add_special_tokens: Option<bool>,
) -> Result<JsEncoding> {
let add_special_tokens = add_special_tokens.unwrap_or(true);
Ok(
self
.tokenizer
.read()
.unwrap()
.post_process(
(*encoding).clone().try_into()?,
if let Some(pair) = pair {
Some((*pair).clone().try_into()?)
} else {
None
},
add_special_tokens,
)
.map_err(|e| Error::from_reason(format!("{e}")))?
.into(),
)
}
}
#[napi(object)]
#[derive(Default)]
pub struct JsFromPretrainedParameters {
pub revision: Option<String>,
pub auth_token: Option<String>,
}
| tokenizers/bindings/node/src/tokenizer.rs/0 | {
"file_path": "tokenizers/bindings/node/src/tokenizer.rs",
"repo_id": "tokenizers",
"token_count": 5695
} | 305 |
import argparse
import logging
import time
from tqdm import tqdm
from tokenizers import Tokenizer, decoders, pre_tokenizers
from tokenizers.models import BPE, WordPiece
from tokenizers.normalizers import BertNormalizer
from tokenizers.processors import BertProcessing
from transformers import BertTokenizer, GPT2Tokenizer
logging.getLogger("transformers").disabled = True
logging.getLogger("transformers.tokenization_utils").disabled = True
parser = argparse.ArgumentParser()
parser.add_argument("--type", default="gpt2", type=str, help="The type of tokenizer (bert|gpt2)")
parser.add_argument("--file", default=None, type=str, help="The file to encode")
parser.add_argument("--vocab", default=None, type=str, required=True, help="The vocab file")
parser.add_argument("--merges", default=None, type=str, help="The merges.txt file")
parser.add_argument("--debug", action="store_true", help="Verbose output")
args = parser.parse_args()
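# Example invocations (file names here are hypothetical):
#   python example.py --type gpt2 --vocab gpt2-vocab.json --merges gpt2-merges.txt --file data/big.txt
#   python example.py --type bert --vocab bert-base-uncased-vocab.txt --debug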
if args.type == "gpt2" and args.merges is None:
raise Exception("Expected merges.txt file")
if args.file is not None:
with open(args.file, "r") as fp:
text = [line.strip() for line in fp]
else:
text = """
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
""".split("\n")
if args.type == "gpt2":
print("Running GPT-2 tokenizer")
tok_p = GPT2Tokenizer.from_pretrained("gpt2")
# Create a Tokenizer using BPE
    tok_r = Tokenizer(BPE.from_file(args.vocab, args.merges))
# Use ByteLevel PreTokenizer
tok_r.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
# Use ByteLevel Decoder
tok_r.decoder = decoders.ByteLevel()
elif args.type == "bert":
print("Running Bert tokenizer")
tok_p = BertTokenizer.from_pretrained(args.vocab)
    tok_r = Tokenizer(WordPiece.from_file(args.vocab, unk_token="[UNK]", max_input_chars_per_word=100))
tok_r.normalizer = BertNormalizer(
clean_text=True,
handle_chinese_chars=True,
strip_accents=True,
lowercase=True,
)
# tok_r.pre_tokenizer = pre_tokenizers.Whitespace()
tok_r.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
tok_r.decoder = decoders.WordPiece()
tok_r.post_processor = BertProcessing(
("[SEP]", tok_r.token_to_id("[SEP]")),
("[CLS]", tok_r.token_to_id("[CLS]")),
)
else:
raise Exception(f"Unknown type {args.type}")
def tokenize_r():
return tok_r.encode_batch(text)
def tokenize_p():
return [tok_p.encode(sentence, add_special_tokens=True) for sentence in tqdm(text)]
print(f"Tokenizing {len(text)} lines")
# Rust version
start = time.time()
encoded_r = tokenize_r()
end = time.time()
time_r = end - start
print(f"Rust tokenizer took: {time_r} sec")
# Python version
start = time.time()
encoded_p = tokenize_p()
end = time.time()
time_p = end - start
print(f"Transformer tokenizer took: {time_p} sec")
print(f"SpeedUp Ratio: {time_p / time_r}")
ids_r = [sentence.ids for sentence in encoded_r]
diff_ids = 0
for i in range(0, len(encoded_r)):
if encoded_r[i].ids != encoded_p[i]:
diff_ids += 1
if args.debug:
print(encoded_r[i].ids)
print(encoded_p[i])
print(encoded_r[i].tokens)
print(tok_p.tokenize(text[i]))
print(text[i])
print("")
print(f"Ids differences: {diff_ids}")
decoded_r = tok_r.decode_batch([sentence.ids for sentence in encoded_r], False)
decoded_p = [tok_p.decode(en) for en in encoded_p]
diff_decoded = 0
for i in range(0, len(text)):
if decoded_r[i] != decoded_p[i]:
diff_decoded += 1
if args.debug:
print(f"Original: {text[i]}")
print(f"Rust: {decoded_r[i]}")
print(f"Python: {decoded_p[i]}")
print("")
print(f"Decoding differences: {diff_decoded}")
| tokenizers/bindings/python/examples/example.py/0 | {
"file_path": "tokenizers/bindings/python/examples/example.py",
"repo_id": "tokenizers",
"token_count": 1770
} | 306 |
# Generated content DO NOT EDIT
from .. import models
Model = models.Model
BPE = models.BPE
Unigram = models.Unigram
WordLevel = models.WordLevel
WordPiece = models.WordPiece
| tokenizers/bindings/python/py_src/tokenizers/models/__init__.py/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/models/__init__.py",
"repo_id": "tokenizers",
"token_count": 56
} | 307 |
from argparse import ArgumentParser
from json import dump
from logging import basicConfig, getLogger
from os import linesep, remove
from os.path import exists
from tempfile import NamedTemporaryFile
from typing import Dict, List, Tuple
from requests import get
from sentencepiece import SentencePieceProcessor
from tqdm import trange, tqdm
basicConfig()
logger = getLogger()
class SentencePieceExtractor:
"""
Extractor implementation for SentencePiece trained models.
https://github.com/google/sentencepiece
"""
def __init__(self, model: str):
# Get SentencePiece
self.sp = SentencePieceProcessor()
self.sp.Load(model)
def extract(self) -> Tuple[Dict[str, int], List[Tuple]]:
sp = self.sp
vocab = {sp.id_to_piece(index): index for index in trange(sp.GetPieceSize())}
# Merges
merges = []
for piece_l in tqdm(vocab.keys(), total=sp.GetPieceSize()):
for piece_r in vocab.keys():
merge = f"{piece_l}{piece_r}"
piece_id = vocab.get(merge, None)
                if piece_id is not None:
merges += [(piece_l, piece_r, piece_id)]
merges = sorted(merges, key=lambda val: val[2])
merges = [(val[0], val[1]) for val in merges]
return vocab, merges
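# A minimal usage sketch (the model path is hypothetical):
#
#   extractor = SentencePieceExtractor("spm.model")
#   vocab, merges = extractor.extract()
#   # every merge pair concatenates to a piece that is itself in the vocab
#   assert all(f"{l}{r}" in vocab for l, r in merges)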
class YouTokenToMeExtractor:
"""
    Extractor implementation for the YouTokenToMe trained model format.
    Models are laid out as follows:
        vocab_size nb_merges
        piece piece_id
        ...(repeated vocab_size times)
        piece_id_left piece_id_right piece_id
        ...(repeated nb_merges times)
"""
def __init__(self, model: str):
self._model = model
def extract(self) -> Tuple[Dict[str, int], List[Tuple]]:
with open(self._model, "r") as model_f:
# Retrieve information
nb_pieces, nb_merges = map(int, model_f.readline().split())
vocab, merges = {}, []
# Vocab
for _ in trange(nb_pieces):
piece, piece_id = map(int, model_f.readline().split())
vocab[piece_id] = chr(piece)
# Merges
for _ in trange(nb_merges):
piece_id_l, piece_id_r, piece = map(int, model_f.readline().split())
piece_l, piece_r = vocab[piece_id_l], vocab[piece_id_r]
vocab[piece] = f"{piece_l}{piece_r}"
merges += [(piece_l, piece_r)]
# Special tokens
unk, pad, bos, eos = map(int, model_f.readline().split())
vocab[unk] = "<unk>"
vocab[pad] = "<pad>"
vocab[bos] = "<bos>"
vocab[eos] = "<eos>"
# Invert key and value for vocab
vocab = dict(zip(vocab.values(), vocab.keys()))
return vocab, merges
if __name__ == "__main__":
parser = ArgumentParser("SentencePiece vocab extractor")
parser.add_argument(
"--provider",
type=str,
required=True,
choices=["sentencepiece", "youtokentome"],
help="Indicate the format of the file.",
)
parser.add_argument("--model", type=str, required=True, help="SentencePiece model to extract vocab from.")
parser.add_argument(
"--vocab-output-path",
type=str,
required=True,
help="Path where the vocab.json file will be extracted",
)
parser.add_argument(
"--merges-output-path",
type=str,
required=True,
help="Path where the merges file will be extracted",
)
# Parse cli arguments
args = parser.parse_args()
try:
if args.model.startswith("http"):
# Saving model
with NamedTemporaryFile("wb", delete=False) as f:
logger.info("Writing content from {} to {}".format(args.model, f.name))
response = get(args.model, allow_redirects=True)
f.write(response.content)
args.remote_model = args.model
args.model = f.name
# Allocate extractor
extractor = SentencePieceExtractor if args.provider == "sentencepiece" else YouTokenToMeExtractor
extractor = extractor(args.model)
logger.info(f"Using {type(extractor).__name__}")
# Open output files and let's extract model information
with open(args.vocab_output_path, "w") as vocab_f:
with open(args.merges_output_path, "w") as merges_f:
# Do the extraction
vocab, merges = extractor.extract()
# Save content
dump(vocab, vocab_f)
merges_f.writelines(map(lambda x: f"{x[0]} {x[1]}{linesep}", merges))
finally:
# If model was downloaded from internet we need to cleanup the tmp folder.
if hasattr(args, "remote_model") and exists(args.model):
remove(args.model)
| tokenizers/bindings/python/scripts/sentencepiece_extractor.py/0 | {
"file_path": "tokenizers/bindings/python/scripts/sentencepiece_extractor.py",
"repo_id": "tokenizers",
"token_count": 2231
} | 308 |
use super::regex::PyRegex;
use super::{DestroyPtr, RefMutContainer, RefMutGuard};
use crate::error::ToPyResult;
use pyo3::exceptions;
use pyo3::prelude::*;
use pyo3::types::*;
use tk::normalizer::{char_to_bytes, NormalizedString, Range, SplitDelimiterBehavior};
use tk::pattern::Pattern;
/// Represents a Pattern as used by `NormalizedString`
#[derive(FromPyObject)]
pub enum PyPattern {
#[pyo3(annotation = "str")]
Str(String),
#[pyo3(annotation = "tokenizers.Regex")]
Regex(Py<PyRegex>),
// TODO: Add the compatibility for Fn(char) -> bool
}
impl Pattern for PyPattern {
fn find_matches(&self, inside: &str) -> tk::Result<Vec<(tk::Offsets, bool)>> {
match self {
PyPattern::Str(s) => {
let mut chars = s.chars();
if let (Some(c), None) = (chars.next(), chars.next()) {
c.find_matches(inside)
} else {
s.find_matches(inside)
}
}
PyPattern::Regex(r) => {
Python::with_gil(|py| (&r.borrow(py).inner).find_matches(inside))
}
}
}
}
impl From<PyPattern> for tk::normalizers::replace::ReplacePattern {
fn from(pattern: PyPattern) -> Self {
match pattern {
PyPattern::Str(s) => Self::String(s.to_owned()),
PyPattern::Regex(r) => Python::with_gil(|py| Self::Regex(r.borrow(py).pattern.clone())),
}
}
}
impl From<PyPattern> for tk::pre_tokenizers::split::SplitPattern {
fn from(pattern: PyPattern) -> Self {
match pattern {
PyPattern::Str(s) => Self::String(s.to_owned()),
PyPattern::Regex(r) => Python::with_gil(|py| Self::Regex(r.borrow(py).pattern.clone())),
}
}
}
#[derive(Debug, Clone, FromPyObject)]
pub enum PyRange<'s> {
#[pyo3(annotation = "int")]
Single(isize),
#[pyo3(annotation = "Tuple[uint, uint]")]
Range(usize, usize),
#[pyo3(annotation = "slice")]
Slice(Bound<'s, PySlice>),
}
impl PyRange<'_> {
pub fn to_range(&self, max_len: usize) -> PyResult<std::ops::Range<usize>> {
match self {
PyRange::Single(i) => {
if i.is_negative() {
let i = -i as usize;
if i > max_len {
Err(exceptions::PyValueError::new_err(format!(
"{i} is bigger than max len"
)))
} else {
Ok(max_len - i..max_len - i + 1)
}
} else {
let i = *i as usize;
Ok(i..i + 1)
}
}
PyRange::Range(s, e) => Ok(*s..*e),
PyRange::Slice(s) => {
let r = s.indices(max_len.try_into()?)?;
Ok(r.start as usize..r.stop as usize)
}
}
}
}
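// Worked examples for `to_range` (illustrative, assuming `max_len == 5`):
// - `PyRange::Single(2)`   resolves to `2..3`
// - `PyRange::Single(-1)`  resolves to `4..5` (negative indices count from the end)
// - `PyRange::Range(1, 4)` resolves to `1..4`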
#[derive(Clone)]
pub struct PySplitDelimiterBehavior(pub SplitDelimiterBehavior);
impl FromPyObject<'_> for PySplitDelimiterBehavior {
fn extract_bound(obj: &Bound<'_, PyAny>) -> PyResult<Self> {
let s = obj.extract::<String>()?;
Ok(Self(match s.as_ref() {
"removed" => Ok(SplitDelimiterBehavior::Removed),
"isolated" => Ok(SplitDelimiterBehavior::Isolated),
"merged_with_previous" => Ok(SplitDelimiterBehavior::MergedWithPrevious),
"merged_with_next" => Ok(SplitDelimiterBehavior::MergedWithNext),
"contiguous" => Ok(SplitDelimiterBehavior::Contiguous),
_ => Err(exceptions::PyValueError::new_err(
"Wrong value for SplitDelimiterBehavior, expected one of: \
`removed, isolated, merged_with_previous, merged_with_next, contiguous`",
)),
}?))
}
}
impl From<PySplitDelimiterBehavior> for SplitDelimiterBehavior {
fn from(v: PySplitDelimiterBehavior) -> Self {
v.0
}
}
impl From<SplitDelimiterBehavior> for PySplitDelimiterBehavior {
fn from(v: SplitDelimiterBehavior) -> Self {
Self(v)
}
}
fn filter(normalized: &mut NormalizedString, func: &Bound<'_, PyAny>) -> PyResult<()> {
let err = "`filter` expect a callable with the signature: `fn(char) -> bool`";
if !func.is_callable() {
Err(exceptions::PyTypeError::new_err(err))
} else {
normalized.filter(|c| {
func.call1((c.to_string(),))
.expect(err)
.extract()
.expect(err)
});
Ok(())
}
}
fn for_each(normalized: &NormalizedString, func: &Bound<'_, PyAny>) -> PyResult<()> {
let err = "`for_each` expect a callable with the signature: `fn(char)`";
if !func.is_callable() {
Err(exceptions::PyTypeError::new_err(err))
} else {
normalized.for_each(|c| {
func.call1((c.to_string(),)).expect(err);
});
Ok(())
}
}
fn map(normalized: &mut NormalizedString, func: &Bound<'_, PyAny>) -> PyResult<()> {
let err = "`map` expect a callable with the signature: `fn(char) -> char`";
if !func.is_callable() {
Err(exceptions::PyTypeError::new_err(err))
} else {
normalized.map(|c| {
let c: String = func
.call1((c.to_string(),))
.expect(err)
.extract()
.expect(err);
c.chars().next().expect(err)
});
Ok(())
}
}
fn slice(
normalized: &NormalizedString,
range: &PyRange<'_>,
) -> PyResult<Option<PyNormalizedString>> {
let n_char = normalized.len();
let char_range = range.to_range(n_char)?;
Ok(
char_to_bytes(normalized.get(), char_range).and_then(|bytes_range| {
normalized
.slice(Range::Normalized(bytes_range))
.map(|n| n.into())
}),
)
}
/// NormalizedString
///
/// A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one.
/// While making all the requested modifications, it keeps track of the alignment information
/// between the two versions of the string.
///
/// Args:
/// sequence: str:
/// The string sequence used to initialize this NormalizedString
#[pyclass(module = "tokenizers", name = "NormalizedString")]
#[derive(Clone)]
pub struct PyNormalizedString {
pub(crate) normalized: NormalizedString,
}
#[pymethods]
impl PyNormalizedString {
#[new]
#[pyo3(text_signature = None)]
fn new(s: &str) -> Self {
NormalizedString::from(s).into()
}
/// The normalized part of the string
#[getter]
fn get_normalized(&self) -> &str {
self.normalized.get()
}
#[getter]
fn get_original(&self) -> &str {
self.normalized.get_original()
}
/// Runs the NFD normalization
#[pyo3(text_signature = "(self)")]
fn nfd(&mut self) {
self.normalized.nfd();
}
/// Runs the NFKD normalization
#[pyo3(text_signature = "(self)")]
fn nfkd(&mut self) {
self.normalized.nfkd();
}
/// Runs the NFC normalization
#[pyo3(text_signature = "(self)")]
fn nfc(&mut self) {
self.normalized.nfc();
}
/// Runs the NFKC normalization
#[pyo3(text_signature = "(self)")]
fn nfkc(&mut self) {
self.normalized.nfkc();
}
/// Lowercase the string
#[pyo3(text_signature = "(self)")]
fn lowercase(&mut self) {
self.normalized.lowercase();
}
/// Uppercase the string
#[pyo3(text_signature = "(self)")]
fn uppercase(&mut self) {
self.normalized.uppercase();
}
/// Prepend the given sequence to the string
#[pyo3(text_signature = "(self, s)")]
fn prepend(&mut self, s: &str) {
self.normalized.prepend(s);
}
/// Append the given sequence to the string
#[pyo3(text_signature = "(self, s)")]
fn append(&mut self, s: &str) {
self.normalized.append(s);
}
/// Strip the left of the string
#[pyo3(text_signature = "(self)")]
fn lstrip(&mut self) {
self.normalized.lstrip();
}
/// Strip the right of the string
#[pyo3(text_signature = "(self)")]
fn rstrip(&mut self) {
self.normalized.rstrip();
}
/// Strip both ends of the string
#[pyo3(text_signature = "(self)")]
fn strip(&mut self) {
self.normalized.strip();
}
/// Clears the string
#[pyo3(text_signature = "(self)")]
fn clear(&mut self) {
self.normalized.clear();
}
/// Slice the string using the given range
#[pyo3(text_signature = "(self, range)")]
fn slice(&self, range: PyRange) -> PyResult<Option<PyNormalizedString>> {
slice(&self.normalized, &range)
}
/// Filter each character of the string using the given func
#[pyo3(text_signature = "(self, func)")]
fn filter(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> {
filter(&mut self.normalized, func)
}
/// Calls the given function for each character of the string
#[pyo3(text_signature = "(self, func)")]
fn for_each(&self, func: &Bound<'_, PyAny>) -> PyResult<()> {
for_each(&self.normalized, func)
}
/// Calls the given function for each character of the string
///
/// Replaces each character of the string using the returned value. Each
/// returned value **must** be a str of length 1 (ie a character).
#[pyo3(text_signature = "(self, func)")]
fn map(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> {
map(&mut self.normalized, func)
}
/// Split the NormalizedString using the given pattern and the specified behavior
///
/// Args:
/// pattern: Pattern:
/// A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`
///
/// behavior: SplitDelimiterBehavior:
/// The behavior to use when splitting.
/// Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
/// "contiguous"
///
/// Returns:
/// A list of NormalizedString, representing each split
#[pyo3(text_signature = "(self, pattern, behavior)")]
fn split(
&mut self,
pattern: PyPattern,
behavior: PySplitDelimiterBehavior,
) -> PyResult<Vec<PyNormalizedString>> {
Ok(ToPyResult(self.normalized.split(pattern, behavior.into()))
.into_py()?
.into_iter()
.map(|n| n.into())
.collect())
}
/// Replace the content of the given pattern with the provided content
///
/// Args:
/// pattern: Pattern:
/// A pattern used to match the string. Usually a string or a Regex
///
/// content: str:
/// The content to be used as replacement
#[pyo3(text_signature = "(self, pattern, content)")]
fn replace(&mut self, pattern: PyPattern, content: &str) -> PyResult<()> {
ToPyResult(self.normalized.replace(pattern, content)).into()
}
fn __repr__(&self) -> String {
format!(
r#"NormalizedString(original="{}", normalized="{}")"#,
self.normalized.get_original(),
self.normalized.get()
)
}
fn __str__(&self) -> &str {
self.normalized.get()
}
fn __getitem__(&self, range: PyRange<'_>) -> PyResult<Option<PyNormalizedString>> {
slice(&self.normalized, &range)
}
}
impl From<NormalizedString> for PyNormalizedString {
fn from(normalized: NormalizedString) -> Self {
Self { normalized }
}
}
impl From<PyNormalizedString> for NormalizedString {
fn from(normalized: PyNormalizedString) -> Self {
normalized.normalized
}
}
#[pyclass(module = "tokenizers", name = "NormalizedStringRefMut")]
#[derive(Clone)]
pub struct PyNormalizedStringRefMut {
inner: RefMutContainer<NormalizedString>,
}
impl DestroyPtr for PyNormalizedStringRefMut {
fn destroy(&mut self) {
self.inner.destroy();
}
}
impl PyNormalizedStringRefMut {
pub fn new(normalized: &mut NormalizedString) -> RefMutGuard<'_, Self> {
RefMutGuard::new(Self {
inner: RefMutContainer::new(normalized),
})
}
pub fn destroyed_error() -> PyErr {
exceptions::PyException::new_err("Cannot use a NormalizedStringRefMut outside `normalize`")
}
/// Provides a way to access a reference to the underlying NormalizedString
pub fn map_as_ref<F: FnOnce(&NormalizedString) -> U, U>(&self, f: F) -> PyResult<U> {
self.inner
.map(f)
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)
}
/// Provides a way to access a mutable reference to the underlying NormalizedString
pub fn map_as_mut<F: FnOnce(&mut NormalizedString) -> U, U>(&mut self, f: F) -> PyResult<U> {
self.inner
.map_mut(f)
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)
}
}
#[pymethods]
impl PyNormalizedStringRefMut {
#[getter]
fn get_normalized(&self) -> PyResult<String> {
self.inner
.map(|n| n.get().to_owned())
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)
}
#[getter]
fn get_original(&self) -> PyResult<String> {
self.inner
.map(|n| n.get_original().to_owned())
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)
}
fn nfd(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.nfd();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn nfkd(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.nfkd();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn nfc(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.nfc();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn nfkc(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.nfkc();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn lowercase(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.lowercase();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn uppercase(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.uppercase();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn prepend(&mut self, s: &str) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.prepend(s);
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn append(&mut self, s: &str) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.append(s);
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn lstrip(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.lstrip();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn rstrip(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.rstrip();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn strip(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.strip();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn clear(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.clear();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn slice(&self, range: PyRange) -> PyResult<Option<PyNormalizedString>> {
self.inner
.map(|n| slice(n, &range))
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?
}
fn filter(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> {
self.inner
.map_mut(|n| filter(n, func))
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)??;
Ok(())
}
fn for_each(&self, func: &Bound<'_, PyAny>) -> PyResult<()> {
self.inner
.map(|n| for_each(n, func))
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)??;
Ok(())
}
fn map(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> {
self.inner
.map_mut(|n| map(n, func))
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)??;
Ok(())
}
fn split(
&mut self,
pattern: PyPattern,
behavior: PySplitDelimiterBehavior,
) -> PyResult<Vec<PyNormalizedString>> {
Ok(ToPyResult(
self.inner
.map_mut(|n| n.split(pattern, behavior.into()))
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?,
)
.into_py()?
.into_iter()
.map(|n| n.into())
.collect())
}
fn replace(&mut self, pattern: PyPattern, content: &str) -> PyResult<()> {
ToPyResult(
self.inner
.map_mut(|n| n.replace(pattern, content))
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?,
)
.into()
}
}
| tokenizers/bindings/python/src/utils/normalization.rs/0 | {
"file_path": "tokenizers/bindings/python/src/utils/normalization.rs",
"repo_id": "tokenizers",
"token_count": 8532
} | 309 |
# Decoders
<tokenizerslangcontent>
<python>
## BPEDecoder
[[autodoc]] tokenizers.decoders.BPEDecoder
## ByteLevel
[[autodoc]] tokenizers.decoders.ByteLevel
## CTC
[[autodoc]] tokenizers.decoders.CTC
## Metaspace
[[autodoc]] tokenizers.decoders.Metaspace
## WordPiece
[[autodoc]] tokenizers.decoders.WordPiece
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | tokenizers/docs/source-doc-builder/api/decoders.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/api/decoders.mdx",
"repo_id": "tokenizers",
"token_count": 197
} | 310 |
# Training from memory
In the [Quicktour](quicktour), we saw how to build and train a
tokenizer using text files, but we can actually use any Python Iterator.
In this section we'll see a few different ways of training our
tokenizer.
For all the examples listed below, we'll use the same [`~tokenizers.Tokenizer`] and
[`~tokenizers.trainers.Trainer`], built as
following:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START init_tokenizer_trainer",
"end-before": "END init_tokenizer_trainer",
"dedent": 8}
</literalinclude>
This tokenizer is based on the [`~tokenizers.models.Unigram`] model. It
takes care of normalizing the input using the NFKC Unicode normalization
method, and uses a [`~tokenizers.pre_tokenizers.ByteLevel`] pre-tokenizer with the corresponding decoder.
For more information on the components used here, you can check
[here](components).
## The most basic way
As you probably guessed already, the easiest way to train our tokenizer
is by using a `List`:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START train_basic",
"end-before": "END train_basic",
"dedent": 8}
</literalinclude>
Easy, right? You can use anything that works as an iterator here, be it a
`List`, a `Tuple`, or a `np.Array`. Anything works as long as it provides
strings.
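If you would rather see the call inline, it boils down to something like
this minimal sketch (assuming the `tokenizer` and `trainer` built above):
```python
data = [
    "Beautiful is better than ugly.",
    "Explicit is better than implicit.",
]
tokenizer.train_from_iterator(data, trainer=trainer)
```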
## Using the 🤗 Datasets library
An awesome way to access one of the many datasets that exist out there
is by using the 🤗 Datasets library. For more information about it, you
should check [the official documentation
here](https://huggingface.co/docs/datasets/).
Let's start by loading our dataset:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START load_dataset",
"end-before": "END load_dataset",
"dedent": 8}
</literalinclude>
The next step is to build an iterator over this dataset. The easiest way
to do this is probably by using a generator:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START def_batch_iterator",
"end-before": "END def_batch_iterator",
"dedent": 8}
</literalinclude>
As you can see here, for improved efficiency we can provide a batch of
examples to train on, instead of iterating over them one by one. By doing
so, we can expect performance very similar to what we got while training
directly from files.
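Inlined for reference, the batched generator described above can be as
simple as this sketch (assuming the `dataset` loaded above exposes a
`"text"` column):
```python
def batch_iterator(batch_size=1000):
    # Yield successive slices of the "text" column instead of single rows
    for i in range(0, len(dataset), batch_size):
        yield dataset[i : i + batch_size]["text"]
```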
With our iterator ready, we just need to launch the training. In order
to improve the look of our progress bars, we can specify the total
length of the dataset:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START train_datasets",
"end-before": "END train_datasets",
"dedent": 8}
</literalinclude>
And that's it!
## Using gzip files
Since gzip files in Python can be used as iterators, it is extremely
simple to train on such files:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START single_gzip",
"end-before": "END single_gzip",
"dedent": 8}
</literalinclude>
Now if we wanted to train from multiple gzip files, it wouldn't be much
harder:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START multi_gzip",
"end-before": "END multi_gzip",
"dedent": 8}
</literalinclude>
And voilà!
| tokenizers/docs/source-doc-builder/training_from_memory.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/training_from_memory.mdx",
"repo_id": "tokenizers",
"token_count": 1199
} | 311 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("./_ext"))
sys.path.insert(0, os.path.abspath("."))
# -- Project information -----------------------------------------------------
project = "tokenizers"
copyright = "2020, huggingface"
author = "huggingface"
# The full version, including alpha/beta/rc tags
release = ""
# -- Custom information ------------------------------------------------------
# The possible values for languages (used by `_ext/entities`)
languages = ["node", "rust", "python"]
# This defines the version used to generate links to docs.rs
rust_version = "latest"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon", "entities", "rust_doc", "toctree_tags"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {"analytics_id": "UA-83738774-2"}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
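# Each entry in `languages` doubles as a Sphinx tag: building with e.g.
# `sphinx-build -t python ...` keeps only the Python tutorials, the other
# languages' tutorials being excluded in `setup` below.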
def setup(app):
for language in languages:
if not tags.has(language):
exclude_patterns.append(f"tutorials/{language}/*")
app.add_css_file("css/huggingface.css")
app.add_css_file("css/code-snippets.css")
app.add_js_file("js/custom.js")
| tokenizers/docs/source/conf.py/0 | {
"file_path": "tokenizers/docs/source/conf.py",
"repo_id": "tokenizers",
"token_count": 781
} | 312 |
#[macro_use]
extern crate criterion;
mod common;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::Path;
use criterion::{Criterion, Throughput};
use tokenizers::models::wordpiece::{WordPiece, WordPieceTrainerBuilder};
use tokenizers::normalizers::{BertNormalizer, NormalizerWrapper};
use tokenizers::pre_tokenizers::bert::BertPreTokenizer;
use tokenizers::processors::bert::BertProcessing;
use tokenizers::{decoders, EncodeInput, Model, TokenizerImpl};
use common::{iter_bench_encode, iter_bench_encode_batch, iter_bench_train};
use tokenizers::decoders::DecoderWrapper;
use tokenizers::pre_tokenizers::whitespace::Whitespace;
use tokenizers::processors::PostProcessorWrapper;
static BATCH_SIZE: usize = 1_000;
type BertTokenizer = TokenizerImpl<
WordPiece,
BertNormalizer,
BertPreTokenizer,
BertProcessing,
decoders::wordpiece::WordPiece,
>;
/// Resembles the BertTokenizer implementation from the Python bindings.
fn create_bert_tokenizer(wp: WordPiece) -> BertTokenizer {
let sep_id = *wp.get_vocab().get("[SEP]").unwrap();
let cls_id = *wp.get_vocab().get("[CLS]").unwrap();
let mut tokenizer = TokenizerImpl::new(wp);
tokenizer.with_pre_tokenizer(Some(BertPreTokenizer));
tokenizer.with_normalizer(Some(BertNormalizer::default()));
tokenizer.with_decoder(Some(decoders::wordpiece::WordPiece::default()));
tokenizer.with_post_processor(Some(BertProcessing::new(
("[SEP]".to_string(), sep_id),
("[CLS]".to_string(), cls_id),
)));
tokenizer
}
pub fn bench_bert(c: &mut Criterion) {
let wp = WordPiece::from_file("data/bert-base-uncased-vocab.txt")
.build()
.unwrap();
let tokenizer = create_bert_tokenizer(wp);
let mut group = c.benchmark_group("bert-encode");
let data = std::fs::read_to_string("data/big.txt").unwrap();
group.throughput(Throughput::Bytes(data.len() as u64));
let mut lines: Vec<EncodeInput> = vec![];
let mut batches: Vec<Vec<EncodeInput>> = vec![vec![]];
for line in BufReader::new(File::open(Path::new("data/big.txt")).unwrap()).lines() {
let line: EncodeInput = line.unwrap().into();
lines.push(line.clone());
if batches.last().unwrap().len() >= BATCH_SIZE {
batches.push(vec![]);
}
batches.last_mut().unwrap().push(line);
}
group.bench_function("WordPiece BERT encode", |b| {
b.iter_custom(|iters| iter_bench_encode(iters, &tokenizer, &lines))
});
group.bench_function("WordPiece BERT encode batch", |b| {
b.iter_custom(|iters| iter_bench_encode_batch(iters, &tokenizer, &batches))
});
}
fn bench_train_small(c: &mut Criterion) {
let mut trainer = WordPieceTrainerBuilder::default()
.show_progress(false)
.build();
type Tok = TokenizerImpl<
WordPiece,
NormalizerWrapper,
Whitespace,
PostProcessorWrapper,
DecoderWrapper,
>;
let mut tokenizer = Tok::new(WordPiece::default());
tokenizer.with_pre_tokenizer(Some(Whitespace {}));
let mut group = c.benchmark_group("bert-train-small");
let data = std::fs::read_to_string("data/small.txt").unwrap();
group.throughput(Throughput::Bytes(data.len() as u64));
group.bench_function("WordPiece Train vocabulary (small)", |b| {
b.iter_custom(|iters| {
iter_bench_train(
iters,
&mut tokenizer,
&mut trainer,
vec!["data/small.txt".to_string()],
)
})
});
}
fn bench_train_big(c: &mut Criterion) {
let mut trainer = WordPieceTrainerBuilder::default()
.show_progress(false)
.build();
type Tok = TokenizerImpl<
WordPiece,
NormalizerWrapper,
Whitespace,
PostProcessorWrapper,
DecoderWrapper,
>;
let mut tokenizer = Tok::new(WordPiece::default());
tokenizer.with_pre_tokenizer(Some(Whitespace {}));
let mut group = c.benchmark_group("bert-train-big");
let data = std::fs::read_to_string("data/big.txt").unwrap();
group.throughput(Throughput::Bytes(data.len() as u64));
group.bench_function("WordPiece Train vocabulary (big)", |b| {
b.iter_custom(|iters| {
iter_bench_train(
iters,
&mut tokenizer,
&mut trainer,
vec!["data/big.txt".to_string()],
)
})
});
}
criterion_group! {
name = bert_benches;
config = Criterion::default().sample_size(20);
targets = bench_bert
}
criterion_group! {
name = benches_train_small;
config = Criterion::default().sample_size(10);
targets = bench_train_small
}
criterion_group! {
name = benches_train_big;
config = Criterion::default().sample_size(10);
targets = bench_train_big
}
criterion_main!(bert_benches, benches_train_small, benches_train_big);
| tokenizers/tokenizers/benches/bert_benchmark.rs/0 | {
"file_path": "tokenizers/tokenizers/benches/bert_benchmark.rs",
"repo_id": "tokenizers",
"token_count": 2072
} | 313 |
language: node_js
node_js: "10"
script:
- ./node_modules/.bin/webpack
| tokenizers/tokenizers/examples/unstable_wasm/www/.travis.yml/0 | {
"file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/.travis.yml",
"repo_id": "tokenizers",
"token_count": 30
} | 314 |
use crate::decoders::DecoderWrapper;
use crate::tokenizer::{Decoder, Result};
use crate::utils::macro_rules_attribute;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Sequence {
decoders: Vec<DecoderWrapper>,
}
impl Sequence {
pub fn new(decoders: Vec<DecoderWrapper>) -> Self {
Self { decoders }
}
pub fn get_decoders(&self) -> &[DecoderWrapper] {
&self.decoders
}
pub fn get_decoders_mut(&mut self) -> &mut [DecoderWrapper] {
&mut self.decoders
}
}
impl Decoder for Sequence {
fn decode_chain(&self, mut tokens: Vec<String>) -> Result<Vec<String>> {
for decoder in &self.decoders {
tokens = decoder.decode_chain(tokens)?;
}
Ok(tokens)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::decoders::ctc::CTC;
use crate::pre_tokenizers::metaspace::Metaspace;
#[test]
fn sequence_basic() {
let decoders = vec![
DecoderWrapper::CTC(CTC::default()),
DecoderWrapper::Metaspace(Metaspace::default()),
];
let decoder = Sequence::new(decoders);
let tokens: Vec<String> = vec!["▁", "▁", "H", "H", "i", "i", "▁", "y", "o", "u"]
.into_iter()
.map(|s| s.to_string())
.collect();
let out_tokens = decoder.decode(tokens).unwrap();
assert_eq!(out_tokens, "Hi you");
}
}
| tokenizers/tokenizers/src/decoders/sequence.rs/0 | {
"file_path": "tokenizers/tokenizers/src/decoders/sequence.rs",
"repo_id": "tokenizers",
"token_count": 689
} | 315 |
use super::OrderedVocabIter;
use crate::tokenizer::{Model, Result, Token};
use ahash::AHashMap;
use serde_json::Value;
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufReader, Read, Write};
use std::path::{Path, PathBuf};
mod serialization;
mod trainer;
// Re-export
pub use trainer::*;
type Vocab = AHashMap<String, u32>;
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error("WordLevel error: Missing [UNK] token from the vocabulary")]
MissingUnkToken,
#[error("Bad vocabulary json file")]
BadVocabulary,
}
struct Config {
files: Option<String>,
vocab: AHashMap<String, u32>,
unk_token: String,
}
/// A `WordLevelBuilder` can be used to create a `WordLevel`
/// model with a custom configuration.
pub struct WordLevelBuilder {
config: Config,
}
impl Default for WordLevelBuilder {
fn default() -> Self {
Self {
config: Config {
files: None,
vocab: AHashMap::new(),
unk_token: String::from("<unk>"),
},
}
}
}
impl WordLevelBuilder {
/// Construct a new `WordLevelBuilder`.
pub fn new() -> Self {
Self::default()
}
/// Set the input files.
#[must_use]
pub fn files(mut self, vocab: String) -> Self {
self.config.files = Some(vocab);
self
}
/// Set the vocab (token -> ID) mapping.
#[must_use]
pub fn vocab(mut self, vocab: AHashMap<String, u32>) -> Self {
self.config.vocab = vocab;
self
}
    /// Set the `UNK` token for the vocab.
#[must_use]
pub fn unk_token(mut self, unk_token: String) -> Self {
self.config.unk_token = unk_token;
self
}
/// Constructs a `WordLevel` model that uses the `WordLevelBuilder`'s configuration.
pub fn build(mut self) -> Result<WordLevel> {
if let Some(vocab) = self.config.files {
self.config.vocab = WordLevel::read_file(&vocab)?;
}
let vocab_r = self
.config
.vocab
.iter()
.map(|(key, val)| (*val, key.to_owned()))
.collect();
Ok(WordLevel {
vocab: self.config.vocab,
vocab_r,
unk_token: self.config.unk_token,
})
}
}
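// A minimal usage sketch (illustrative, not part of the original file):
//
//     let mut vocab = AHashMap::new();
//     vocab.insert("hello".to_string(), 0);
//     let model = WordLevel::builder()
//         .vocab(vocab)
//         .unk_token("<unk>".to_string())
//         .build()
//         .unwrap();
//     assert_eq!(model.token_to_id("hello"), Some(0));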
#[derive(PartialEq, Clone, Eq)]
pub struct WordLevel {
vocab: AHashMap<String, u32>,
vocab_r: AHashMap<u32, String>,
pub unk_token: String,
}
impl std::fmt::Debug for WordLevel {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("WordLevel")
.field("unk_token", &self.unk_token)
.field("vocab", &self.vocab.len())
.finish()
}
}
impl WordLevel {
pub fn builder() -> WordLevelBuilder {
WordLevelBuilder::new()
}
pub fn read_file(vocab_path: &str) -> Result<Vocab> {
let vocab_file = File::open(vocab_path)?;
let mut vocab_file = BufReader::new(vocab_file);
let mut buffer = String::new();
let mut vocab = AHashMap::new();
vocab_file.read_to_string(&mut buffer)?;
let json: Value = serde_json::from_str(&buffer)?;
match json {
Value::Object(m) => {
for (token, id) in m {
if let Value::Number(id) = id {
let id = id.as_u64().ok_or(Error::BadVocabulary)? as u32;
vocab.insert(token, id);
}
}
}
_ => return Err(Box::new(Error::BadVocabulary)),
};
Ok(vocab)
}
    /// Initialize a WordLevel model from a vocab file.
pub fn from_file(vocab_path: &str, unk_token: String) -> Result<WordLevel> {
let vocab = WordLevel::read_file(vocab_path)?;
Self::builder().vocab(vocab).unk_token(unk_token).build()
}
}
impl Default for WordLevel {
fn default() -> Self {
Self {
vocab: AHashMap::new(),
vocab_r: AHashMap::new(),
unk_token: String::from("<unk>"),
}
}
}
impl Model for WordLevel {
type Trainer = WordLevelTrainer;
fn tokenize(&self, token: &str) -> Result<Vec<Token>> {
if let Some(&id) = self.vocab.get(token) {
Ok(vec![Token {
id,
value: token.to_owned(),
offsets: (0, token.len()),
}])
} else if let Some(&unk_id) = self.vocab.get(&self.unk_token) {
Ok(vec![Token {
id: unk_id,
value: self.unk_token.to_owned(),
offsets: (0, token.len()),
}])
} else {
Err(Box::new(Error::MissingUnkToken))
}
}
fn token_to_id(&self, token: &str) -> Option<u32> {
self.vocab.get(token).copied()
}
fn id_to_token(&self, id: u32) -> Option<String> {
self.vocab_r.get(&id).cloned()
}
fn get_vocab(&self) -> HashMap<String, u32> {
self.vocab.clone().into_iter().collect()
}
fn get_vocab_size(&self) -> usize {
self.vocab.keys().len()
}
fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> {
let vocab_file_name = match name {
Some(name) => format!("{name}-vocab.json"),
None => "vocab.json".to_string(),
};
// Write vocab.json
let vocab_path: PathBuf = [folder, Path::new(vocab_file_name.as_str())]
.iter()
.collect();
let mut vocab_file = File::create(&vocab_path)?;
let order_vocab_iter = OrderedVocabIter::new(&self.vocab_r);
let serialized = serde_json::to_string(&order_vocab_iter)?;
vocab_file.write_all(serialized.as_bytes())?;
Ok(vec![vocab_path])
}
fn get_trainer(&self) -> Self::Trainer {
WordLevelTrainer::default()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_tokenize_unk() {
let vocab: Vocab = [("<unk>".into(), 0), ("a".into(), 1), ("b".into(), 2)]
.iter()
.cloned()
.collect();
let wordlevel = WordLevelBuilder::default()
.vocab(vocab)
.unk_token("<unk>".to_string())
.build()
.unwrap();
let tokens = wordlevel.tokenize("c").unwrap();
assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 1)),]);
let tokens = wordlevel.tokenize("a").unwrap();
assert_eq!(tokens, vec![Token::new(1u32, "a".into(), (0, 1)),]);
}
#[test]
fn test_tokenize_missing_unk_token() {
let vocab: Vocab = [("a".into(), 0), ("b".into(), 1)].iter().cloned().collect();
let wordlevel = WordLevelBuilder::default().vocab(vocab).build().unwrap();
let tokens = wordlevel.tokenize("a").unwrap();
assert_eq!(tokens, vec![Token::new(0u32, "a".into(), (0, 1)),]);
let error = wordlevel.tokenize("c").err().unwrap();
assert!(error.is::<Error>());
}
}
| tokenizers/tokenizers/src/models/wordlevel/mod.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/wordlevel/mod.rs",
"repo_id": "tokenizers",
"token_count": 3405
} | 316 |
use ahash::{AHashMap, AHashSet};
use std::sync::LazyLock;
use crate::utils::SysRegex;
use serde::{Deserialize, Serialize};
use crate::tokenizer::{
Decoder, Encoding, PostProcessor, PreTokenizedString, PreTokenizer, Result,
SplitDelimiterBehavior,
};
use crate::utils::macro_rules_attribute;
/// Converts bytes to unicode characters.
/// See https://github.com/openai/gpt-2/blob/master/src/encoder.py#L9
pub(crate) fn bytes_char() -> AHashMap<u8, char> {
let mut bs: Vec<u8> = vec![];
bs.extend(b'!'..=b'~');
bs.extend(b'\xA1'..=b'\xAC');
bs.extend(b'\xAE'..=b'\xFF');
let mut cs: Vec<u32> = bs.iter().map(|i| *i as u32).collect();
let mut n = 0;
for b in 0..=255u8 {
if !bs.contains(&b) {
bs.push(b);
cs.push(u32::pow(2, 8) + n);
n += 1;
}
}
    // Safety: cs contains all values from bs (between 0 and 255), plus values
    // 2⁸ + n where n counts the bytes missing from bs, so everything stays
    // strictly below 512. Both ranges are valid Unicode scalar values (the
    // surrogate gap only starts at 0xD800).
bs.into_iter()
.zip(cs)
.map(|(f, t)| (f, unsafe { std::char::from_u32_unchecked(t) }))
.collect()
}
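// Illustrative mappings produced by `bytes_char` (derived from the
// construction above): printable bytes map to themselves (b'a' -> 'a'),
// while the remaining bytes are shifted into U+0100.., e.g.
// b' ' (0x20) -> 'Ġ' (U+0120) and b'\n' (0x0A) -> 'Ċ' (U+010A).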
/// Regex that matches exactly one token.
/// See https://github.com/openai/gpt-2/blob/master/src/encoder.py#L98
static RE: LazyLock<SysRegex> = LazyLock::new(|| {
SysRegex::new(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
.unwrap()
});
static BYTES_CHAR: LazyLock<AHashMap<u8, char>> = LazyLock::new(bytes_char);
static CHAR_BYTES: LazyLock<AHashMap<char, u8>> =
LazyLock::new(|| bytes_char().into_iter().map(|(c, b)| (b, c)).collect());
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
/// Provides all the necessary steps to handle the BPE tokenization at the byte-level. Takes care
/// of all the required processing steps to transform a UTF-8 string as needed before and after the
/// BPE model does its job.
#[macro_rules_attribute(impl_serde_type!)]
#[non_exhaustive]
pub struct ByteLevel {
    /// Whether to add a leading space to the first word. This allows treating
    /// the leading word just like any other word.
pub add_prefix_space: bool,
/// Whether the post processing step should trim offsets to avoid including whitespaces.
pub trim_offsets: bool,
    /// Whether to use the standard GPT2 regex for whitespace splitting.
/// Set it to False if you want to use your own splitting.
#[serde(default = "default_true")]
pub use_regex: bool,
}
fn default_true() -> bool {
true
}
impl Default for ByteLevel {
fn default() -> Self {
Self {
add_prefix_space: true,
trim_offsets: true,
use_regex: true,
}
}
}
impl ByteLevel {
pub fn new(add_prefix_space: bool, trim_offsets: bool, use_regex: bool) -> Self {
Self {
add_prefix_space,
trim_offsets,
use_regex,
}
}
pub fn alphabet() -> AHashSet<char> {
BYTES_CHAR.values().copied().collect()
}
#[must_use]
pub fn add_prefix_space(mut self, v: bool) -> Self {
self.add_prefix_space = v;
self
}
#[must_use]
pub fn trim_offsets(mut self, v: bool) -> Self {
self.trim_offsets = v;
self
}
#[must_use]
pub fn use_regex(mut self, v: bool) -> Self {
self.use_regex = v;
self
}
}
/// As a `PreTokenizer`, `ByteLevel` is in charge of transforming all the unicode characters into
/// their byte-level counterpart. It also splits the input according to the configured regex.
// TODO: Give the ability to modify this regex
impl PreTokenizer for ByteLevel {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
let re_ref: &SysRegex = &RE;
pretokenized.split(|_, mut normalized| {
if self.add_prefix_space && !normalized.get().starts_with(' ') {
normalized.prepend(" ");
}
if self.use_regex {
normalized.split(re_ref, SplitDelimiterBehavior::Isolated)
} else {
Ok(vec![normalized])
}
})?;
pretokenized.normalize(|normalized| {
let s = normalized.get();
let mut transformations: Vec<(char, isize)> = Vec::with_capacity(s.len());
for (i, cur_char) in s.char_indices() {
let size = cur_char.len_utf8();
transformations.extend(
s.as_bytes()[i..i + size]
.iter()
.enumerate()
.map(|(i, b)| (BYTES_CHAR[b], isize::from(i > 0))),
);
}
normalized.transform(transformations, 0);
Ok(())
})
}
}
/// As a `Decoder`, `ByteLevel` is in charge of converting any byte-level characters to their
/// unicode counterpart, before merging everything back into a single String.
/// This decoder consumes the tokens and merges them in one step, since a
/// single decoded token might be a byte sequence that is not representable
/// as a String on its own.
impl Decoder for ByteLevel {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
let toks = tokens
.into_iter()
.flat_map(|t| {
t.chars()
.try_fold(vec![], |mut acc, c| {
CHAR_BYTES.get(&c).map(|b| {
acc.push(*b);
acc
})
})
.unwrap_or_else(|| t.as_bytes().to_vec())
})
.collect::<Vec<u8>>();
Ok(vec![String::from_utf8_lossy(&toks).to_string()])
}
}
/// As a `PostProcessor`, `ByteLevel` is in charge of trimming the offsets if necessary.
impl PostProcessor for ByteLevel {
fn added_tokens(&self, _is_pair: bool) -> usize {
0
}
fn process_encodings(
&self,
mut encodings: Vec<Encoding>,
_add_special_tokens: bool,
) -> Result<Vec<Encoding>> {
if self.trim_offsets {
for encoding in encodings.iter_mut() {
process_offsets(encoding, self.add_prefix_space);
encoding
.get_overflowing_mut()
.iter_mut()
.for_each(|encoding| process_offsets(encoding, self.add_prefix_space));
}
}
for (i, encoding) in encodings.iter_mut().enumerate() {
encoding.set_sequence_id(i);
}
Ok(encodings)
//<dyn PostProcessor>::default_process(encodings, add_special_tokens)
}
}
pub fn process_offsets(encoding: &mut Encoding, add_prefix_space: bool) {
encoding.process_tokens_with_offsets_mut(|(i, (token, offsets))| {
let mut leading_spaces = token
.chars()
.take_while(|c| *c == BYTES_CHAR[&b' '] || c.is_whitespace())
.count();
let trailing_spaces = token
.chars()
.rev()
.take_while(|c| *c == BYTES_CHAR[&b' '] || c.is_whitespace())
.count();
if leading_spaces > 0 || trailing_spaces > 0 {
if leading_spaces > 0 {
// If user uses `is_pretokenized=True` we might have
// offsets that might begin at the start of the string but are
// NOT the first token.
let is_first = i == 0 || offsets.0 == 0;
if is_first && add_prefix_space && leading_spaces == 1 {
// If we are processing the first pair of offsets, with `add_prefix_space`,
// then we shouldn't remove anything we added. If there are more than one
// leading spaces though, it means we didn't add them, and they should be
// removed.
leading_spaces = 0;
}
offsets.0 = std::cmp::min(offsets.0 + leading_spaces, offsets.1);
}
if trailing_spaces > 0 && offsets.1 >= trailing_spaces {
offsets.1 = std::cmp::max(offsets.1 - trailing_spaces, offsets.0);
}
}
});
}
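// Worked example (illustrative): with `add_prefix_space == true`, a first
// token "Ġhello" with offsets (0, 6) is left untouched (we added that space
// ourselves), while a later token "Ġworld" with offsets (5, 11) gets its
// leading space trimmed, yielding (6, 11).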
#[cfg(test)]
mod tests {
use super::*;
use crate::tokenizer::{
Decoder, Encoding, OffsetReferential, OffsetType, PostProcessor, PreTokenizedString,
PreTokenizer,
};
use std::iter::FromIterator;
#[test]
fn pre_tokenization() {
let bytelevel = ByteLevel::default().add_prefix_space(false);
let mut pretokenized: PreTokenizedString = "Hello my friend, how is your day going?".into();
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hello", (0, 5)),
("Ġmy", (5, 8)),
("Ġfriend", (8, 15)),
(",", (15, 16)),
("Ġhow", (16, 20)),
("Ġis", (20, 23)),
("Ġyour", (23, 28)),
("Ġday", (28, 32)),
("Ġgoing", (32, 38)),
("?", (38, 39))
]
);
}
#[test]
fn pre_tokenization_no_regex() {
let bytelevel = ByteLevel::default().use_regex(false);
let mut pretokenized: PreTokenizedString = "Hello my friend, how is your day going?".into();
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("ĠHelloĠmyĠfriend,ĠhowĠisĠyourĠdayĠgoing?", (0, 39))]
);
}
#[test]
fn decoding() {
let bytelevel = ByteLevel::default().add_prefix_space(false);
assert_eq!(
bytelevel
.decode_chain(
vec![
"Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġis", "Ġyour", "Ġday", "Ġgoing",
"?"
]
.into_iter()
.map(|s| s.into())
.collect::<Vec<String>>()
)
.unwrap(),
vec!["Hello my friend, how is your day going?"]
);
}
#[test]
fn add_prefix_space() {
let bytelevel = ByteLevel::default().add_prefix_space(true);
for s in &[
" Hello my friend, how is your day going?",
"Hello my friend, how is your day going?",
] {
let mut pretokenized = PreTokenizedString::from(*s);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("ĠHello", (0, 7)),
("Ġmy", (7, 11)),
("Ġfriend", (11, 19)),
(",", (19, 20)),
("Ġhow", (20, 25)),
("Ġis", (25, 29)),
("Ġyour", (29, 35)),
("Ġday", (35, 40)),
("Ġgoing", (40, 47)),
("?", (47, 48))
]
);
}
}
#[test]
fn decode_works_on_separated_tokens() {
let samples = vec![
"A Nuskhuri abbreviation of იესუ ქრისტე ( iesu kriste ) \" Jesus Christ \"",
"An equal number have descenders , like p or q in English \
: გ , დ , ე , ვ , კ , ლ , ჟ , ტ , უ , ფ , ღ , ყ , ც",
];
let bytelevel = ByteLevel::default().add_prefix_space(false);
for sample in samples {
let mut pretokenized = PreTokenizedString::from(sample);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
let separated_tokens = pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.iter()
.flat_map(|(s, _, _)| s.split("").map(|t| t.into()))
.collect::<Vec<_>>();
assert_eq!(
sample,
bytelevel.decode_chain(separated_tokens).unwrap().join("")
);
}
}
#[test]
fn handling_of_newlines() {
let mut pretokenized = PreTokenizedString::from("Hello there\nHello there");
let bytelevel = ByteLevel::default().add_prefix_space(false);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hello", (0, 5)),
("Ġthere", (5, 11)),
("Ċ", (11, 12)),
("Hello", (12, 17)),
("Ġthere", (17, 23))
]
);
}
#[test]
fn handling_of_multiple_whitespaces() {
let mut pretokenized = PreTokenizedString::from("Hello there dear");
let bytelevel = ByteLevel::default().add_prefix_space(false);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hello", (0, 5)),
("Ġthere", (5, 11)),
("ĠĠĠĠĠĠ", (11, 17)),
("Ġdear", (17, 22))
]
);
}
#[test]
fn offsets_when_char_split_up() {
let input = "i⭢j";
let mut pretokenized = PreTokenizedString::from(input);
let bytelevel = ByteLevel::default().add_prefix_space(false);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("i", (0, 1)), ("âŃ¢", (1, 4)), ("j", (4, 5))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("i", (0, 1)), ("âŃ¢", (1, 7)), ("j", (7, 8))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(_, o, _)| &input[o.0..o.1])
.collect::<Vec<_>>(),
vec!["i", "⭢", "j"]
);
}
#[test]
fn processor_trims_offsets_pre_tokenized() {
// If user uses `is_pretokenized=True` we might have
// offsets that might begin at the start of the string but are
// NOT the first token.
let mut encoding = Encoding::new(
vec![0; 5],
vec![],
vec!["Ġl".into(), "ove".into(), "Ġl".into(), "ove".into()],
vec![],
vec![(0, 1), (1, 4), (0, 1), (1, 4)],
vec![],
vec![],
vec![],
AHashMap::new(),
);
process_offsets(&mut encoding, true);
assert_eq!(
encoding,
Encoding::new(
vec![0; 5],
vec![],
vec!["Ġl".into(), "ove".into(), "Ġl".into(), "ove".into()],
vec![],
vec![(0, 1), (1, 4), (0, 1), (1, 4)],
vec![],
vec![],
vec![],
AHashMap::new(),
)
);
}
#[test]
fn processor_trims_offsets() {
let start = Encoding::new(
vec![0; 5],
vec![],
vec![
"Ġ".into(),
"ĠĠĠĠHelloĠĠ".into(),
"ĠĠHello".into(),
"HelloĠĠ".into(),
"ĠĠĠĠ".into(),
],
vec![],
vec![(0, 1), (0, 11), (11, 18), (18, 25), (25, 29)],
vec![],
vec![],
vec![],
AHashMap::new(),
);
let expected = Encoding::new(
vec![0; 5],
vec![0; 5],
vec![
"Ġ".into(),
"ĠĠĠĠHelloĠĠ".into(),
"ĠĠHello".into(),
"HelloĠĠ".into(),
"ĠĠĠĠ".into(),
],
vec![],
vec![(0, 0), (4, 9), (13, 18), (18, 23), (29, 29)],
vec![],
vec![],
vec![],
AHashMap::from_iter(vec![(0, 0..5)]),
);
let bytelevel = ByteLevel::default().trim_offsets(true);
assert_eq!(
expected,
bytelevel.process(start.clone(), None, false).unwrap()
);
let pair_expected = Encoding::new(
vec![0; 10],
vec![0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
vec![
"Ġ".into(),
"ĠĠĠĠHelloĠĠ".into(),
"ĠĠHello".into(),
"HelloĠĠ".into(),
"ĠĠĠĠ".into(),
"Ġ".into(),
"ĠĠĠĠHelloĠĠ".into(),
"ĠĠHello".into(),
"HelloĠĠ".into(),
"ĠĠĠĠ".into(),
],
vec![],
vec![
(0, 0),
(4, 9),
(13, 18),
(18, 23),
(29, 29),
(0, 0),
(4, 9),
(13, 18),
(18, 23),
(29, 29),
],
vec![],
vec![],
vec![],
AHashMap::from_iter(vec![(0, 0..5), (1, 5..10)]),
);
assert_eq!(
pair_expected,
bytelevel
.process(start.clone(), Some(start), false)
.unwrap()
);
}
#[test]
fn decode_unknown_characters() {
let byte_level = ByteLevel::default();
assert_eq!(
byte_level
.decode_chain(vec![
"Hello".into(),
"Ġthere".into(),
"Ġdear".into(),
"Ġfriend!".into(),
"Ġ".into(),
"[PA D]".into()
])
.unwrap(),
vec!["Hello there dear friend! [PA D]"]
);
}
#[test]
fn deserialization() {
// Before use_regex
let byte_level: ByteLevel = serde_json::from_str(
r#"{"type": "ByteLevel", "add_prefix_space": true, "trim_offsets": false}"#,
)
.unwrap();
assert!(byte_level.use_regex);
        // Loading a config that includes the newer `use_regex` field also works (compatibility check).
let byte_level: ByteLevel = serde_json::from_str(
r#"{"type": "ByteLevel", "add_prefix_space": true, "trim_offsets": false, "use_regex": true}"#,
)
.unwrap();
assert!(byte_level.use_regex);
let byte_level: ByteLevel = serde_json::from_str(
r#"{"type": "ByteLevel", "add_prefix_space": true, "trim_offsets": false, "use_regex": false}"#,
)
.unwrap();
assert!(!byte_level.use_regex);
}
}
| tokenizers/tokenizers/src/pre_tokenizers/byte_level.rs/0 | {
"file_path": "tokenizers/tokenizers/src/pre_tokenizers/byte_level.rs",
"repo_id": "tokenizers",
"token_count": 10977
} | 317 |
use crate::processors::PostProcessorWrapper;
use crate::tokenizer::{Encoding, PostProcessor, Result};
use crate::utils::macro_rules_attribute;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Sequence {
processors: Vec<PostProcessorWrapper>,
}
impl Sequence {
pub fn new(processors: Vec<PostProcessorWrapper>) -> Self {
Self { processors }
}
pub fn get(&self, index: usize) -> Option<&PostProcessorWrapper> {
self.processors.get(index)
}
pub fn get_mut(&mut self, index: usize) -> Option<&mut PostProcessorWrapper> {
self.processors.get_mut(index)
}
pub fn set_mut(&mut self, index: usize, post_proc: PostProcessorWrapper) {
self.processors[index] = post_proc;
}
}
impl AsRef<[PostProcessorWrapper]> for Sequence {
fn as_ref(&self) -> &[PostProcessorWrapper] {
&self.processors
}
}
impl AsMut<[PostProcessorWrapper]> for Sequence {
fn as_mut(&mut self) -> &mut [PostProcessorWrapper] {
&mut self.processors
}
}
impl IntoIterator for Sequence {
type Item = PostProcessorWrapper;
type IntoIter = std::vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.processors.into_iter()
}
}
impl PostProcessor for Sequence {
fn added_tokens(&self, is_pair: bool) -> usize {
self.processors
.iter()
.map(|p| p.added_tokens(is_pair))
.sum::<usize>()
}
fn process_encodings(
&self,
mut encodings: Vec<Encoding>,
add_special_tokens: bool,
) -> Result<Vec<Encoding>> {
for processor in &self.processors {
encodings = processor.process_encodings(encodings, add_special_tokens)?;
}
Ok(encodings)
}
}
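// Illustrative sketch: a `Sequence` wrapping a single `ByteLevel` processor
// behaves exactly like that processor on its own (see `process_chain` below):
//
//     let bytelevel = ByteLevel::default().trim_offsets(true);
//     let sequence = Sequence::new(vec![PostProcessorWrapper::ByteLevel(bytelevel)]);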
#[cfg(test)]
mod tests {
use super::*;
use crate::processors::{ByteLevel, PostProcessorWrapper};
use crate::tokenizer::{Encoding, PostProcessor};
use ahash::AHashMap;
use std::iter::FromIterator;
#[test]
fn process_chain() {
let start = Encoding::new(
vec![0; 5],
vec![0; 5],
vec![
"Ġ".into(),
"ĠĠĠĠHelloĠĠ".into(),
"ĠĠHello".into(),
"HelloĠĠ".into(),
"ĠĠĠĠ".into(),
],
vec![],
vec![(0, 1), (0, 11), (11, 18), (18, 25), (25, 29)],
vec![],
vec![],
vec![],
AHashMap::new(),
);
let bytelevel = ByteLevel::default().trim_offsets(true);
let sequence = Sequence::new(vec![PostProcessorWrapper::ByteLevel(bytelevel)]);
let expected = Encoding::new(
vec![0; 5],
vec![0; 5],
vec![
"Ġ".into(),
"ĠĠĠĠHelloĠĠ".into(),
"ĠĠHello".into(),
"HelloĠĠ".into(),
"ĠĠĠĠ".into(),
],
vec![],
vec![(0, 0), (4, 9), (13, 18), (18, 23), (29, 29)],
vec![],
vec![],
vec![],
AHashMap::from_iter(vec![(0, 0..5)]),
);
assert_eq!(
expected,
bytelevel.process(start.clone(), None, false).unwrap()
);
assert_eq!(
expected,
sequence.process(start.clone(), None, false).unwrap()
);
let pair_expected = Encoding::new(
vec![0; 10],
vec![0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
vec![
"Ġ".into(),
"ĠĠĠĠHelloĠĠ".into(),
"ĠĠHello".into(),
"HelloĠĠ".into(),
"ĠĠĠĠ".into(),
"Ġ".into(),
"ĠĠĠĠHelloĠĠ".into(),
"ĠĠHello".into(),
"HelloĠĠ".into(),
"ĠĠĠĠ".into(),
],
vec![],
vec![
(0, 0),
(4, 9),
(13, 18),
(18, 23),
(29, 29),
(0, 0),
(4, 9),
(13, 18),
(18, 23),
(29, 29),
],
vec![],
vec![],
vec![],
AHashMap::from_iter(vec![(0, 0..5), (1, 5..10)]),
);
assert_eq!(
pair_expected,
bytelevel
.process(start.clone(), Some(start.clone()), false)
.unwrap()
);
assert_eq!(
pair_expected,
sequence.process(start.clone(), Some(start), false).unwrap()
);
}
}
| tokenizers/tokenizers/src/processors/sequence.rs/0 | {
"file_path": "tokenizers/tokenizers/src/processors/sequence.rs",
"repo_id": "tokenizers",
"token_count": 2674
} | 318 |
//!
//! This module defines helpers to allow optional Rayon usage.
//!
use rayon::iter::IterBridge;
use rayon::prelude::*;
use rayon_cond::CondIterator;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicU8;
use std::sync::atomic::Ordering;
// Re-export rayon current_num_threads
pub use rayon::current_num_threads;
pub const ENV_VARIABLE: &str = "TOKENIZERS_PARALLELISM";
static USED_PARALLELISM: AtomicBool = AtomicBool::new(false);
static PARALLELISM: AtomicU8 = AtomicU8::new(0);
/// Check if the TOKENIZERS_PARALLELISM env variable has been explicitly set
pub fn is_parallelism_configured() -> bool {
std::env::var(ENV_VARIABLE).is_ok() || get_override_parallelism().is_some()
}
/// Check if at some point we used a parallel iterator
pub fn has_parallelism_been_used() -> bool {
USED_PARALLELISM.load(Ordering::SeqCst)
}
/// Get internally set parallelism
fn get_override_parallelism() -> Option<bool> {
match PARALLELISM.load(Ordering::SeqCst) {
0 => None,
1 => Some(false),
2 => Some(true),
_ => unreachable!(),
}
}
/// Get the currently set value for `TOKENIZERS_PARALLELISM` env variable
fn get_env_parallelism() -> bool {
match std::env::var(ENV_VARIABLE) {
Ok(mut v) => {
v.make_ascii_lowercase();
!matches!(v.as_ref(), "" | "off" | "false" | "f" | "no" | "n" | "0")
}
Err(_) => true, // If we couldn't get the variable, we use the default
}
}
pub fn get_parallelism() -> bool {
if let Some(parallel) = get_override_parallelism() {
parallel
} else {
get_env_parallelism()
}
}
/// Set the value for `TOKENIZERS_PARALLELISM` for the current process
pub fn set_parallelism(val: bool) {
PARALLELISM.store(if val { 2 } else { 1 }, Ordering::SeqCst);
}
/// Allows converting into an iterator that can be executed either in parallel or serially.
///
/// The choice is made according to the currently set `TOKENIZERS_PARALLELISM` environment variable.
/// This variable can have one of the following values
/// - False => "" (empty value), "false", "f", "off", "no", "n", "0"
/// - True => Any other value
///
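/// For example, `vec![1u32, 2, 3].into_maybe_par_iter().sum::<u32>()` returns
/// the same result either way; only the execution strategy changes.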
pub trait MaybeParallelIterator<P, S>
where
P: ParallelIterator,
S: Iterator<Item = P::Item>,
{
    /// Convert `self` into a `CondIterator` that will be executed either in parallel or serially,
/// based solely on the `TOKENIZERS_PARALLELISM` environment variable
fn into_maybe_par_iter(self) -> CondIterator<P, S>;
    /// Convert `self` into a `CondIterator` that will be executed either in parallel or serially,
/// based on both the `TOKENIZERS_PARALLELISM` environment variable and the provided bool.
/// Both must be true to run with parallelism activated.
fn into_maybe_par_iter_cond(self, cond: bool) -> CondIterator<P, S>;
}
impl<P, S, I> MaybeParallelIterator<P, S> for I
where
I: IntoParallelIterator<Iter = P, Item = P::Item> + IntoIterator<IntoIter = S, Item = S::Item>,
P: ParallelIterator,
S: Iterator<Item = P::Item>,
{
fn into_maybe_par_iter(self) -> CondIterator<P, S> {
let parallelism = get_parallelism();
if parallelism {
USED_PARALLELISM.store(true, Ordering::SeqCst);
}
CondIterator::new(self, parallelism)
}
fn into_maybe_par_iter_cond(self, cond: bool) -> CondIterator<P, S> {
if cond {
self.into_maybe_par_iter()
} else {
CondIterator::from_serial(self)
}
}
}
/// Shared-reference version of `MaybeParallelIterator`; works the same but returns an
/// iterator over references and does not consume `self`
pub trait MaybeParallelRefIterator<'data, P, S>
where
P: ParallelIterator,
S: Iterator<Item = P::Item>,
P::Item: 'data,
{
fn maybe_par_iter(&'data self) -> CondIterator<P, S>;
fn maybe_par_iter_cond(&'data self, cond: bool) -> CondIterator<P, S>;
}
impl<'data, P, S, I: 'data + ?Sized> MaybeParallelRefIterator<'data, P, S> for I
where
&'data I: MaybeParallelIterator<P, S>,
P: ParallelIterator,
S: Iterator<Item = P::Item>,
P::Item: 'data,
{
fn maybe_par_iter(&'data self) -> CondIterator<P, S> {
self.into_maybe_par_iter()
}
fn maybe_par_iter_cond(&'data self, cond: bool) -> CondIterator<P, S> {
self.into_maybe_par_iter_cond(cond)
}
}
/// Exclusive-reference version of `MaybeParallelIterator`; works the same but returns an
/// iterator over mutable references and does not consume `self`
pub trait MaybeParallelRefMutIterator<'data, P, S>
where
P: ParallelIterator,
S: Iterator<Item = P::Item>,
P::Item: 'data,
{
fn maybe_par_iter_mut(&'data mut self) -> CondIterator<P, S>;
fn maybe_par_iter_mut_cond(&'data mut self, cond: bool) -> CondIterator<P, S>;
}
impl<'data, P, S, I: 'data + ?Sized> MaybeParallelRefMutIterator<'data, P, S> for I
where
&'data mut I: MaybeParallelIterator<P, S>,
P: ParallelIterator,
S: Iterator<Item = P::Item>,
P::Item: 'data,
{
fn maybe_par_iter_mut(&'data mut self) -> CondIterator<P, S> {
self.into_maybe_par_iter()
}
fn maybe_par_iter_mut_cond(&'data mut self, cond: bool) -> CondIterator<P, S> {
self.into_maybe_par_iter_cond(cond)
}
}
/// Converts any serial iterator into a `CondIterator` that can run either in parallel or serially.
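/// For example, `(0..8).maybe_par_bridge()` wraps a plain `Range` iterator so that it
/// can be driven in parallel when parallelism is enabled (illustrative sketch).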
pub trait MaybeParallelBridge<T, S>
where
S: Iterator<Item = T> + Send,
T: Send,
{
fn maybe_par_bridge(self) -> CondIterator<IterBridge<S>, S>;
fn maybe_par_bridge_cond(self, cond: bool) -> CondIterator<IterBridge<S>, S>;
}
impl<T, S> MaybeParallelBridge<T, S> for S
where
S: Iterator<Item = T> + Send,
T: Send,
{
fn maybe_par_bridge(self) -> CondIterator<IterBridge<S>, S> {
let iter = CondIterator::from_serial(self);
if get_parallelism() {
USED_PARALLELISM.store(true, Ordering::SeqCst);
CondIterator::from_parallel(iter.into_parallel().right().unwrap())
} else {
iter
}
}
fn maybe_par_bridge_cond(self, cond: bool) -> CondIterator<IterBridge<S>, S> {
if cond {
self.maybe_par_bridge()
} else {
CondIterator::from_serial(self)
}
}
}
/// Allows converting into `chunks` that can be processed either in parallel or serially.
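/// For example, `[1, 2, 3, 4, 5].maybe_par_chunks(2)` yields the chunks
/// `[1, 2]`, `[3, 4]`, `[5]` (see `test_maybe_parallel_slice` below).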
pub trait MaybeParallelSlice<'data, T>
where
T: Sync,
{
/// Create a CondIterator, that will be executed either in parallel or serially,
/// based solely on the `TOKENIZERS_PARALLELISM` environment variable
fn maybe_par_chunks(
&'_ self,
chunk_size: usize,
) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>>;
/// Create a CondIterator, that will be executed either in parallel or serially,
/// based on both the `TOKENIZERS_PARALLELISM` environment variable and the provided bool.
/// Both must be true to run with parallelism activated.
fn maybe_par_chunks_cond(
&'_ self,
cond: bool,
chunk_size: usize,
) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>>;
}
impl<T> MaybeParallelSlice<'_, T> for [T]
where
T: Sync,
{
fn maybe_par_chunks(
&'_ self,
chunk_size: usize,
) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>> {
let parallelism = get_parallelism();
if parallelism {
CondIterator::from_parallel(self.par_chunks(chunk_size))
} else {
CondIterator::from_serial(self.chunks(chunk_size))
}
}
fn maybe_par_chunks_cond(
&'_ self,
cond: bool,
chunk_size: usize,
) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>> {
if cond {
self.maybe_par_chunks(chunk_size)
} else {
CondIterator::from_serial(self.chunks(chunk_size))
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_maybe_parallel_iterator() {
let mut v = vec![1u32, 2, 3, 4, 5, 6];
assert_eq!(v.maybe_par_iter().sum::<u32>(), 21);
assert_eq!(
v.maybe_par_iter_mut()
.map(|v| {
*v *= 2;
*v
})
.sum::<u32>(),
42
);
assert_eq!(v.maybe_par_iter().sum::<u32>(), 42);
assert_eq!(v.into_maybe_par_iter().sum::<u32>(), 42);
}
#[test]
fn test_maybe_parallel_slice() {
let v = [1, 2, 3, 4, 5];
let chunks: Vec<_> = v.maybe_par_chunks(2).collect();
assert_eq!(chunks, vec![&[1, 2][..], &[3, 4], &[5]]);
}
}
| tokenizers/tokenizers/src/utils/parallelism.rs/0 | {
"file_path": "tokenizers/tokenizers/src/utils/parallelism.rs",
"repo_id": "tokenizers",
"token_count": 3698
} | 319 |
To install via [NPM](https://www.npmjs.com/package/@huggingface/transformers), run:
```bash
npm i @huggingface/transformers
```
Alternatively, you can use it in vanilla JS, without any bundler, by using a CDN or static hosting. For example, using [ES Modules](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Modules), you can import the library with:
```html
<script type="module">
import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.7.2';
</script>
```
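
Once loaded, you can, for example, allocate a pipeline (an illustrative sketch; the model is downloaded on first use):

```js
import { pipeline } from '@huggingface/transformers';

// Allocate a pipeline for sentiment analysis
const classifier = await pipeline('sentiment-analysis');

const output = await classifier('I love transformers!');
// [{ label: 'POSITIVE', score: 0.99... }]
```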
| transformers.js/docs/snippets/2_installation.snippet/0 | {
"file_path": "transformers.js/docs/snippets/2_installation.snippet",
"repo_id": "transformers.js",
"token_count": 176
} | 320 |
# Building an Electron application
*Full tutorial coming soon...* In the meantime, check out the example application: https://github.com/huggingface/transformers.js/tree/main/examples/electron
| transformers.js/docs/source/tutorials/electron.md/0 | {
"file_path": "transformers.js/docs/source/tutorials/electron.md",
"repo_id": "transformers.js",
"token_count": 51
} | 321 |
import Chart from 'chart.js/auto';
import Prism from 'prismjs';
// Import code and styles for supported languages
import 'prismjs/components/prism-javascript';
import 'prismjs/components/prism-python';
import 'prismjs/components/prism-markdown';
import 'prismjs/components/prism-clike';
import 'prismjs/themes/prism.css'
import './theme.css';
import './style.css';
// Initialise worker
const worker = new Worker(new URL('./worker.js', import.meta.url), {
type: 'module',
});
// Define elements
const TASK_SELECTOR = document.getElementById('task');
let searchParams = new URLSearchParams(location.search);
let defaultDemo = searchParams.get('demo');
if (defaultDemo) {
TASK_SELECTOR.value = defaultDemo;
}
// translation inputs
const LANGUAGE_FROM = document.getElementById('language-from');
const LANGUAGE_TO = document.getElementById('language-to');
const INPUT_TEXTBOX = document.getElementById('input-textbox');
const OUTPUT_TEXTBOX = document.getElementById('output-textbox');
// text generation inputs
const TEXT_GENERATION_TEXTBOX = document.getElementById('text-generation-textbox');
const TASKS = document.getElementsByClassName('task-settings')
const PROGRESS = document.getElementById('progress');
const PROGRESS_BARS = document.getElementById('progress-bars');
const GENERATE_BUTTON = document.getElementById('generate');
const MLM_INPUT_TEXTBOX = document.getElementById('mlm-input-textbox');
const MLM_OUTPUT_TEXTBOX = document.getElementById('mlm-output-textbox');
const SC_INPUT_TEXTBOX = document.getElementById('sc-input-textbox');
const SC_OUTPUT_CANVAS = document.getElementById('sc-canvas');
const TC_INPUT_TEXTBOX = document.getElementById('tc-input-textbox');
const TC_OUTPUT = document.getElementById('tc-output');
const QA_CONTEXT_TEXTBOX = document.getElementById('qa-context-textbox');
const QA_QUESTION_TEXTBOX = document.getElementById('qa-question-textbox');
const QA_ANSWER_TEXTBOX = document.getElementById('qa-answer-textbox');
const SUMMARIZATION_INPUT_TEXTBOX = document.getElementById('summarization-input-textbox');
const SUMMARIZATION_OUTPUT_TEXTBOX = document.getElementById('summarization-output-textbox');
const SPEECH2TEXT_SELECT = document.getElementById('audio-select');
const SPEECH2TEXT_INPUT = document.getElementById('audio-file');
const SPEECH2TEXT_AUDIO = document.getElementById('audio-player');
const SPEECH2TEXT_OUTPUT_TEXTBOX = document.getElementById('speech2text-output-textbox');
const TEXT2IMAGE_SELECT = document.getElementById('image-select');
const TEXT2IMAGE_INPUT = document.getElementById('image-file');
const TEXT2IMAGE_IMG = document.getElementById('image-viewer');
const TEXT2IMAGE_OUTPUT_TEXTBOX = document.getElementById('image2text-output-textbox');
const IMAGE_CLASSIFICATION_SELECT = document.getElementById('ic-select');
const IMAGE_CLASSIFICATION_INPUT = document.getElementById('ic-file');
const IMAGE_CLASSIFICATION_IMG = document.getElementById('ic-viewer');
const IMAGE_CLASSIFICATION_OUTPUT_CANVAS = document.getElementById('ic-canvas');
const CODE_COMPLETION_CONTAINER = document.getElementById('code-completion-container');
const ZSIC_SELECT = document.getElementById('zsic-select');
const ZSIC_INPUT = document.getElementById('zsic-file');
const ZSIC_CLASSES = document.getElementById('zsic-classes');
const ZSIC_IMG = document.getElementById('zsic-viewer');
const ZSIC_OUTPUT_CANVAS = document.getElementById('zsic-canvas');
const OD_SELECT = document.getElementById('od-select');
const OD_INPUT = document.getElementById('od-file');
const OD_IMG = document.getElementById('od-viewer');
const OD_OUTPUT_OVERLAY = document.getElementById('od-overlay');
const OD_OUTPUT_CANVAS = document.getElementById('od-canvas');
const ZSC_INPUT_TEXTBOX = document.getElementById('zsc-input-textbox');
const ZSC_CLASSES = document.getElementById('zsc-classes');
const ZSC_OUTPUT_CANVAS = document.getElementById('zsc-canvas');
const DEFAULT_GREEDY_PARAMS = {
max_new_tokens: 50,
num_beams: 1,
temperature: 1,
top_k: 0,
do_sample: false
}
const TASK_DEFAULT_PARAMS = {
'translation': DEFAULT_GREEDY_PARAMS,
'text-generation': {
max_new_tokens: 100,
num_beams: 1,
temperature: 1,
top_k: 20,
do_sample: true
},
'code-completion': DEFAULT_GREEDY_PARAMS,
'masked-language-modelling': {
topk: 5 // number of samples
},
'sequence-classification': {},
'token-classification': {},
'zero-shot-classification': {
multi_label: false
},
'question-answering': {},
'summarization': {
max_new_tokens: 50,
num_beams: 2,
temperature: 1,
top_k: 0,
do_sample: false
},
'automatic-speech-recognition': DEFAULT_GREEDY_PARAMS,
'image-to-text': DEFAULT_GREEDY_PARAMS,
'image-classification': {},
'zero-shot-image-classification': {},
'object-detection': {},
};
[
[SPEECH2TEXT_SELECT, SPEECH2TEXT_INPUT, SPEECH2TEXT_AUDIO],
[TEXT2IMAGE_SELECT, TEXT2IMAGE_INPUT, TEXT2IMAGE_IMG],
[IMAGE_CLASSIFICATION_SELECT, IMAGE_CLASSIFICATION_INPUT, IMAGE_CLASSIFICATION_IMG],
[ZSIC_SELECT, ZSIC_INPUT, ZSIC_IMG],
[OD_SELECT, OD_INPUT, OD_IMG],
].forEach(x => {
let [select, input, media] = x;
select.addEventListener('input', (e) => {
if (select.options[select.selectedIndex].hasAttribute('show-custom')) {
input.style.display = 'block';
} else {
input.style.display = 'none';
media.src = select.value
}
})
input.addEventListener("change", () => {
const file = input.files[0];
const url = URL.createObjectURL(file);
media.src = url;
});
});
const NER_TAGS = {
// tag: [textColour, backgroundColour, tagColour]
'ORG': ['#115E59', '#CCFBF1', '#14B8A6'],
'PER': ['#9D174D', '#FCE7F3', '#EC4899'],
'LOC': ['#86198F', '#FAE8FF', '#D946EF'],
}
// Predefined list of unique colours
const COLOURS = [
'255, 99, 132',
'54, 162, 235',
'255, 206, 86',
'75, 192, 192',
'153, 102, 255',
'255, 159, 64',
]
OD_SELECT.addEventListener('change', () => {
// Clear overlay and chart data on change
OD_OUTPUT_OVERLAY.innerHTML = '';
const chart = CHARTS[OD_OUTPUT_CANVAS.id];
chart.data = structuredClone(DEFAULT_DATA);
chart.update();
});
OD_OUTPUT_OVERLAY.addEventListener('mousemove', (e) => {
let rects = OD_OUTPUT_OVERLAY.querySelectorAll('rect')
let colours = [];
let borderColours = [];
rects.forEach((rect, i) => {
let colour = COLOURS[i % COLOURS.length];
        // Display all boxes when hovering over the background (i.e. not over a rect)
let toDisplay = e.target.tagName !== 'rect';
if (!toDisplay) {
// Perform additional check
let bb = rect.getBoundingClientRect()
            // Check if the box intersects with the current mouse position
toDisplay = e.clientX >= bb.left && e.clientX <= bb.right && e.clientY >= bb.top && e.clientY <= bb.bottom
}
if (toDisplay) {
// Set back to original
rect.style.fillOpacity = 0.1;
rect.style.opacity = 1;
colours.push(`rgba(${colour}, 0.5)`);
borderColours.push(`rgba(${colour}, 1)`);
} else {
// Hovering over a rect, so set all other rects to 0 opacity
rect.style.fillOpacity = 0;
rect.style.opacity = 0;
colours.push(`rgba(${colour}, 0.05)`);
borderColours.push(`rgba(${colour}, 0.5)`);
}
})
const chart = CHARTS['od-canvas'];
chart.data.datasets[0].backgroundColor = colours;
chart.data.datasets[0].borderColor = borderColours;
chart.update();
})
function updateParams(task) {
let params = TASK_DEFAULT_PARAMS[task]
if (!params) return;
for (let [key, value] of Object.entries(params)) {
let element = document.querySelector(`.generation-option[param-name="${key}"]`)
if (!element) continue;
element.value = value;
}
}
// Parameters
const GENERATION_OPTIONS = document.getElementsByClassName('generation-option');
const CHART_OPTIONS = {
responsive: true,
maintainAspectRatio: false,
indexAxis: 'y',
scales: {
y: {
beginAtZero: true,
},
x: {
min: 0,
max: 1,
}
},
plugins: {
legend: {
display: false
},
},
layout: {
padding: {
bottom: -5,
}
},
};
// Initialise all code blocks
const CODE_BLOCKS = {};
[...document.querySelectorAll('.code-container')].forEach(element => {
// Guide to add editable code block:
// https://codepen.io/WebCoder49/pen/dyNyraq
// https://css-tricks.com/creating-an-editable-textarea-that-supports-syntax-highlighted-code/
const CODE_HIGHLIGHT = element.querySelector('pre');
const CODE_HIGHLIGHT_CONTENT = element.querySelector('code');
const CODE_COMPLETION_TEXTBOX = element.querySelector('textarea');
let sync_scroll = () => {
/* Scroll result to scroll coords of event - sync with textarea */
CODE_HIGHLIGHT.scrollTop = CODE_COMPLETION_TEXTBOX.scrollTop;
CODE_HIGHLIGHT.scrollLeft = CODE_COMPLETION_TEXTBOX.scrollLeft;
}
let update = (text) => {
// Handle final newlines (see article)
if (text[text.length - 1] == "\n") {
text += " ";
}
// Update code
CODE_HIGHLIGHT_CONTENT.innerHTML = escapeHtml(text);
// Syntax Highlight
Prism.highlightElement(CODE_HIGHLIGHT_CONTENT);
}
// Update code function
let updateCode = (text) => {
update(text);
sync_scroll();
};
CODE_BLOCKS[element.id] = {
update: (text) => {
CODE_COMPLETION_TEXTBOX.value = text;
updateCode(text);
// When updating, set scroll to bottom
// https://stackoverflow.com/a/9170709
CODE_COMPLETION_TEXTBOX.scrollTop = CODE_COMPLETION_TEXTBOX.scrollHeight;
},
text: () => CODE_COMPLETION_TEXTBOX.value
};
CODE_COMPLETION_TEXTBOX.oninput = () => updateCode(CODE_COMPLETION_TEXTBOX.value);
CODE_COMPLETION_TEXTBOX.onscroll = sync_scroll;
CODE_COMPLETION_TEXTBOX.onkeydown = (event) => {
let code = CODE_COMPLETION_TEXTBOX.value;
if (event.key == "Tab") {
/* Tab key pressed */
event.preventDefault(); // stop normal
let before_tab = code.slice(0, CODE_COMPLETION_TEXTBOX.selectionStart); // text before tab
let after_tab = code.slice(CODE_COMPLETION_TEXTBOX.selectionEnd, CODE_COMPLETION_TEXTBOX.value.length); // text after tab
let cursor_pos = CODE_COMPLETION_TEXTBOX.selectionStart + 1; // where cursor moves after tab - moving forward by 1 char to after tab
CODE_COMPLETION_TEXTBOX.value = before_tab + "\t" + after_tab; // add tab char
// move cursor
CODE_COMPLETION_TEXTBOX.selectionStart = cursor_pos;
CODE_COMPLETION_TEXTBOX.selectionEnd = cursor_pos;
update(CODE_COMPLETION_TEXTBOX.value); // Update text to include indent
}
};
});
const DEFAULT_DATA = {
labels: ['label', 'label', 'label', 'label', 'label'],
datasets: [{
borderWidth: 1
}]
}
const CHARTS = {
'sc-canvas': new Chart(SC_OUTPUT_CANVAS, {
type: 'bar',
data: {
labels: ['5 stars', '4 stars', '3 stars', '2 stars', '1 star'],
datasets: [{
borderWidth: 1
}]
},
options: CHART_OPTIONS,
}),
'ic-canvas': new Chart(IMAGE_CLASSIFICATION_OUTPUT_CANVAS, {
type: 'bar',
data: structuredClone(DEFAULT_DATA),
options: CHART_OPTIONS
}),
'zsic-canvas': new Chart(ZSIC_OUTPUT_CANVAS, {
type: 'bar',
data: {
labels: ['football', 'airport', 'animals'],
datasets: [{
borderWidth: 1
}]
},
options: CHART_OPTIONS
}),
'od-canvas': new Chart(OD_OUTPUT_CANVAS, {
type: 'bar',
data: structuredClone(DEFAULT_DATA),
options: CHART_OPTIONS
}),
'zsc-canvas': new Chart(ZSC_OUTPUT_CANVAS, {
type: 'bar',
data: {
labels: ['urgent', 'not urgent', 'phone', 'tablet', 'microwave'],
datasets: [{
borderWidth: 1
}]
},
options: CHART_OPTIONS
}),
};
[
[ZSIC_CLASSES, ZSIC_OUTPUT_CANVAS],
[ZSC_CLASSES, ZSC_OUTPUT_CANVAS],
].forEach(x => {
let [input, chart] = x;
input.addEventListener('input', () => {
// Update labels of graph
let chartToUpdate = CHARTS[chart.id];
chartToUpdate.data.labels = getZSClasses(input);
chartToUpdate.data.datasets[0].data = new Array(chartToUpdate.data.labels.length).fill(0);
chartToUpdate.update();
})
});
function getZSClasses(elem) {
// Get zero-shot classes from input element
return elem.value.split(/\s*,+\s*/g).filter(x => x);
}
function updateVisibility() {
// Set default parameters for task
updateParams(TASK_SELECTOR.value);
for (let element of TASKS) {
if (element.getAttribute('task').split(',').includes(TASK_SELECTOR.value)) {
element.style.display = 'block';
} else {
element.style.display = 'none';
}
}
}
updateVisibility();
// Add event listeners
TASK_SELECTOR.addEventListener('input', updateVisibility);
function parseValue(value, type) {
switch (type) {
case 'number':
return Number(value);
case 'bool':
return value === 'true'
default:
return value
}
}
function isVisible(e) {
// https://stackoverflow.com/a/38873788
return !!(e.offsetWidth || e.offsetHeight || e.getClientRects().length);
}
GENERATE_BUTTON.addEventListener('click', async (e) => {
// Set and pass generation settings to web worker
let data = {
task: TASK_SELECTOR.value,
generation: Object.fromEntries([...GENERATION_OPTIONS]
.filter(isVisible) // Only use parameters that are visible on screen
.map(x => {
let value = parseValue(x.value, x.getAttribute('datatype'));
return [x.getAttribute('param-name'), value]
}))
};
switch (TASK_SELECTOR.value) {
case 'translation':
data.languageFrom = LANGUAGE_FROM.value
data.languageTo = LANGUAGE_TO.value
data.text = INPUT_TEXTBOX.value
data.elementIdToUpdate = OUTPUT_TEXTBOX.id
break;
case 'text-generation':
data.text = TEXT_GENERATION_TEXTBOX.value
data.elementIdToUpdate = TEXT_GENERATION_TEXTBOX.id
break;
case 'code-completion':
data.text = CODE_BLOCKS[CODE_COMPLETION_CONTAINER.id].text();
data.elementIdToUpdate = CODE_COMPLETION_CONTAINER.id
data.targetType = 'code'
break;
case 'masked-language-modelling':
data.text = MLM_INPUT_TEXTBOX.value
data.elementIdToUpdate = MLM_OUTPUT_TEXTBOX.id
break;
case 'sequence-classification':
data.text = SC_INPUT_TEXTBOX.value
data.elementIdToUpdate = SC_OUTPUT_CANVAS.id
data.targetType = 'chart'
break;
case 'token-classification':
data.text = TC_INPUT_TEXTBOX.value
data.elementIdToUpdate = TC_OUTPUT.id
data.targetType = 'tokens'
break;
case 'zero-shot-classification':
data.text = ZSC_INPUT_TEXTBOX.value
data.classes = getZSClasses(ZSC_CLASSES);
data.elementIdToUpdate = ZSC_OUTPUT_CANVAS.id
data.targetType = 'chart'
data.updateLabels = true
break;
case 'question-answering':
data.context = QA_CONTEXT_TEXTBOX.value
data.question = QA_QUESTION_TEXTBOX.value
data.elementIdToUpdate = QA_ANSWER_TEXTBOX.id
break;
case 'summarization':
data.text = SUMMARIZATION_INPUT_TEXTBOX.value
data.elementIdToUpdate = SUMMARIZATION_OUTPUT_TEXTBOX.id
break;
case 'automatic-speech-recognition':
const sampling_rate = 16000;
const audioCTX = new AudioContext({ sampleRate: sampling_rate })
const response = await (await fetch(SPEECH2TEXT_AUDIO.currentSrc)).arrayBuffer()
const decoded = await audioCTX.decodeAudioData(response)
data.audio = decoded.getChannelData(0);
data.elementIdToUpdate = SPEECH2TEXT_OUTPUT_TEXTBOX.id
break;
case 'image-to-text':
data.image = getImageDataFromImage(TEXT2IMAGE_IMG)
data.elementIdToUpdate = TEXT2IMAGE_OUTPUT_TEXTBOX.id
break;
case 'image-classification':
data.image = getImageDataFromImage(IMAGE_CLASSIFICATION_IMG)
data.elementIdToUpdate = IMAGE_CLASSIFICATION_OUTPUT_CANVAS.id
data.targetType = 'chart'
data.updateLabels = true
break;
case 'zero-shot-image-classification':
data.image = getImageDataFromImage(ZSIC_IMG)
data.classes = getZSClasses(ZSIC_CLASSES);
data.elementIdToUpdate = ZSIC_OUTPUT_CANVAS.id
data.targetType = 'chart'
data.updateLabels = true
break;
case 'object-detection':
data.image = getImageDataFromImage(OD_IMG)
data.targetType = 'overlay'
data.chartId = OD_OUTPUT_CANVAS.id
data.elementIdToUpdate = OD_OUTPUT_OVERLAY.id
break;
default:
return;
}
worker.postMessage(data);
});
// Handle result returned by the web worker
worker.addEventListener('message', (event) => {
const message = event.data;
switch (message.type) {
case 'download': // for session creation
if (message.data.status === 'initiate') {
PROGRESS.style.display = 'block';
// create progress bar
PROGRESS_BARS.appendChild(htmlToElement(`
<div class="progress w-100" model="${message.data.name}" file="${message.data.file}">
<div class="progress-bar" role="progressbar"></div>
</div>
`));
} else {
let bar = PROGRESS_BARS.querySelector(`.progress[model="${message.data.name}"][file="${message.data.file}"]> .progress-bar`)
switch (message.data.status) {
case 'progress':
// update existing bar
bar.style.width = message.data.progress.toFixed(2) + '%';
bar.textContent = `${message.data.file} (${formatBytes(message.data.loaded)} / ${formatBytes(message.data.total)})`;
break;
case 'done':
// Remove the progress bar
bar.parentElement.remove();
break;
case 'ready':
// Pipeline is ready - hide container
PROGRESS.style.display = 'none';
PROGRESS_BARS.innerHTML = '';
break;
}
}
break;
case 'update': // for generation
let target = message.target;
let elem = document.getElementById(target);
switch (message.targetType) {
case 'code':
CODE_BLOCKS[target].update(message.data);
break;
default: // is textbox
elem.value = message.data
break;
}
break;
case 'complete':
switch (message.targetType) {
case 'chart':
const chartToUpdate = CHARTS[message.target];
let chartData = chartToUpdate.data.datasets[0].data;
if (message.updateLabels) {
for (let i = 0; i < message.data.length; ++i) {
let item = message.data[i];
chartData[i] = item.score;
chartToUpdate.data.labels[i] = item.label;
}
} else {
// set data, ensuring labels align correctly
for (let item of message.data) {
chartData[
chartToUpdate.data.labels.indexOf(item.label)
] = item.score
}
}
chartToUpdate.update(); // update the chart
break;
case 'tokens':
let target = document.getElementById(message.target);
target.innerHTML = '';
let tokens = message.data;
for (let token of tokens) {
let elem;
if (token.type === 'O') {
elem = document.createTextNode(token.text);
} else {
let [textColour, backgroundColour, tagColour] = NER_TAGS[token.type];
elem = htmlToElement(`<span class="ner-container" style="background-color: ${backgroundColour}; color: ${textColour};">${token.text}<span class="ner-tag" style="background-color: ${tagColour}; color: ${backgroundColour};">${token.type}</span></span>`);
}
target.appendChild(elem);
}
break;
case 'overlay':
let parent = document.getElementById(message.target);
// Clear previous output, just in case
parent.innerHTML = '';
let viewbox = parent.viewBox.baseVal;
let colours = [];
let borderColours = [];
let items = message.data;
for (let i = 0; i < items.length; ++i) {
const box = items[i].box;
let svgns = "http://www.w3.org/2000/svg";
let rect = document.createElementNS(svgns, 'rect');
rect.setAttribute('x', viewbox.width * box.xmin);
rect.setAttribute('y', viewbox.height * box.ymin);
rect.setAttribute('width', viewbox.width * (box.xmax - box.xmin));
rect.setAttribute('height', viewbox.height * (box.ymax - box.ymin));
const colour = COLOURS[i % COLOURS.length];
rect.style.stroke = rect.style.fill = `rgba(${colour}, 1)`;
colours.push(`rgba(${colour}, 0.5)`);
borderColours.push(`rgba(${colour}, 1)`);
parent.appendChild(rect);
}
// Update chart label and data
const chart = CHARTS[message.chartId];
chart.data.labels = items.map(x => x.label);
chart.data.datasets[0] = {
data: items.map(x => x.score),
backgroundColor: colours,
borderColor: borderColours
};
chart.update()
break;
default: // is text
document.getElementById(message.target).value = message.data
break;
}
break;
default:
break;
}
});
// Utility functions
function escapeHtml(unsafe) {
    return unsafe.replaceAll('&', '&amp;').replaceAll('<', '&lt;').replaceAll('>', '&gt;').replaceAll('"', '&quot;').replaceAll("'", '&#039;');
}
function htmlToElement(html) {
// https://stackoverflow.com/a/35385518
let template = document.createElement('template');
html = html.trim(); // Never return a text node of whitespace as the result
template.innerHTML = html;
return template.content.firstChild;
}
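// Format a byte count using decimal (SI) units, e.g. formatBytes(5_000_000) -> "5 MB"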
function formatBytes(bytes, decimals = 0) {
const sizes = ["Bytes", "KB", "MB", "GB", "TB"];
if (bytes === 0) return "0 Bytes";
const i = parseInt(Math.floor(Math.log(bytes) / Math.log(1000)), 10);
const rounded = (bytes / Math.pow(1000, i)).toFixed(decimals);
return rounded + " " + sizes[i];
}
function getImageDataFromImage(original) {
    // Helper function to read an image element into a data URL
const canvas = document.createElement('canvas');
canvas.width = original.naturalWidth;
canvas.height = original.naturalHeight;
const ctx = canvas.getContext('2d');
// TODO play around with ctx options?
// ctx.patternQuality = 'bilinear';
// ctx.quality = 'bilinear';
// ctx.antialias = 'default';
// ctx.imageSmoothingQuality = 'high';
ctx.drawImage(original, 0, 0, canvas.width, canvas.height);
return canvas.toDataURL();
}
| transformers.js/examples/demo-site/src/main.js/0 | {
"file_path": "transformers.js/examples/demo-site/src/main.js",
"repo_id": "transformers.js",
"token_count": 9224
} | 322 |
{
"name": "electron",
"productName": "electron",
"version": "1.0.0",
"description": "Transformers.js sample Electron application",
"main": "src/index.js",
"scripts": {
"start": "electron-forge start",
"package": "electron-forge package",
"make": "electron-forge make",
"publish": "electron-forge publish",
"lint": "echo \"No linting configured\""
},
"keywords": [],
"author": "Xenova",
"license": "MIT",
"dependencies": {
"@xenova/transformers": "^2.6.2",
"electron-squirrel-startup": "^1.0.0"
},
"devDependencies": {
"@electron-forge/cli": "^6.1.1",
"@electron-forge/maker-deb": "^6.1.1",
"@electron-forge/maker-rpm": "^6.1.1",
"@electron-forge/maker-squirrel": "^6.1.1",
"@electron-forge/maker-zip": "^6.1.1",
"electron": "^24.1.1"
}
}
| transformers.js/examples/electron/package.json/0 | {
"file_path": "transformers.js/examples/electron/package.json",
"repo_id": "transformers.js",
"token_count": 361
} | 323 |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Transformers.js | Sample Browser Extension</title>
<!-- Load styles -->
<link rel="stylesheet" href="popup.css" />
</head>
<body>
<div class="container">
<h1>Transformers.js</h1>
<h2>Run 🤗 Transformers in a Browser Extension!</h2>
<input id="text" placeholder="Enter text here">
<pre id="output"></pre>
</div>
</body>
</html> | transformers.js/examples/extension/src/popup.html/0 | {
"file_path": "transformers.js/examples/extension/src/popup.html",
"repo_id": "transformers.js",
"token_count": 246
} | 324 |
import { pipeline } from '@xenova/transformers';
import wavefile from 'wavefile';
// Load model
let transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en');
// Load audio data
let url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav';
let buffer = Buffer.from(await fetch(url).then(x => x.arrayBuffer()))
// Read .wav file and convert it to required format
let wav = new wavefile.WaveFile(buffer);
wav.toBitDepth('32f'); // Pipeline expects input as a Float32Array
wav.toSampleRate(16000); // Whisper expects audio with a sampling rate of 16000
let audioData = wav.getSamples();
if (Array.isArray(audioData)) {
if (audioData.length > 1) {
const SCALING_FACTOR = Math.sqrt(2);
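        // sqrt(2) roughly compensates for the volume drop when averaging the two channels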
// Merge channels (into first channel to save memory)
for (let i = 0; i < audioData[0].length; ++i) {
audioData[0][i] = SCALING_FACTOR * (audioData[0][i] + audioData[1][i]) / 2;
}
}
// Select first channel
audioData = audioData[0];
}
// Run model
let start = performance.now();
let output = await transcriber(audioData);
let end = performance.now();
console.log(`Execution duration: ${(end - start) / 1000} seconds`);
console.log(output);
// { text: ' And so my fellow Americans ask not what your country can do for you, ask what you can do for your country.' }
| transformers.js/examples/node-audio-processing/index.js/0 | {
"file_path": "transformers.js/examples/node-audio-processing/index.js",
"repo_id": "transformers.js",
"token_count": 479
} | 325 |
import { pipeline } from '@xenova/transformers';
/**
* This class uses the Singleton pattern to ensure that only one instance of the
* pipeline is loaded. This is because loading the pipeline is an expensive
* operation and we don't want to do it every time we want to translate a sentence.
*/
class MyTranslationPipeline {
static task = 'translation';
static model = 'Xenova/nllb-200-distilled-600M';
static instance = null;
static async getInstance(progress_callback = null) {
if (this.instance === null) {
this.instance = pipeline(this.task, this.model, { progress_callback });
}
return this.instance;
}
}
// Listen for messages from the main thread
self.addEventListener('message', async (event) => {
// Retrieve the translation pipeline. When called for the first time,
// this will load the pipeline and save it for future use.
let translator = await MyTranslationPipeline.getInstance(x => {
// We also add a progress callback to the pipeline so that we can
// track model loading.
self.postMessage(x);
});
// Actually perform the translation
let output = await translator(event.data.text, {
tgt_lang: event.data.tgt_lang,
src_lang: event.data.src_lang,
// Allows for partial output
callback_function: x => {
self.postMessage({
status: 'update',
output: translator.tokenizer.decode(x[0].output_token_ids, { skip_special_tokens: true })
});
}
});
// Send the output back to the main thread
self.postMessage({
status: 'complete',
output: output,
});
});
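// Illustrative sketch of the main-thread side (names assumed):
//
//   const worker = new Worker(new URL('./worker.js', import.meta.url), { type: 'module' });
//   worker.postMessage({ text: 'Hello', src_lang: 'eng_Latn', tgt_lang: 'fra_Latn' });
//   worker.onmessage = (e) => {
//       if (e.data.status === 'complete') console.log(e.data.output);
//   };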
| transformers.js/examples/react-translator/src/worker.js/0 | {
"file_path": "transformers.js/examples/react-translator/src/worker.js",
"repo_id": "transformers.js",
"token_count": 614
} | 326 |
# Semantic Image Search
This example shows you how to use Transformers.js to create a semantic image search engine. Check out the demo [here](https://huggingface.co/spaces/Xenova/semantic-image-search).

## Getting Started
### Dataset
This application uses images from [The Unsplash Dataset](https://github.com/unsplash/datasets), which you can download [here](https://unsplash.com/data/lite/latest). All you need for this demo is the `photos.tsv000` TSV file, which contains the metadata for all the images.
### Connecting to Supabase
After creating a new [Supabase](https://supabase.com/) project, you'll need to:
1. Create an `images` table and import the data from `photos.tsv000`.
2. Add a column for `image_embeddings`:
```sql
-- Add a new vector column with a dimension of 512
alter table images add column image_embedding vector(512);
```
3. Add your `SUPABASE_URL`, `SUPABASE_ANON_KEY`, and `SUPABASE_SECRET_KEY` keys to a `.env.local` file (see `.env.local.example` for template).
4. Update the image embeddings in your database by running the following command:
```bash
SUPABASE_URL=your-project-url \
SUPABASE_SECRET_KEY=your-secret-key \
node scripts/update-database.mjs
```
*Note:* This will take a while. Also, since queries are capped at 1000 returned rows, you'll need to run this command multiple times to insert all 25000 rows.
5. Create a new `match_images` [database function](https://supabase.com/docs/guides/database/functions):
```sql
-- https://supabase.com/blog/openai-embeddings-postgres-vector
create or replace function match_images (
query_embedding vector(512),
match_threshold float,
match_count int
)
returns table (
photo_id text,
photo_url text,
photo_image_url text,
photo_width int,
photo_height int,
photo_aspect_ratio float,
photo_description text,
ai_description text,
blur_hash text,
similarity float
)
language sql stable
as $$
select
photo_id,
photo_url,
photo_image_url,
photo_width,
photo_height,
photo_aspect_ratio,
photo_description,
ai_description,
blur_hash,
1 - (image_embedding <=> query_embedding) as similarity
from images
where 1 - (image_embedding <=> query_embedding) > match_threshold
order by similarity desc
limit match_count;
$$;
```
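
For reference, the app can then call this function via `supabase-js` (an illustrative sketch; `supabase` is an initialized client and `embedding` is a 512-dimensional CLIP text embedding):

```js
const { data, error } = await supabase.rpc('match_images', {
  query_embedding: embedding,
  match_threshold: 0.1,
  match_count: 100,
});
```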
6. Add a [database policy](https://supabase.com/docs/guides/auth/row-level-security#policies) to allow users to view the database:
```sql
create policy "policy_name"
on public.images
for select using (
true
);
```
### Development
You can now run the development server with:
```bash
npm run dev
```
Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
| transformers.js/examples/semantic-image-search/README.md/0 | {
"file_path": "transformers.js/examples/semantic-image-search/README.md",
"repo_id": "transformers.js",
"token_count": 1129
} | 327 |
'use client'
import { useState } from 'react'
import { Modal } from './components/Modal';
import { SearchBar } from './components/SearchBar';
import { ImageGrid } from './components/ImageGrid';
export default function Home() {
// Application state
const [images, setImages] = useState(null);
const [currentImage, setCurrentImage] = useState(null);
const search = async (text) => {
if (!text) return;
const params = new URLSearchParams();
params.append('text', text);
params.append('threshold', 0.1);
params.append('limit', 100);
// Make a request to the /classify route on the server.
const result = await fetch(`/search?${params.toString()}`);
const json = await result.json();
setImages(json);
};
return (
<main className="mx-auto max-w-[1960px] p-4 relative">
<Modal currentImage={currentImage} setCurrentImage={setCurrentImage} />
<SearchBar search={search} />
<ImageGrid images={images} setCurrentImage={setCurrentImage} />
</main>
)
}
| transformers.js/examples/semantic-image-search/src/app/page.js/0 | {
"file_path": "transformers.js/examples/semantic-image-search/src/app/page.js",
"repo_id": "transformers.js",
"token_count": 345
} | 328 |
// Although not strictly necessary, we delegate the tokenization to a worker thread to avoid
// any potential issues with the tokenizer blocking the main thread (especially for large inputs).
import { env, AutoTokenizer } from '@xenova/transformers'
env.allowLocalModels = false;
// This is a map of all the tokenizer instances that we have loaded.
// model_id -> promise that resolves to tokenizer
const TOKENIZER_MAPPINGS = new Map();
// Listen for messages from the main thread
self.addEventListener('message', async (event) => {
let tokenizerPromise = TOKENIZER_MAPPINGS.get(event.data.model_id);
// Load the tokenizer if it hasn't been loaded yet
if (!tokenizerPromise) {
tokenizerPromise = AutoTokenizer.from_pretrained(event.data.model_id);
TOKENIZER_MAPPINGS.set(event.data.model_id, new Promise((resolve) => {
// Just for visualization purposes, we may need to modify the tokenizer slightly
tokenizerPromise.then((tokenizer) => {
// NOTE: We just remove the StripDecoder from the llama tokenizer
switch (tokenizer.constructor.name) {
case 'LlamaTokenizer':
case 'Grok1Tokenizer':
// tokenizer.decoder.decoders.at(-1).constructor.name === 'StripDecoder'
tokenizer.decoder.decoders.pop();
break;
case 'T5Tokenizer':
tokenizer.decoder.addPrefixSpace = false;
break;
}
resolve(tokenizer);
});
}));
}
const tokenizer = await tokenizerPromise;
const text = event.data.text;
const start = performance.now();
const token_ids = tokenizer.encode(text);
const end = performance.now();
console.log('[INFO]', `Tokenized ${text.length} characters in ${(end - start).toFixed(2)}ms`)
let decoded = token_ids.map(x => tokenizer.decode([x]));
let margins = [];
// Minor post-processing for visualization purposes
switch (tokenizer.constructor.name) {
case 'BertTokenizer':
margins = decoded.map((x, i) => i === 0 || x.startsWith('##') ? 0 : 8);
decoded = decoded.map(x => x.replace('##', ''));
break;
case 'T5Tokenizer':
            if (decoded.length > 0 && decoded[0] !== ' ') {
decoded[0] = decoded[0].replace(/^ /, '');
}
break;
}
// Send the output back to the main thread
self.postMessage({
token_ids, decoded, margins
});
}); | transformers.js/examples/tokenizer-playground/src/worker.js/0 | {
"file_path": "transformers.js/examples/tokenizer-playground/src/worker.js",
"repo_id": "transformers.js",
"token_count": 1112
} | 329 |
import './style.css';
import { env, AutoModel, AutoProcessor, RawImage } from '@xenova/transformers';
env.backends.onnx.wasm.wasmPaths = 'https://cdn.jsdelivr.net/npm/onnxruntime-web@1.17.1/dist/';
env.backends.onnx.wasm.numThreads = 1;
// Reference the elements that we will need
const status = document.getElementById('status');
const container = document.getElementById('container');
const canvas = document.getElementById('canvas');
const outputCanvas = document.getElementById('output-canvas');
const video = document.getElementById('video');
const sizeSlider = document.getElementById('size');
const sizeLabel = document.getElementById('size-value');
const scaleSlider = document.getElementById('scale');
const scaleLabel = document.getElementById('scale-value');
function setStreamSize(width, height) {
video.width = outputCanvas.width = canvas.width = Math.round(width);
video.height = outputCanvas.height = canvas.height = Math.round(height);
}
status.textContent = 'Loading model...';
// Load model and processor
const model_id = 'Xenova/modnet';
let model;
try {
model = await AutoModel.from_pretrained(model_id, {
device: 'webgpu',
dtype: 'fp32', // TODO: add fp16 support
});
} catch (err) {
status.textContent = err.message;
alert(err.message)
throw err;
}
const processor = await AutoProcessor.from_pretrained(model_id);
// Set up controls
let size = 256;
processor.feature_extractor.size = { shortest_edge: size };
sizeSlider.addEventListener('input', () => {
size = Number(sizeSlider.value);
processor.feature_extractor.size = { shortest_edge: size };
sizeLabel.textContent = size;
});
sizeSlider.disabled = false;
let scale = 0.5;
scaleSlider.addEventListener('input', () => {
scale = Number(scaleSlider.value);
setStreamSize(video.videoWidth * scale, video.videoHeight * scale);
scaleLabel.textContent = scale;
});
scaleSlider.disabled = false;
status.textContent = 'Ready';
let isProcessing = false;
let previousTime;
const context = canvas.getContext('2d', { willReadFrequently: true });
const outputContext = outputCanvas.getContext('2d', { willReadFrequently: true });
function updateCanvas() {
const { width, height } = canvas;
if (!isProcessing) {
isProcessing = true;
(async function () {
// Read the current frame from the video
context.drawImage(video, 0, 0, width, height);
const currentFrame = context.getImageData(0, 0, width, height);
const image = new RawImage(currentFrame.data, width, height, 4);
// Pre-process image
const inputs = await processor(image);
// Predict alpha matte
const { output } = await model({ input: inputs.pixel_values });
const mask = await RawImage.fromTensor(output[0].mul(255).to('uint8')).resize(width, height);
// Update alpha channel
const outPixelData = currentFrame;
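            // RGBA layout: byte 4 * i + 3 is the alpha value of pixel i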
for (let i = 0; i < mask.data.length; ++i) {
outPixelData.data[4 * i + 3] = mask.data[i];
}
outputContext.putImageData(outPixelData, 0, 0);
if (previousTime !== undefined) {
const fps = 1000 / (performance.now() - previousTime);
status.textContent = `FPS: ${fps.toFixed(2)}`;
}
previousTime = performance.now();
isProcessing = false;
})();
}
window.requestAnimationFrame(updateCanvas);
}
// Start the video stream
navigator.mediaDevices.getUserMedia(
{ video: true }, // Ask for video
).then((stream) => {
// Set up the video and canvas elements.
video.srcObject = stream;
video.play();
const videoTrack = stream.getVideoTracks()[0];
const { width, height } = videoTrack.getSettings();
setStreamSize(width * scale, height * scale);
// Set container width and height depending on the image aspect ratio
const ar = width / height;
const [cw, ch] = (ar > 720 / 405) ? [720, 720 / ar] : [405 * ar, 405];
container.style.width = `${cw}px`;
container.style.height = `${ch}px`;
// Start the animation loop
setTimeout(updateCanvas, 50);
}).catch((error) => {
alert(error);
});
| transformers.js/examples/webgpu-video-background-removal/main.js/0 | {
"file_path": "transformers.js/examples/webgpu-video-background-removal/main.js",
"repo_id": "transformers.js",
"token_count": 1573
} | 330 |
{
"name": "@huggingface/transformers",
"version": "3.7.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@huggingface/transformers",
"version": "3.7.2",
"license": "Apache-2.0",
"dependencies": {
"@huggingface/jinja": "^0.5.1",
"onnxruntime-node": "1.21.0",
"onnxruntime-web": "1.22.0-dev.20250409-89f8206ba4",
"sharp": "^0.34.1"
},
"devDependencies": {
"@types/jest": "^29.5.14",
"@types/node": "^22.10.1",
"@webgpu/types": "^0.1.51",
"catharsis": "github:xenova/catharsis",
"jest": "^30.0.0-alpha.6",
"jest-environment-node": "^30.0.0-alpha.6",
"jsdoc-to-markdown": "^9.1.1",
"prettier": "3.4.2",
"typescript": "^5.8.2",
"wavefile": "11.0.0",
"webpack": "^5.97.1",
"webpack-cli": "^5.1.4",
"webpack-dev-server": "^5.1.0"
}
},
"node_modules/@ampproject/remapping": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz",
"integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==",
"dev": true,
"license": "Apache-2.0",
"dependencies": {
"@jridgewell/gen-mapping": "^0.3.5",
"@jridgewell/trace-mapping": "^0.3.24"
},
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@babel/code-frame": {
"version": "7.26.2",
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz",
"integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-validator-identifier": "^7.25.9",
"js-tokens": "^4.0.0",
"picocolors": "^1.0.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/compat-data": {
"version": "7.26.2",
"resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.26.2.tgz",
"integrity": "sha512-Z0WgzSEa+aUcdiJuCIqgujCshpMWgUpgOxXotrYPSA53hA3qopNaqcJpyr0hVb1FeWdnqFA35/fUtXgBK8srQg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/core": {
"version": "7.26.0",
"resolved": "https://registry.npmjs.org/@babel/core/-/core-7.26.0.tgz",
"integrity": "sha512-i1SLeK+DzNnQ3LL/CswPCa/E5u4lh1k6IAEphON8F+cXt0t9euTshDru0q7/IqMa1PMPz5RnHuHscF8/ZJsStg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@ampproject/remapping": "^2.2.0",
"@babel/code-frame": "^7.26.0",
"@babel/generator": "^7.26.0",
"@babel/helper-compilation-targets": "^7.25.9",
"@babel/helper-module-transforms": "^7.26.0",
"@babel/helpers": "^7.26.0",
"@babel/parser": "^7.26.0",
"@babel/template": "^7.25.9",
"@babel/traverse": "^7.25.9",
"@babel/types": "^7.26.0",
"convert-source-map": "^2.0.0",
"debug": "^4.1.0",
"gensync": "^1.0.0-beta.2",
"json5": "^2.2.3",
"semver": "^6.3.1"
},
"engines": {
"node": ">=6.9.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/babel"
}
},
"node_modules/@babel/core/node_modules/debug": {
"version": "4.3.7",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz",
"integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"ms": "^2.1.3"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/@babel/core/node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"dev": true,
"license": "MIT"
},
"node_modules/@babel/core/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"dev": true,
"license": "ISC",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/@babel/generator": {
"version": "7.26.2",
"resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.2.tgz",
"integrity": "sha512-zevQbhbau95nkoxSq3f/DC/SC+EEOUZd3DYqfSkMhY2/wfSeaHV1Ew4vk8e+x8lja31IbyuUa2uQ3JONqKbysw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/parser": "^7.26.2",
"@babel/types": "^7.26.0",
"@jridgewell/gen-mapping": "^0.3.5",
"@jridgewell/trace-mapping": "^0.3.25",
"jsesc": "^3.0.2"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-compilation-targets": {
"version": "7.25.9",
"resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.25.9.tgz",
"integrity": "sha512-j9Db8Suy6yV/VHa4qzrj9yZfZxhLWQdVnRlXxmKLYlhWUVB1sB2G5sxuWYXk/whHD9iW76PmNzxZ4UCnTQTVEQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/compat-data": "^7.25.9",
"@babel/helper-validator-option": "^7.25.9",
"browserslist": "^4.24.0",
"lru-cache": "^5.1.1",
"semver": "^6.3.1"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-compilation-targets/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"dev": true,
"license": "ISC",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/@babel/helper-module-imports": {
"version": "7.25.9",
"resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz",
"integrity": "sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/traverse": "^7.25.9",
"@babel/types": "^7.25.9"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-module-transforms": {
"version": "7.26.0",
"resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.26.0.tgz",
"integrity": "sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-module-imports": "^7.25.9",
"@babel/helper-validator-identifier": "^7.25.9",
"@babel/traverse": "^7.25.9"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0"
}
},
"node_modules/@babel/helper-plugin-utils": {
"version": "7.25.9",
"resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.25.9.tgz",
"integrity": "sha512-kSMlyUVdWe25rEsRGviIgOWnoT/nfABVWlqt9N19/dIPWViAOW2s9wznP5tURbs/IDuNk4gPy3YdYRgH3uxhBw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-string-parser": {
"version": "7.25.9",
"resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz",
"integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-validator-identifier": {
"version": "7.25.9",
"resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz",
"integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-validator-option": {
"version": "7.25.9",
"resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.25.9.tgz",
"integrity": "sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helpers": {
"version": "7.27.0",
"resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.27.0.tgz",
"integrity": "sha512-U5eyP/CTFPuNE3qk+WZMxFkp/4zUzdceQlfzf7DdGdhp+Fezd7HD+i8Y24ZuTMKX3wQBld449jijbGq6OdGNQg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/template": "^7.27.0",
"@babel/types": "^7.27.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/parser": {
"version": "7.27.0",
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.27.0.tgz",
"integrity": "sha512-iaepho73/2Pz7w2eMS0Q5f83+0RKI7i4xmiYeBmDzfRVbQtTOG7Ts0S4HzJVsTMGI9keU8rNfuZr8DKfSt7Yyg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/types": "^7.27.0"
},
"bin": {
"parser": "bin/babel-parser.js"
},
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@babel/plugin-syntax-async-generators": {
"version": "7.8.4",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz",
"integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.8.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-bigint": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz",
"integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.8.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-class-properties": {
"version": "7.12.13",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz",
"integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.12.13"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-class-static-block": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz",
"integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.14.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-import-attributes": {
"version": "7.26.0",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.26.0.tgz",
"integrity": "sha512-e2dttdsJ1ZTpi3B9UYGLw41hifAubg19AtCu/2I/F1QNVclOBr1dYpTdmdyZ84Xiz43BS/tCUkMAZNLv12Pi+A==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.25.9"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-import-meta": {
"version": "7.10.4",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz",
"integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.10.4"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-json-strings": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz",
"integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.8.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-jsx": {
"version": "7.25.9",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.25.9.tgz",
"integrity": "sha512-ld6oezHQMZsZfp6pWtbjaNDF2tiiCYYDqQszHt5VV437lewP9aSi2Of99CK0D0XB21k7FLgnLcmQKyKzynfeAA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.25.9"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-logical-assignment-operators": {
"version": "7.10.4",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz",
"integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.10.4"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-nullish-coalescing-operator": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz",
"integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.8.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-numeric-separator": {
"version": "7.10.4",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz",
"integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.10.4"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-object-rest-spread": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz",
"integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.8.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-optional-catch-binding": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz",
"integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.8.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-optional-chaining": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz",
"integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.8.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-private-property-in-object": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz",
"integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.14.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-top-level-await": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz",
"integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.14.5"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-typescript": {
"version": "7.25.9",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.25.9.tgz",
"integrity": "sha512-hjMgRy5hb8uJJjUcdWunWVcoi9bGpJp8p5Ol1229PoN6aytsLwNMgmdftO23wnCLMfVmTwZDWMPNq/D1SY60JQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.25.9"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/template": {
"version": "7.27.0",
"resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.0.tgz",
"integrity": "sha512-2ncevenBqXI6qRMukPlXwHKHchC7RyMuu4xv5JBXRfOGVcTy1mXCD12qrp7Jsoxll1EV3+9sE4GugBVRjT2jFA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/code-frame": "^7.26.2",
"@babel/parser": "^7.27.0",
"@babel/types": "^7.27.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/traverse": {
"version": "7.25.9",
"resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.25.9.tgz",
"integrity": "sha512-ZCuvfwOwlz/bawvAuvcj8rrithP2/N55Tzz342AkTvq4qaWbGfmCk/tKhNaV2cthijKrPAA8SRJV5WWe7IBMJw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/code-frame": "^7.25.9",
"@babel/generator": "^7.25.9",
"@babel/parser": "^7.25.9",
"@babel/template": "^7.25.9",
"@babel/types": "^7.25.9",
"debug": "^4.3.1",
"globals": "^11.1.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/traverse/node_modules/debug": {
"version": "4.3.7",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz",
"integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"ms": "^2.1.3"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/@babel/traverse/node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"dev": true,
"license": "MIT"
},
"node_modules/@babel/types": {
"version": "7.27.0",
"resolved": "https://registry.npmjs.org/@babel/types/-/types-7.27.0.tgz",
"integrity": "sha512-H45s8fVLYjbhFH62dIJ3WtmJ6RSPt/3DRO0ZcT2SUiYiQyz3BLVb9ADEnLl91m74aQPS3AzzeajZHYOalWe3bg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-string-parser": "^7.25.9",
"@babel/helper-validator-identifier": "^7.25.9"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@bcoe/v8-coverage": {
"version": "0.2.3",
"resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz",
"integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==",
"dev": true,
"license": "MIT"
},
"node_modules/@discoveryjs/json-ext": {
"version": "0.5.7",
"resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz",
"integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==",
"dev": true,
"engines": {
"node": ">=10.0.0"
}
},
"node_modules/@emnapi/runtime": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.4.0.tgz",
"integrity": "sha512-64WYIf4UYcdLnbKn/umDlNjQDSS8AgZrI/R9+x5ilkUVFxXcA1Ebl+gQLc/6mERA4407Xof0R7wEyEuj091CVw==",
"license": "MIT",
"optional": true,
"dependencies": {
"tslib": "^2.4.0"
}
},
"node_modules/@huggingface/jinja": {
"version": "0.5.1",
"resolved": "https://registry.npmjs.org/@huggingface/jinja/-/jinja-0.5.1.tgz",
"integrity": "sha512-yUZLld4lrM9iFxHCwFQ7D1HW2MWMwSbeB7WzWqFYDWK+rEb+WldkLdAJxUPOmgICMHZLzZGVcVjFh3w/YGubng==",
"license": "MIT",
"engines": {
"node": ">=18"
}
},
"node_modules/@img/sharp-darwin-arm64": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.34.1.tgz",
"integrity": "sha512-pn44xgBtgpEbZsu+lWf2KNb6OAf70X68k+yk69Ic2Xz11zHR/w24/U49XT7AeRwJ0Px+mhALhU5LPci1Aymk7A==",
"cpu": [
"arm64"
],
"license": "Apache-2.0",
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-darwin-arm64": "1.1.0"
}
},
"node_modules/@img/sharp-darwin-x64": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.34.1.tgz",
"integrity": "sha512-VfuYgG2r8BpYiOUN+BfYeFo69nP/MIwAtSJ7/Zpxc5QF3KS22z8Pvg3FkrSFJBPNQ7mmcUcYQFBmEQp7eu1F8Q==",
"cpu": [
"x64"
],
"license": "Apache-2.0",
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-darwin-x64": "1.1.0"
}
},
"node_modules/@img/sharp-libvips-darwin-arm64": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.1.0.tgz",
"integrity": "sha512-HZ/JUmPwrJSoM4DIQPv/BfNh9yrOA8tlBbqbLz4JZ5uew2+o22Ik+tHQJcih7QJuSa0zo5coHTfD5J8inqj9DA==",
"cpu": [
"arm64"
],
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
"darwin"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-libvips-darwin-x64": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.1.0.tgz",
"integrity": "sha512-Xzc2ToEmHN+hfvsl9wja0RlnXEgpKNmftriQp6XzY/RaSfwD9th+MSh0WQKzUreLKKINb3afirxW7A0fz2YWuQ==",
"cpu": [
"x64"
],
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
"darwin"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-libvips-linux-arm": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.1.0.tgz",
"integrity": "sha512-s8BAd0lwUIvYCJyRdFqvsj+BJIpDBSxs6ivrOPm/R7piTs5UIwY5OjXrP2bqXC9/moGsyRa37eYWYCOGVXxVrA==",
"cpu": [
"arm"
],
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
"linux"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-libvips-linux-arm64": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.1.0.tgz",
"integrity": "sha512-IVfGJa7gjChDET1dK9SekxFFdflarnUB8PwW8aGwEoF3oAsSDuNUTYS+SKDOyOJxQyDC1aPFMuRYLoDInyV9Ew==",
"cpu": [
"arm64"
],
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
"linux"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-libvips-linux-ppc64": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-ppc64/-/sharp-libvips-linux-ppc64-1.1.0.tgz",
"integrity": "sha512-tiXxFZFbhnkWE2LA8oQj7KYR+bWBkiV2nilRldT7bqoEZ4HiDOcePr9wVDAZPi/Id5fT1oY9iGnDq20cwUz8lQ==",
"cpu": [
"ppc64"
],
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
"linux"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-libvips-linux-s390x": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.1.0.tgz",
"integrity": "sha512-xukSwvhguw7COyzvmjydRb3x/09+21HykyapcZchiCUkTThEQEOMtBj9UhkaBRLuBrgLFzQ2wbxdeCCJW/jgJA==",
"cpu": [
"s390x"
],
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
"linux"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-libvips-linux-x64": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.1.0.tgz",
"integrity": "sha512-yRj2+reB8iMg9W5sULM3S74jVS7zqSzHG3Ol/twnAAkAhnGQnpjj6e4ayUz7V+FpKypwgs82xbRdYtchTTUB+Q==",
"cpu": [
"x64"
],
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
"linux"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-libvips-linuxmusl-arm64": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.1.0.tgz",
"integrity": "sha512-jYZdG+whg0MDK+q2COKbYidaqW/WTz0cc1E+tMAusiDygrM4ypmSCjOJPmFTvHHJ8j/6cAGyeDWZOsK06tP33w==",
"cpu": [
"arm64"
],
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
"linux"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-libvips-linuxmusl-x64": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.1.0.tgz",
"integrity": "sha512-wK7SBdwrAiycjXdkPnGCPLjYb9lD4l6Ze2gSdAGVZrEL05AOUJESWU2lhlC+Ffn5/G+VKuSm6zzbQSzFX/P65A==",
"cpu": [
"x64"
],
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
"linux"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-linux-arm": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.34.1.tgz",
"integrity": "sha512-anKiszvACti2sGy9CirTlNyk7BjjZPiML1jt2ZkTdcvpLU1YH6CXwRAZCA2UmRXnhiIftXQ7+Oh62Ji25W72jA==",
"cpu": [
"arm"
],
"license": "Apache-2.0",
"optional": true,
"os": [
"linux"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linux-arm": "1.1.0"
}
},
"node_modules/@img/sharp-linux-arm64": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.34.1.tgz",
"integrity": "sha512-kX2c+vbvaXC6vly1RDf/IWNXxrlxLNpBVWkdpRq5Ka7OOKj6nr66etKy2IENf6FtOgklkg9ZdGpEu9kwdlcwOQ==",
"cpu": [
"arm64"
],
"license": "Apache-2.0",
"optional": true,
"os": [
"linux"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linux-arm64": "1.1.0"
}
},
"node_modules/@img/sharp-linux-s390x": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.34.1.tgz",
"integrity": "sha512-7s0KX2tI9mZI2buRipKIw2X1ufdTeaRgwmRabt5bi9chYfhur+/C1OXg3TKg/eag1W+6CCWLVmSauV1owmRPxA==",
"cpu": [
"s390x"
],
"license": "Apache-2.0",
"optional": true,
"os": [
"linux"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linux-s390x": "1.1.0"
}
},
"node_modules/@img/sharp-linux-x64": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.34.1.tgz",
"integrity": "sha512-wExv7SH9nmoBW3Wr2gvQopX1k8q2g5V5Iag8Zk6AVENsjwd+3adjwxtp3Dcu2QhOXr8W9NusBU6XcQUohBZ5MA==",
"cpu": [
"x64"
],
"license": "Apache-2.0",
"optional": true,
"os": [
"linux"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linux-x64": "1.1.0"
}
},
"node_modules/@img/sharp-linuxmusl-arm64": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.34.1.tgz",
"integrity": "sha512-DfvyxzHxw4WGdPiTF0SOHnm11Xv4aQexvqhRDAoD00MzHekAj9a/jADXeXYCDFH/DzYruwHbXU7uz+H+nWmSOQ==",
"cpu": [
"arm64"
],
"license": "Apache-2.0",
"optional": true,
"os": [
"linux"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linuxmusl-arm64": "1.1.0"
}
},
"node_modules/@img/sharp-linuxmusl-x64": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.34.1.tgz",
"integrity": "sha512-pax/kTR407vNb9qaSIiWVnQplPcGU8LRIJpDT5o8PdAx5aAA7AS3X9PS8Isw1/WfqgQorPotjrZL3Pqh6C5EBg==",
"cpu": [
"x64"
],
"license": "Apache-2.0",
"optional": true,
"os": [
"linux"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linuxmusl-x64": "1.1.0"
}
},
"node_modules/@img/sharp-wasm32": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.34.1.tgz",
"integrity": "sha512-YDybQnYrLQfEpzGOQe7OKcyLUCML4YOXl428gOOzBgN6Gw0rv8dpsJ7PqTHxBnXnwXr8S1mYFSLSa727tpz0xg==",
"cpu": [
"wasm32"
],
"license": "Apache-2.0 AND LGPL-3.0-or-later AND MIT",
"optional": true,
"dependencies": {
"@emnapi/runtime": "^1.4.0"
},
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-win32-ia32": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.34.1.tgz",
"integrity": "sha512-WKf/NAZITnonBf3U1LfdjoMgNO5JYRSlhovhRhMxXVdvWYveM4kM3L8m35onYIdh75cOMCo1BexgVQcCDzyoWw==",
"cpu": [
"ia32"
],
"license": "Apache-2.0 AND LGPL-3.0-or-later",
"optional": true,
"os": [
"win32"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-win32-x64": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.34.1.tgz",
"integrity": "sha512-hw1iIAHpNE8q3uMIRCgGOeDoz9KtFNarFLQclLxr/LK1VBkj8nby18RjFvr6aP7USRYAjTZW6yisnBWMX571Tw==",
"cpu": [
"x64"
],
"license": "Apache-2.0 AND LGPL-3.0-or-later",
"optional": true,
"os": [
"win32"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@isaacs/cliui": {
"version": "8.0.2",
"resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
"integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==",
"dependencies": {
"string-width": "^5.1.2",
"string-width-cjs": "npm:string-width@^4.2.0",
"strip-ansi": "^7.0.1",
"strip-ansi-cjs": "npm:strip-ansi@^6.0.1",
"wrap-ansi": "^8.1.0",
"wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/@isaacs/cliui/node_modules/ansi-regex": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz",
"integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-regex?sponsor=1"
}
},
"node_modules/@isaacs/cliui/node_modules/ansi-styles": {
"version": "6.2.1",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
"integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/@isaacs/cliui/node_modules/emoji-regex": {
"version": "9.2.2",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz",
"integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="
},
"node_modules/@isaacs/cliui/node_modules/string-width": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
"integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==",
"dependencies": {
"eastasianwidth": "^0.2.0",
"emoji-regex": "^9.2.2",
"strip-ansi": "^7.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/@isaacs/cliui/node_modules/strip-ansi": {
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
"integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
"dependencies": {
"ansi-regex": "^6.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/strip-ansi?sponsor=1"
}
},
"node_modules/@isaacs/cliui/node_modules/wrap-ansi": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz",
"integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==",
"dependencies": {
"ansi-styles": "^6.1.0",
"string-width": "^5.0.1",
"strip-ansi": "^7.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/@isaacs/fs-minipass": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz",
"integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==",
"dependencies": {
"minipass": "^7.0.4"
},
"engines": {
"node": ">=18.0.0"
}
},
"node_modules/@istanbuljs/load-nyc-config": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz",
"integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==",
"dev": true,
"license": "ISC",
"dependencies": {
"camelcase": "^5.3.1",
"find-up": "^4.1.0",
"get-package-type": "^0.1.0",
"js-yaml": "^3.13.1",
"resolve-from": "^5.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/@istanbuljs/schema": {
"version": "0.1.3",
"resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz",
"integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/@jest/console": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/console/-/console-30.0.0-alpha.6.tgz",
"integrity": "sha512-08BeAnuabmauj5B+Xa4GNPAotQUGm3PLKSE3rnpnmxniZzR4tXhx8+AA2+HGTri4bbVRY/r3Jl0vJnkhvHTkeQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"jest-message-util": "30.0.0-alpha.6",
"jest-util": "30.0.0-alpha.6",
"slash": "^3.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/console/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/console/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/console/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/@jest/console/node_modules/ansi-styles": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/@jest/console/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/@jest/console/node_modules/jest-message-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-XAGJqkrBo7m3bFxWqiNqL0PyAWGf1XHR6bTve90MjBKJuIzhJsounGTzBNUw8JoU7Uzcj5Z6ZmEhaE3CDnwjfw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/code-frame": "^7.12.13",
"@jest/types": "30.0.0-alpha.6",
"@types/stack-utils": "^2.0.0",
"chalk": "^4.0.0",
"graceful-fs": "^4.2.9",
"micromatch": "^4.0.7",
"pretty-format": "30.0.0-alpha.6",
"slash": "^3.0.0",
"stack-utils": "^2.0.3"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/console/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/console/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/@jest/console/node_modules/pretty-format": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.0.0-alpha.6.tgz",
"integrity": "sha512-xkeffkZoqQmRrcNewpOsUCKNOl+CkPqjt3Ld749uz1S7/O7GuPNPv2fZk3v/1U/FE8/B4Zz0llVL80MKON1tOQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/schemas": "30.0.0-alpha.6",
"ansi-styles": "^5.0.0",
"react-is": "^18.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/core": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/core/-/core-30.0.0-alpha.6.tgz",
"integrity": "sha512-Qsvu9/I0hUOpeelp3jlTmg6cg3C+w18v4hxWVGchCRJAChvuxmsomB1Cm+DKB6NiMy2EvUvpwdT8X31lERtemw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/console": "30.0.0-alpha.6",
"@jest/pattern": "30.0.0-alpha.6",
"@jest/reporters": "30.0.0-alpha.6",
"@jest/test-result": "30.0.0-alpha.6",
"@jest/transform": "30.0.0-alpha.6",
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"ansi-escapes": "^4.2.1",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"exit": "^0.1.2",
"graceful-fs": "^4.2.9",
"jest-changed-files": "30.0.0-alpha.6",
"jest-config": "30.0.0-alpha.6",
"jest-haste-map": "30.0.0-alpha.6",
"jest-message-util": "30.0.0-alpha.6",
"jest-regex-util": "30.0.0-alpha.6",
"jest-resolve": "30.0.0-alpha.6",
"jest-resolve-dependencies": "30.0.0-alpha.6",
"jest-runner": "30.0.0-alpha.6",
"jest-runtime": "30.0.0-alpha.6",
"jest-snapshot": "30.0.0-alpha.6",
"jest-util": "30.0.0-alpha.6",
"jest-validate": "30.0.0-alpha.6",
"jest-watcher": "30.0.0-alpha.6",
"micromatch": "^4.0.7",
"pretty-format": "30.0.0-alpha.6",
"slash": "^3.0.0",
"strip-ansi": "^6.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
},
"peerDependencies": {
"node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0"
},
"peerDependenciesMeta": {
"node-notifier": {
"optional": true
}
}
},
"node_modules/@jest/core/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/core/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/core/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/@jest/core/node_modules/ansi-styles": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/@jest/core/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/@jest/core/node_modules/jest-message-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-XAGJqkrBo7m3bFxWqiNqL0PyAWGf1XHR6bTve90MjBKJuIzhJsounGTzBNUw8JoU7Uzcj5Z6ZmEhaE3CDnwjfw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/code-frame": "^7.12.13",
"@jest/types": "30.0.0-alpha.6",
"@types/stack-utils": "^2.0.0",
"chalk": "^4.0.0",
"graceful-fs": "^4.2.9",
"micromatch": "^4.0.7",
"pretty-format": "30.0.0-alpha.6",
"slash": "^3.0.0",
"stack-utils": "^2.0.3"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/core/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/core/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/@jest/core/node_modules/pretty-format": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.0.0-alpha.6.tgz",
"integrity": "sha512-xkeffkZoqQmRrcNewpOsUCKNOl+CkPqjt3Ld749uz1S7/O7GuPNPv2fZk3v/1U/FE8/B4Zz0llVL80MKON1tOQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/schemas": "30.0.0-alpha.6",
"ansi-styles": "^5.0.0",
"react-is": "^18.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/environment": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/environment/-/environment-30.0.0-alpha.6.tgz",
"integrity": "sha512-pjNYNkzq761hh8D2grrG77L6nhe2VBCFFM+G1hyqhaJ2MAzhp2Gh+G94uF3px7luSzLh8GYvGJQGYy197EUOGQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/fake-timers": "30.0.0-alpha.6",
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"jest-mock": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/environment/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/environment/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/environment/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/@jest/expect": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/expect/-/expect-30.0.0-alpha.6.tgz",
"integrity": "sha512-3O74pygTwUBzUjO958IgNwmp0WrjASbiWdMEfUMePVqtiGoyS4Nxj9hsx4uKsNVivNJSZiiayYoP6dLhWerJXQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"expect": "30.0.0-alpha.6",
"jest-snapshot": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/expect-utils": {
"version": "29.6.1",
"resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.6.1.tgz",
"integrity": "sha512-o319vIf5pEMx0LmzSxxkYYxo4wrRLKHq9dP1yJU7FoPTB0LfAKSz8SWD6D/6U3v/O52t9cF5t+MeJiRsfk7zMw==",
"dev": true,
"dependencies": {
"jest-get-type": "^29.4.3"
},
"engines": {
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
}
},
"node_modules/@jest/expect/node_modules/@jest/expect-utils": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-30.0.0-alpha.6.tgz",
"integrity": "sha512-QMySMhaCUl0ZQd7Tx5X3fVWY5jtQxZNrTll0OyavdQ70ZTLgk0kU9K+XovcMWO26MK9R5EX7bBgD/j7w9hUM4w==",
"dev": true,
"license": "MIT",
"dependencies": {
"jest-get-type": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/expect/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/expect/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/expect/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/@jest/expect/node_modules/ansi-styles": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/@jest/expect/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/@jest/expect/node_modules/diff-sequences": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-30.0.0-alpha.6.tgz",
"integrity": "sha512-DVGt3/yzbneMUTuupsMqyfSXMnU2fE0lVsC9uFsJmRpluvSi7ZhrS0GX5tnMna6Ta788FGfOUx+irI/+cAZ4EA==",
"dev": true,
"license": "MIT",
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/expect/node_modules/expect": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/expect/-/expect-30.0.0-alpha.6.tgz",
"integrity": "sha512-WVi2V4iHKw/vHEyye00Q9CSZz7KHDbJkJyteUI8kTih9jiyMl3bIk7wLYFcY9D1Blnadlyb5w5NBuNjQBow99g==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/expect-utils": "30.0.0-alpha.6",
"jest-get-type": "30.0.0-alpha.6",
"jest-matcher-utils": "30.0.0-alpha.6",
"jest-message-util": "30.0.0-alpha.6",
"jest-mock": "30.0.0-alpha.6",
"jest-util": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/expect/node_modules/jest-diff": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-30.0.0-alpha.6.tgz",
"integrity": "sha512-43j1DoYwVKrkbB67a2gC0ijjIY9biF0JSPXv7H6zlOkzNlqYg8hSDzrurLNo6zGKatW4JSBLE79LmXPJPj1m6A==",
"dev": true,
"license": "MIT",
"dependencies": {
"chalk": "^4.0.0",
"diff-sequences": "30.0.0-alpha.6",
"jest-get-type": "30.0.0-alpha.6",
"pretty-format": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/expect/node_modules/jest-get-type": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-30.0.0-alpha.6.tgz",
"integrity": "sha512-lJEoQdCY4ICN6+T0lJ9BODKuqPOEpCv2NnJsEO1nmsK0fbWZmN/pgOPHVqLfK8i3jZpUmgupJ1w8r36mc8iiBQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/expect/node_modules/jest-matcher-utils": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-30.0.0-alpha.6.tgz",
"integrity": "sha512-jaq7+HznsK54G0qzu96ZwfMEKHmlPiDqg6qG2p/hVQzr6Y/qVMRh8abI9Y1lX6SSXkr+S9mPAkmOsuJNLTLYmQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"chalk": "^4.0.0",
"jest-diff": "30.0.0-alpha.6",
"jest-get-type": "30.0.0-alpha.6",
"pretty-format": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/expect/node_modules/jest-message-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-XAGJqkrBo7m3bFxWqiNqL0PyAWGf1XHR6bTve90MjBKJuIzhJsounGTzBNUw8JoU7Uzcj5Z6ZmEhaE3CDnwjfw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/code-frame": "^7.12.13",
"@jest/types": "30.0.0-alpha.6",
"@types/stack-utils": "^2.0.0",
"chalk": "^4.0.0",
"graceful-fs": "^4.2.9",
"micromatch": "^4.0.7",
"pretty-format": "30.0.0-alpha.6",
"slash": "^3.0.0",
"stack-utils": "^2.0.3"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/expect/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/expect/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/@jest/expect/node_modules/pretty-format": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.0.0-alpha.6.tgz",
"integrity": "sha512-xkeffkZoqQmRrcNewpOsUCKNOl+CkPqjt3Ld749uz1S7/O7GuPNPv2fZk3v/1U/FE8/B4Zz0llVL80MKON1tOQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/schemas": "30.0.0-alpha.6",
"ansi-styles": "^5.0.0",
"react-is": "^18.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/fake-timers": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-30.0.0-alpha.6.tgz",
"integrity": "sha512-deka0RmhJgEKPJM6cXPd4TJQ6fLczErdMN7Oxzr16UTDFHxtFd79tduJ8uP92dQyO4zy63N/dlFK6d+FHyWUDw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@sinonjs/fake-timers": "^11.1.0",
"@types/node": "*",
"jest-message-util": "30.0.0-alpha.6",
"jest-mock": "30.0.0-alpha.6",
"jest-util": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/fake-timers/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/fake-timers/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/fake-timers/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/@jest/fake-timers/node_modules/ansi-styles": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/@jest/fake-timers/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/@jest/fake-timers/node_modules/jest-message-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-XAGJqkrBo7m3bFxWqiNqL0PyAWGf1XHR6bTve90MjBKJuIzhJsounGTzBNUw8JoU7Uzcj5Z6ZmEhaE3CDnwjfw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/code-frame": "^7.12.13",
"@jest/types": "30.0.0-alpha.6",
"@types/stack-utils": "^2.0.0",
"chalk": "^4.0.0",
"graceful-fs": "^4.2.9",
"micromatch": "^4.0.7",
"pretty-format": "30.0.0-alpha.6",
"slash": "^3.0.0",
"stack-utils": "^2.0.3"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/fake-timers/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/fake-timers/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/@jest/fake-timers/node_modules/pretty-format": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.0.0-alpha.6.tgz",
"integrity": "sha512-xkeffkZoqQmRrcNewpOsUCKNOl+CkPqjt3Ld749uz1S7/O7GuPNPv2fZk3v/1U/FE8/B4Zz0llVL80MKON1tOQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/schemas": "30.0.0-alpha.6",
"ansi-styles": "^5.0.0",
"react-is": "^18.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/globals": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/globals/-/globals-30.0.0-alpha.6.tgz",
"integrity": "sha512-+uJMoPUos9RH6+52iNgKJBbx1Hd2QsCZjExi5XpVvTjJ/gE4eJ1X7irUMt+14sH0QkeZ3GnjeTJFopyjOCsu+Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/environment": "30.0.0-alpha.6",
"@jest/expect": "30.0.0-alpha.6",
"@jest/types": "30.0.0-alpha.6",
"jest-mock": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/globals/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/globals/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/globals/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/@jest/pattern": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/pattern/-/pattern-30.0.0-alpha.6.tgz",
"integrity": "sha512-eoV3sjS1M5k3YdrFWezqdndfgepwB86gwyXC0BzV2saZdJ42ySUoEDBGKuwta8A6Zh3w8tVHNFxz1BqiFraHCQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/node": "*",
"jest-regex-util": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/reporters": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-30.0.0-alpha.6.tgz",
"integrity": "sha512-jzW0t2OtEzBYwlG4EMJKG4q5RPaVvLPDm/nBS08hd+XPoLJJ9b5thyo/MoThIqJfdi0lHqFlDQUmlL205CMoSw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@bcoe/v8-coverage": "^0.2.3",
"@jest/console": "30.0.0-alpha.6",
"@jest/test-result": "30.0.0-alpha.6",
"@jest/transform": "30.0.0-alpha.6",
"@jest/types": "30.0.0-alpha.6",
"@jridgewell/trace-mapping": "^0.3.18",
"@types/node": "*",
"chalk": "^4.0.0",
"collect-v8-coverage": "^1.0.0",
"exit": "^0.1.2",
"glob": "^10.3.10",
"graceful-fs": "^4.2.9",
"istanbul-lib-coverage": "^3.0.0",
"istanbul-lib-instrument": "^6.0.0",
"istanbul-lib-report": "^3.0.0",
"istanbul-lib-source-maps": "^5.0.0",
"istanbul-reports": "^3.1.3",
"jest-message-util": "30.0.0-alpha.6",
"jest-util": "30.0.0-alpha.6",
"jest-worker": "30.0.0-alpha.6",
"slash": "^3.0.0",
"string-length": "^4.0.1",
"strip-ansi": "^6.0.0",
"v8-to-istanbul": "^9.0.1"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
},
"peerDependencies": {
"node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0"
},
"peerDependenciesMeta": {
"node-notifier": {
"optional": true
}
}
},
"node_modules/@jest/reporters/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/reporters/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/reporters/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/@jest/reporters/node_modules/ansi-styles": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/@jest/reporters/node_modules/brace-expansion": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
"integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"balanced-match": "^1.0.0"
}
},
"node_modules/@jest/reporters/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/@jest/reporters/node_modules/glob": {
"version": "10.4.5",
"resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz",
"integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==",
"dev": true,
"license": "ISC",
"dependencies": {
"foreground-child": "^3.1.0",
"jackspeak": "^3.1.2",
"minimatch": "^9.0.4",
"minipass": "^7.1.2",
"package-json-from-dist": "^1.0.0",
"path-scurry": "^1.11.1"
},
"bin": {
"glob": "dist/esm/bin.mjs"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/@jest/reporters/node_modules/jest-message-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-XAGJqkrBo7m3bFxWqiNqL0PyAWGf1XHR6bTve90MjBKJuIzhJsounGTzBNUw8JoU7Uzcj5Z6ZmEhaE3CDnwjfw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/code-frame": "^7.12.13",
"@jest/types": "30.0.0-alpha.6",
"@types/stack-utils": "^2.0.0",
"chalk": "^4.0.0",
"graceful-fs": "^4.2.9",
"micromatch": "^4.0.7",
"pretty-format": "30.0.0-alpha.6",
"slash": "^3.0.0",
"stack-utils": "^2.0.3"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/reporters/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/reporters/node_modules/jest-worker": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-30.0.0-alpha.6.tgz",
"integrity": "sha512-qlzX7zFT/QdUV/LWsJwZBlaIBaJ+E2VH3d1gArGVP+9hUHGpJkEzCSBK7yuZrkt+M/U0Jre5+maPRmkinEF4DA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/node": "*",
"@ungap/structured-clone": "^1.2.0",
"jest-util": "30.0.0-alpha.6",
"merge-stream": "^2.0.0",
"supports-color": "^8.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/reporters/node_modules/minimatch": {
"version": "9.0.5",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
"integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
"dev": true,
"license": "ISC",
"dependencies": {
"brace-expansion": "^2.0.1"
},
"engines": {
"node": ">=16 || 14 >=14.17"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/@jest/reporters/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/@jest/reporters/node_modules/pretty-format": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.0.0-alpha.6.tgz",
"integrity": "sha512-xkeffkZoqQmRrcNewpOsUCKNOl+CkPqjt3Ld749uz1S7/O7GuPNPv2fZk3v/1U/FE8/B4Zz0llVL80MKON1tOQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/schemas": "30.0.0-alpha.6",
"ansi-styles": "^5.0.0",
"react-is": "^18.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/schemas": {
"version": "29.6.0",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.0.tgz",
"integrity": "sha512-rxLjXyJBTL4LQeJW3aKo0M/+GkCOXsO+8i9Iu7eDb6KwtP65ayoDsitrdPBtujxQ88k4wI2FNYfa6TOGwSn6cQ==",
"dev": true,
"dependencies": {
"@sinclair/typebox": "^0.27.8"
},
"engines": {
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
}
},
"node_modules/@jest/snapshot-utils": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/snapshot-utils/-/snapshot-utils-30.0.0-alpha.6.tgz",
"integrity": "sha512-iDtIFCyRT8ZyLmz6kYbS8GR/MBXKA6uZPBfdTcnd2y0T987DV3GVlvwkAC+iFTc1w3HgwQe8LTf+y3i+O2ISCw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"chalk": "^4.0.0",
"graceful-fs": "^4.2.9",
"natural-compare": "^1.4.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/snapshot-utils/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/snapshot-utils/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/snapshot-utils/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/@jest/source-map": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-30.0.0-alpha.6.tgz",
"integrity": "sha512-7rSrxehVyzqw5O+F2ds7wLAm9f6QxqYsJU42LNyUpaFlJqtWz3PeQ2Wu3DVoPzGu0C66EhDHKYmeN0mXnRDZmQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jridgewell/trace-mapping": "^0.3.18",
"callsites": "^3.0.0",
"graceful-fs": "^4.2.9"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/test-result": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-30.0.0-alpha.6.tgz",
"integrity": "sha512-Jlg8lCm7VQ6YvQ0eZx2nQEtej/ng+ulV8cXH7Nj5i33hNZq8EZvWM4gQDWDzRe1X7cVE3Bd42On5f6s2rqqIjw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/console": "30.0.0-alpha.6",
"@jest/types": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"collect-v8-coverage": "^1.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/test-result/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/test-result/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/test-result/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/@jest/test-sequencer": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-30.0.0-alpha.6.tgz",
"integrity": "sha512-5M89jbSQWkBjGlFrRk2wXjRJVxR+uN553sFN0q2TglH0/a4OMSVxRBcCmnIqqDMDizGAlYTxW6BaXxHGHpvrRQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/test-result": "30.0.0-alpha.6",
"graceful-fs": "^4.2.9",
"jest-haste-map": "30.0.0-alpha.6",
"slash": "^3.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/transform": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/transform/-/transform-30.0.0-alpha.6.tgz",
"integrity": "sha512-4L8BZm38BJASswsqruc4c3F0AExYLvp0xq8067e7fIyg4hfwa4zUA+N2idf+eTTjDWevVVdIBfELzJ8b7nvO4Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/core": "^7.11.6",
"@jest/types": "30.0.0-alpha.6",
"@jridgewell/trace-mapping": "^0.3.18",
"babel-plugin-istanbul": "^7.0.0",
"chalk": "^4.0.0",
"convert-source-map": "^2.0.0",
"fast-json-stable-stringify": "^2.1.0",
"graceful-fs": "^4.2.9",
"jest-haste-map": "30.0.0-alpha.6",
"jest-regex-util": "30.0.0-alpha.6",
"jest-util": "30.0.0-alpha.6",
"micromatch": "^4.0.7",
"pirates": "^4.0.4",
"slash": "^3.0.0",
"write-file-atomic": "^5.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/transform/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/transform/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/transform/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/@jest/transform/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/@jest/transform/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/@jest/transform/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/@jest/types": {
"version": "29.6.1",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.1.tgz",
"integrity": "sha512-tPKQNMPuXgvdOn2/Lg9HNfUvjYVGolt04Hp03f5hAk878uwOLikN+JzeLY0HcVgKgFl9Hs3EIqpu3WX27XNhnw==",
"dev": true,
"dependencies": {
"@jest/schemas": "^29.6.0",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
}
},
"node_modules/@jridgewell/gen-mapping": {
"version": "0.3.5",
"resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz",
"integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==",
"dev": true,
"dependencies": {
"@jridgewell/set-array": "^1.2.1",
"@jridgewell/sourcemap-codec": "^1.4.10",
"@jridgewell/trace-mapping": "^0.3.24"
},
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/resolve-uri": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz",
"integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==",
"dev": true,
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/set-array": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz",
"integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==",
"dev": true,
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/source-map": {
"version": "0.3.6",
"resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.6.tgz",
"integrity": "sha512-1ZJTZebgqllO79ue2bm3rIGud/bOe0pP5BjSRCRxxYkEZS8STV7zN84UBbiYu7jy+eCKSnVIUgoWWE/tt+shMQ==",
"dev": true,
"dependencies": {
"@jridgewell/gen-mapping": "^0.3.5",
"@jridgewell/trace-mapping": "^0.3.25"
}
},
"node_modules/@jridgewell/sourcemap-codec": {
"version": "1.4.15",
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz",
"integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==",
"dev": true
},
"node_modules/@jridgewell/trace-mapping": {
"version": "0.3.25",
"resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz",
"integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==",
"dev": true,
"dependencies": {
"@jridgewell/resolve-uri": "^3.1.0",
"@jridgewell/sourcemap-codec": "^1.4.14"
}
},
"node_modules/@jsdoc/salty": {
"version": "0.2.8",
"resolved": "https://registry.npmjs.org/@jsdoc/salty/-/salty-0.2.8.tgz",
"integrity": "sha512-5e+SFVavj1ORKlKaKr2BmTOekmXbelU7dC0cDkQLqag7xfuTPuGMUFx7KWJuv4bYZrTsoL2Z18VVCOKYxzoHcg==",
"dev": true,
"license": "Apache-2.0",
"dependencies": {
"lodash": "^4.17.21"
},
"engines": {
"node": ">=v12.0.0"
}
},
"node_modules/@jsonjoy.com/base64": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@jsonjoy.com/base64/-/base64-1.1.2.tgz",
"integrity": "sha512-q6XAnWQDIMA3+FTiOYajoYqySkO+JSat0ytXGSuRdq9uXE7o92gzuQwQM14xaCRlBLGq3v5miDGC4vkVTn54xA==",
"dev": true,
"license": "Apache-2.0",
"engines": {
"node": ">=10.0"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/streamich"
},
"peerDependencies": {
"tslib": "2"
}
},
"node_modules/@jsonjoy.com/json-pack": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@jsonjoy.com/json-pack/-/json-pack-1.1.0.tgz",
"integrity": "sha512-zlQONA+msXPPwHWZMKFVS78ewFczIll5lXiVPwFPCZUsrOKdxc2AvxU1HoNBmMRhqDZUR9HkC3UOm+6pME6Xsg==",
"dev": true,
"license": "Apache-2.0",
"dependencies": {
"@jsonjoy.com/base64": "^1.1.1",
"@jsonjoy.com/util": "^1.1.2",
"hyperdyperid": "^1.2.0",
"thingies": "^1.20.0"
},
"engines": {
"node": ">=10.0"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/streamich"
},
"peerDependencies": {
"tslib": "2"
}
},
"node_modules/@jsonjoy.com/util": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/@jsonjoy.com/util/-/util-1.5.0.tgz",
"integrity": "sha512-ojoNsrIuPI9g6o8UxhraZQSyF2ByJanAY4cTFbc8Mf2AXEF4aQRGY1dJxyJpuyav8r9FGflEt/Ff3u5Nt6YMPA==",
"dev": true,
"license": "Apache-2.0",
"engines": {
"node": ">=10.0"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/streamich"
},
"peerDependencies": {
"tslib": "2"
}
},
"node_modules/@leichtgewicht/ip-codec": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.5.tgz",
"integrity": "sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==",
"dev": true,
"license": "MIT"
},
"node_modules/@nodelib/fs.scandir": {
"version": "2.1.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
"integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
"dev": true,
"license": "MIT",
"dependencies": {
"@nodelib/fs.stat": "2.0.5",
"run-parallel": "^1.1.9"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/@nodelib/fs.stat": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
"integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 8"
}
},
"node_modules/@nodelib/fs.walk": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
"integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@nodelib/fs.scandir": "2.1.5",
"fastq": "^1.6.0"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/@pkgjs/parseargs": {
"version": "0.11.0",
"resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz",
"integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==",
"optional": true,
"engines": {
"node": ">=14"
}
},
"node_modules/@pkgr/core": {
"version": "0.1.1",
"resolved": "https://registry.npmjs.org/@pkgr/core/-/core-0.1.1.tgz",
"integrity": "sha512-cq8o4cWH0ibXh9VGi5P20Tu9XF/0fFXl9EUinr9QfTM7a7p0oTA4iJRCQWppXR1Pg8dSM0UCItCkPwsk9qWWYA==",
"dev": true,
"license": "MIT",
"engines": {
"node": "^12.20.0 || ^14.18.0 || >=16.0.0"
},
"funding": {
"url": "https://opencollective.com/unts"
}
},
"node_modules/@protobufjs/aspromise": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
"integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ=="
},
"node_modules/@protobufjs/base64": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz",
"integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg=="
},
"node_modules/@protobufjs/codegen": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz",
"integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg=="
},
"node_modules/@protobufjs/eventemitter": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz",
"integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q=="
},
"node_modules/@protobufjs/fetch": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz",
"integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==",
"dependencies": {
"@protobufjs/aspromise": "^1.1.1",
"@protobufjs/inquire": "^1.1.0"
}
},
"node_modules/@protobufjs/float": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz",
"integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ=="
},
"node_modules/@protobufjs/inquire": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz",
"integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q=="
},
"node_modules/@protobufjs/path": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz",
"integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA=="
},
"node_modules/@protobufjs/pool": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz",
"integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw=="
},
"node_modules/@protobufjs/utf8": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz",
"integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw=="
},
"node_modules/@sinclair/typebox": {
"version": "0.27.8",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
"integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
"dev": true
},
"node_modules/@sinonjs/commons": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.1.tgz",
"integrity": "sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==",
"dev": true,
"license": "BSD-3-Clause",
"dependencies": {
"type-detect": "4.0.8"
}
},
"node_modules/@sinonjs/fake-timers": {
"version": "11.3.1",
"resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-11.3.1.tgz",
"integrity": "sha512-EVJO7nW5M/F5Tur0Rf2z/QoMo+1Ia963RiMtapiQrEWvY0iBUvADo8Beegwjpnle5BHkyHuoxSTW3jF43H1XRA==",
"dev": true,
"license": "BSD-3-Clause",
"dependencies": {
"@sinonjs/commons": "^3.0.1"
}
},
"node_modules/@types/babel__core": {
"version": "7.20.5",
"resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
"integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/parser": "^7.20.7",
"@babel/types": "^7.20.7",
"@types/babel__generator": "*",
"@types/babel__template": "*",
"@types/babel__traverse": "*"
}
},
"node_modules/@types/babel__generator": {
"version": "7.6.8",
"resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.8.tgz",
"integrity": "sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/types": "^7.0.0"
}
},
"node_modules/@types/babel__template": {
"version": "7.4.4",
"resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz",
"integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/parser": "^7.1.0",
"@babel/types": "^7.0.0"
}
},
"node_modules/@types/babel__traverse": {
"version": "7.20.6",
"resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.6.tgz",
"integrity": "sha512-r1bzfrm0tomOI8g1SzvCaQHo6Lcv6zu0EA+W2kHrt8dyrHQxGzBBL4kdkzIS+jBMV+EYcMAEAqXqYaLJq5rOZg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/types": "^7.20.7"
}
},
"node_modules/@types/body-parser": {
"version": "1.19.2",
"resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz",
"integrity": "sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==",
"dev": true,
"dependencies": {
"@types/connect": "*",
"@types/node": "*"
}
},
"node_modules/@types/bonjour": {
"version": "3.5.13",
"resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.13.tgz",
"integrity": "sha512-z9fJ5Im06zvUL548KvYNecEVlA7cVDkGUi6kZusb04mpyEFKCIZJvloCcmpmLaIahDpOQGHaHmG6imtPMmPXGQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/connect": {
"version": "3.4.35",
"resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz",
"integrity": "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==",
"dev": true,
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/connect-history-api-fallback": {
"version": "1.5.4",
"resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.4.tgz",
"integrity": "sha512-n6Cr2xS1h4uAulPRdlw6Jl6s1oG8KrVilPN2yUITEs+K48EzMJJ3W1xy8K5eWuFvjp3R74AOIGSmp2UfBJ8HFw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/express-serve-static-core": "*",
"@types/node": "*"
}
},
"node_modules/@types/eslint": {
"version": "9.6.1",
"resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-9.6.1.tgz",
"integrity": "sha512-FXx2pKgId/WyYo2jXw63kk7/+TY7u7AziEJxJAnSFzHlqTAS3Ync6SvgYAN/k4/PQpnnVuzoMuVnByKK2qp0ag==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/estree": "*",
"@types/json-schema": "*"
}
},
"node_modules/@types/eslint-scope": {
"version": "3.7.7",
"resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.7.tgz",
"integrity": "sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/eslint": "*",
"@types/estree": "*"
}
},
"node_modules/@types/estree": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz",
"integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==",
"dev": true,
"license": "MIT"
},
"node_modules/@types/express": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.21.tgz",
"integrity": "sha512-ejlPM315qwLpaQlQDTjPdsUFSc6ZsP4AN6AlWnogPjQ7CVi7PYF3YVz+CY3jE2pwYf7E/7HlDAN0rV2GxTG0HQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/body-parser": "*",
"@types/express-serve-static-core": "^4.17.33",
"@types/qs": "*",
"@types/serve-static": "*"
}
},
"node_modules/@types/express-serve-static-core": {
"version": "4.17.33",
"resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.33.tgz",
"integrity": "sha512-TPBqmR/HRYI3eC2E5hmiivIzv+bidAfXofM+sbonAGvyDhySGw9/PQZFt2BLOrjUUR++4eJVpx6KnLQK1Fk9tA==",
"dev": true,
"dependencies": {
"@types/node": "*",
"@types/qs": "*",
"@types/range-parser": "*"
}
},
"node_modules/@types/http-errors": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.4.tgz",
"integrity": "sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA==",
"dev": true,
"license": "MIT"
},
"node_modules/@types/http-proxy": {
"version": "1.17.10",
"resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.10.tgz",
"integrity": "sha512-Qs5aULi+zV1bwKAg5z1PWnDXWmsn+LxIvUGv6E2+OOMYhclZMO+OXd9pYVf2gLykf2I7IV2u7oTHwChPNsvJ7g==",
"dev": true,
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/istanbul-lib-coverage": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz",
"integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==",
"dev": true
},
"node_modules/@types/istanbul-lib-report": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz",
"integrity": "sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg==",
"dev": true,
"dependencies": {
"@types/istanbul-lib-coverage": "*"
}
},
"node_modules/@types/istanbul-reports": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.1.tgz",
"integrity": "sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw==",
"dev": true,
"dependencies": {
"@types/istanbul-lib-report": "*"
}
},
"node_modules/@types/jest": {
"version": "29.5.14",
"resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.14.tgz",
"integrity": "sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"expect": "^29.0.0",
"pretty-format": "^29.0.0"
}
},
"node_modules/@types/json-schema": {
"version": "7.0.11",
"resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz",
"integrity": "sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==",
"dev": true
},
"node_modules/@types/linkify-it": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/@types/linkify-it/-/linkify-it-5.0.0.tgz",
"integrity": "sha512-sVDA58zAw4eWAffKOaQH5/5j3XeayukzDk+ewSsnv3p4yJEZHCCzMDiZM8e0OUrRvmpGZ85jf4yDHkHsgBNr9Q==",
"dev": true,
"license": "MIT"
},
"node_modules/@types/markdown-it": {
"version": "14.1.2",
"resolved": "https://registry.npmjs.org/@types/markdown-it/-/markdown-it-14.1.2.tgz",
"integrity": "sha512-promo4eFwuiW+TfGxhi+0x3czqTYJkG8qB17ZUJiVF10Xm7NLVRSLUsfRTU/6h1e24VvRnXCx+hG7li58lkzog==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/linkify-it": "^5",
"@types/mdurl": "^2"
}
},
"node_modules/@types/mdurl": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@types/mdurl/-/mdurl-2.0.0.tgz",
"integrity": "sha512-RGdgjQUZba5p6QEFAVx2OGb8rQDL/cPRG7GiedRzMcJ1tYnUANBncjbSB1NRGwbvjcPeikRABz2nshyPk1bhWg==",
"dev": true,
"license": "MIT"
},
"node_modules/@types/mime": {
"version": "1.3.5",
"resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.5.tgz",
"integrity": "sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==",
"dev": true,
"license": "MIT"
},
"node_modules/@types/node": {
"version": "22.10.1",
"resolved": "https://registry.npmjs.org/@types/node/-/node-22.10.1.tgz",
"integrity": "sha512-qKgsUwfHZV2WCWLAnVP1JqnpE6Im6h3Y0+fYgMTasNQ7V++CBX5OT1as0g0f+OyubbFqhf6XVNIsmN4IIhEgGQ==",
"license": "MIT",
"dependencies": {
"undici-types": "~6.20.0"
}
},
"node_modules/@types/node-forge": {
"version": "1.3.11",
"resolved": "https://registry.npmjs.org/@types/node-forge/-/node-forge-1.3.11.tgz",
"integrity": "sha512-FQx220y22OKNTqaByeBGqHWYz4cl94tpcxeFdvBo3wjG6XPBuZ0BNgNZRV5J5TFmmcsJ4IzsLkmGRiQbnYsBEQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/qs": {
"version": "6.9.7",
"resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.7.tgz",
"integrity": "sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw==",
"dev": true
},
"node_modules/@types/range-parser": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz",
"integrity": "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==",
"dev": true
},
"node_modules/@types/retry": {
"version": "0.12.2",
"resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.2.tgz",
"integrity": "sha512-XISRgDJ2Tc5q4TRqvgJtzsRkFYNJzZrhTdtMoGVBttwzzQJkPnS3WWTFc7kuDRoPtPakl+T+OfdEUjYJj7Jbow==",
"dev": true,
"license": "MIT"
},
"node_modules/@types/send": {
"version": "0.17.4",
"resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.4.tgz",
"integrity": "sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/mime": "^1",
"@types/node": "*"
}
},
"node_modules/@types/serve-index": {
"version": "1.9.4",
"resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.4.tgz",
"integrity": "sha512-qLpGZ/c2fhSs5gnYsQxtDEq3Oy8SXPClIXkW5ghvAvsNuVSA8k+gCONcUCS/UjLEYvYps+e8uBtfgXgvhwfNug==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/express": "*"
}
},
"node_modules/@types/serve-static": {
"version": "1.15.7",
"resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.7.tgz",
"integrity": "sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/http-errors": "*",
"@types/node": "*",
"@types/send": "*"
}
},
"node_modules/@types/sockjs": {
"version": "0.3.36",
"resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.36.tgz",
"integrity": "sha512-MK9V6NzAS1+Ud7JV9lJLFqW85VbC9dq3LmwZCuBe4wBDgKC0Kj/jd8Xl+nSviU+Qc3+m7umHHyHg//2KSa0a0Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/stack-utils": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.1.tgz",
"integrity": "sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==",
"dev": true
},
"node_modules/@types/ws": {
"version": "8.5.13",
"resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.13.tgz",
"integrity": "sha512-osM/gWBTPKgHV8XkTunnegTRIsvF6owmf5w+JtAfOw472dptdm0dlGv4xCt6GwQRcC2XVOvvRE/0bAoQcL2QkA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/yargs": {
"version": "17.0.24",
"resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.24.tgz",
"integrity": "sha512-6i0aC7jV6QzQB8ne1joVZ0eSFIstHsCrobmOtghM11yGlH0j43FKL2UhWdELkyps0zuf7qVTUVCCR+tgSlyLLw==",
"dev": true,
"dependencies": {
"@types/yargs-parser": "*"
}
},
"node_modules/@types/yargs-parser": {
"version": "21.0.0",
"resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.0.tgz",
"integrity": "sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA==",
"dev": true
},
"node_modules/@ungap/structured-clone": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz",
"integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==",
"dev": true,
"license": "ISC"
},
"node_modules/@webassemblyjs/ast": {
"version": "1.14.1",
"resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.14.1.tgz",
"integrity": "sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@webassemblyjs/helper-numbers": "1.13.2",
"@webassemblyjs/helper-wasm-bytecode": "1.13.2"
}
},
"node_modules/@webassemblyjs/floating-point-hex-parser": {
"version": "1.13.2",
"resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.13.2.tgz",
"integrity": "sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA==",
"dev": true,
"license": "MIT"
},
"node_modules/@webassemblyjs/helper-api-error": {
"version": "1.13.2",
"resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.13.2.tgz",
"integrity": "sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ==",
"dev": true,
"license": "MIT"
},
"node_modules/@webassemblyjs/helper-buffer": {
"version": "1.14.1",
"resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.14.1.tgz",
"integrity": "sha512-jyH7wtcHiKssDtFPRB+iQdxlDf96m0E39yb0k5uJVhFGleZFoNw1c4aeIcVUPPbXUVJ94wwnMOAqUHyzoEPVMA==",
"dev": true,
"license": "MIT"
},
"node_modules/@webassemblyjs/helper-numbers": {
"version": "1.13.2",
"resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.13.2.tgz",
"integrity": "sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@webassemblyjs/floating-point-hex-parser": "1.13.2",
"@webassemblyjs/helper-api-error": "1.13.2",
"@xtuc/long": "4.2.2"
}
},
"node_modules/@webassemblyjs/helper-wasm-bytecode": {
"version": "1.13.2",
"resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.13.2.tgz",
"integrity": "sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA==",
"dev": true,
"license": "MIT"
},
"node_modules/@webassemblyjs/helper-wasm-section": {
"version": "1.14.1",
"resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.14.1.tgz",
"integrity": "sha512-ds5mXEqTJ6oxRoqjhWDU83OgzAYjwsCV8Lo/N+oRsNDmx/ZDpqalmrtgOMkHwxsG0iI//3BwWAErYRHtgn0dZw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@webassemblyjs/ast": "1.14.1",
"@webassemblyjs/helper-buffer": "1.14.1",
"@webassemblyjs/helper-wasm-bytecode": "1.13.2",
"@webassemblyjs/wasm-gen": "1.14.1"
}
},
"node_modules/@webassemblyjs/ieee754": {
"version": "1.13.2",
"resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.13.2.tgz",
"integrity": "sha512-4LtOzh58S/5lX4ITKxnAK2USuNEvpdVV9AlgGQb8rJDHaLeHciwG4zlGr0j/SNWlr7x3vO1lDEsuePvtcDNCkw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@xtuc/ieee754": "^1.2.0"
}
},
"node_modules/@webassemblyjs/leb128": {
"version": "1.13.2",
"resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.13.2.tgz",
"integrity": "sha512-Lde1oNoIdzVzdkNEAWZ1dZ5orIbff80YPdHx20mrHwHrVNNTjNr8E3xz9BdpcGqRQbAEa+fkrCb+fRFTl/6sQw==",
"dev": true,
"license": "Apache-2.0",
"dependencies": {
"@xtuc/long": "4.2.2"
}
},
"node_modules/@webassemblyjs/utf8": {
"version": "1.13.2",
"resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.13.2.tgz",
"integrity": "sha512-3NQWGjKTASY1xV5m7Hr0iPeXD9+RDobLll3T9d2AO+g3my8xy5peVyjSag4I50mR1bBSN/Ct12lo+R9tJk0NZQ==",
"dev": true,
"license": "MIT"
},
"node_modules/@webassemblyjs/wasm-edit": {
"version": "1.14.1",
"resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.14.1.tgz",
"integrity": "sha512-RNJUIQH/J8iA/1NzlE4N7KtyZNHi3w7at7hDjvRNm5rcUXa00z1vRz3glZoULfJ5mpvYhLybmVcwcjGrC1pRrQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@webassemblyjs/ast": "1.14.1",
"@webassemblyjs/helper-buffer": "1.14.1",
"@webassemblyjs/helper-wasm-bytecode": "1.13.2",
"@webassemblyjs/helper-wasm-section": "1.14.1",
"@webassemblyjs/wasm-gen": "1.14.1",
"@webassemblyjs/wasm-opt": "1.14.1",
"@webassemblyjs/wasm-parser": "1.14.1",
"@webassemblyjs/wast-printer": "1.14.1"
}
},
"node_modules/@webassemblyjs/wasm-gen": {
"version": "1.14.1",
"resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.14.1.tgz",
"integrity": "sha512-AmomSIjP8ZbfGQhumkNvgC33AY7qtMCXnN6bL2u2Js4gVCg8fp735aEiMSBbDR7UQIj90n4wKAFUSEd0QN2Ukg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@webassemblyjs/ast": "1.14.1",
"@webassemblyjs/helper-wasm-bytecode": "1.13.2",
"@webassemblyjs/ieee754": "1.13.2",
"@webassemblyjs/leb128": "1.13.2",
"@webassemblyjs/utf8": "1.13.2"
}
},
"node_modules/@webassemblyjs/wasm-opt": {
"version": "1.14.1",
"resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.14.1.tgz",
"integrity": "sha512-PTcKLUNvBqnY2U6E5bdOQcSM+oVP/PmrDY9NzowJjislEjwP/C4an2303MCVS2Mg9d3AJpIGdUFIQQWbPds0Sw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@webassemblyjs/ast": "1.14.1",
"@webassemblyjs/helper-buffer": "1.14.1",
"@webassemblyjs/wasm-gen": "1.14.1",
"@webassemblyjs/wasm-parser": "1.14.1"
}
},
"node_modules/@webassemblyjs/wasm-parser": {
"version": "1.14.1",
"resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.14.1.tgz",
"integrity": "sha512-JLBl+KZ0R5qB7mCnud/yyX08jWFw5MsoalJ1pQ4EdFlgj9VdXKGuENGsiCIjegI1W7p91rUlcB/LB5yRJKNTcQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@webassemblyjs/ast": "1.14.1",
"@webassemblyjs/helper-api-error": "1.13.2",
"@webassemblyjs/helper-wasm-bytecode": "1.13.2",
"@webassemblyjs/ieee754": "1.13.2",
"@webassemblyjs/leb128": "1.13.2",
"@webassemblyjs/utf8": "1.13.2"
}
},
"node_modules/@webassemblyjs/wast-printer": {
"version": "1.14.1",
"resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.14.1.tgz",
"integrity": "sha512-kPSSXE6De1XOR820C90RIo2ogvZG+c3KiHzqUoO/F34Y2shGzesfqv7o57xrxovZJH/MetF5UjroJ/R/3isoiw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@webassemblyjs/ast": "1.14.1",
"@xtuc/long": "4.2.2"
}
},
"node_modules/@webgpu/types": {
"version": "0.1.51",
"resolved": "https://registry.npmjs.org/@webgpu/types/-/types-0.1.51.tgz",
"integrity": "sha512-ktR3u64NPjwIViNCck+z9QeyN0iPkQCUOQ07ZCV1RzlkfP+olLTeEZ95O1QHS+v4w9vJeY9xj/uJuSphsHy5rQ==",
"dev": true,
"license": "BSD-3-Clause"
},
"node_modules/@webpack-cli/configtest": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/@webpack-cli/configtest/-/configtest-2.1.1.tgz",
"integrity": "sha512-wy0mglZpDSiSS0XHrVR+BAdId2+yxPSoJW8fsna3ZpYSlufjvxnP4YbKTCBZnNIcGN4r6ZPXV55X4mYExOfLmw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=14.15.0"
},
"peerDependencies": {
"webpack": "5.x.x",
"webpack-cli": "5.x.x"
}
},
"node_modules/@webpack-cli/info": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@webpack-cli/info/-/info-2.0.2.tgz",
"integrity": "sha512-zLHQdI/Qs1UyT5UBdWNqsARasIA+AaF8t+4u2aS2nEpBQh2mWIVb8qAklq0eUENnC5mOItrIB4LiS9xMtph18A==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=14.15.0"
},
"peerDependencies": {
"webpack": "5.x.x",
"webpack-cli": "5.x.x"
}
},
"node_modules/@webpack-cli/serve": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/@webpack-cli/serve/-/serve-2.0.5.tgz",
"integrity": "sha512-lqaoKnRYBdo1UgDX8uF24AfGMifWK19TxPmM5FHc2vAGxrJ/qtyUyFBWoY1tISZdelsQ5fBcOusifo5o5wSJxQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=14.15.0"
},
"peerDependencies": {
"webpack": "5.x.x",
"webpack-cli": "5.x.x"
},
"peerDependenciesMeta": {
"webpack-dev-server": {
"optional": true
}
}
},
"node_modules/@xtuc/ieee754": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz",
"integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==",
"dev": true,
"license": "BSD-3-Clause"
},
"node_modules/@xtuc/long": {
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz",
"integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==",
"dev": true,
"license": "Apache-2.0"
},
"node_modules/accepts": {
"version": "1.3.8",
"resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
"integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
"dev": true,
"dependencies": {
"mime-types": "~2.1.34",
"negotiator": "0.6.3"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/acorn": {
"version": "8.14.0",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz",
"integrity": "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==",
"dev": true,
"license": "MIT",
"bin": {
"acorn": "bin/acorn"
},
"engines": {
"node": ">=0.4.0"
}
},
"node_modules/ajv": {
"version": "6.12.6",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
"integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
"dev": true,
"dependencies": {
"fast-deep-equal": "^3.1.1",
"fast-json-stable-stringify": "^2.0.0",
"json-schema-traverse": "^0.4.1",
"uri-js": "^4.2.2"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/epoberezkin"
}
},
"node_modules/ajv-formats": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz",
"integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==",
"dev": true,
"license": "MIT",
"dependencies": {
"ajv": "^8.0.0"
},
"peerDependencies": {
"ajv": "^8.0.0"
},
"peerDependenciesMeta": {
"ajv": {
"optional": true
}
}
},
"node_modules/ajv-formats/node_modules/ajv": {
"version": "8.17.1",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz",
"integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==",
"dev": true,
"license": "MIT",
"dependencies": {
"fast-deep-equal": "^3.1.3",
"fast-uri": "^3.0.1",
"json-schema-traverse": "^1.0.0",
"require-from-string": "^2.0.2"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/epoberezkin"
}
},
"node_modules/ajv-formats/node_modules/json-schema-traverse": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==",
"dev": true,
"license": "MIT"
},
"node_modules/ajv-keywords": {
"version": "3.5.2",
"resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz",
"integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==",
"dev": true,
"peerDependencies": {
"ajv": "^6.9.1"
}
},
"node_modules/ansi-escapes": {
"version": "4.3.2",
"resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz",
"integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"type-fest": "^0.21.3"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/ansi-html-community": {
"version": "0.0.8",
"resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz",
"integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==",
"dev": true,
"engines": [
"node >= 0.8.0"
],
"bin": {
"ansi-html": "bin/ansi-html"
}
},
"node_modules/ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"engines": {
"node": ">=8"
}
},
"node_modules/ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dependencies": {
"color-convert": "^2.0.1"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/anymatch": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
"integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
"dev": true,
"dependencies": {
"normalize-path": "^3.0.0",
"picomatch": "^2.0.4"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/argparse": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
"integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
"dev": true,
"license": "Python-2.0"
},
"node_modules/array-back": {
"version": "6.2.2",
"resolved": "https://registry.npmjs.org/array-back/-/array-back-6.2.2.tgz",
"integrity": "sha512-gUAZ7HPyb4SJczXAMUXMGAvI976JoK3qEx9v1FTmeYuJj0IBiaKttG1ydtGKdkfqWkIkouke7nG8ufGy77+Cvw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12.17"
}
},
"node_modules/babel-jest": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-30.0.0-alpha.6.tgz",
"integrity": "sha512-WOQkqpBz2q8d/AT6D6rZXW5xnKHDmk3kIukaXlzUyoBBIvLh1SEvi2RGS4fboEtS0kNkyL+zf1rSfkt5OCIgmw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/transform": "30.0.0-alpha.6",
"@types/babel__core": "^7.1.14",
"babel-plugin-istanbul": "^7.0.0",
"babel-preset-jest": "30.0.0-alpha.6",
"chalk": "^4.0.0",
"graceful-fs": "^4.2.9",
"slash": "^3.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
},
"peerDependencies": {
"@babel/core": "^7.11.0"
}
},
"node_modules/babel-plugin-istanbul": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-7.0.0.tgz",
"integrity": "sha512-C5OzENSx/A+gt7t4VH1I2XsflxyPUmXRFPKBxt33xncdOmq7oROVM3bZv9Ysjjkv8OJYDMa+tKuKMvqU/H3xdw==",
"dev": true,
"license": "BSD-3-Clause",
"dependencies": {
"@babel/helper-plugin-utils": "^7.0.0",
"@istanbuljs/load-nyc-config": "^1.0.0",
"@istanbuljs/schema": "^0.1.3",
"istanbul-lib-instrument": "^6.0.2",
"test-exclude": "^6.0.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/babel-plugin-jest-hoist": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-30.0.0-alpha.6.tgz",
"integrity": "sha512-e/aPv0pmnvJqXM5SfCBpyMwZFEZrKW1Mb4unwTkxewk6/0TjwBk6l3B3F9H9OKZ3ErhkH4b+Epd3IIM5E53I2g==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/template": "^7.3.3",
"@babel/types": "^7.3.3",
"@types/babel__core": "^7.1.14"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/babel-preset-current-node-syntax": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.1.0.tgz",
"integrity": "sha512-ldYss8SbBlWva1bs28q78Ju5Zq1F+8BrqBZZ0VFhLBvhh6lCpC2o3gDJi/5DRLs9FgYZCnmPYIVFU4lRXCkyUw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/plugin-syntax-async-generators": "^7.8.4",
"@babel/plugin-syntax-bigint": "^7.8.3",
"@babel/plugin-syntax-class-properties": "^7.12.13",
"@babel/plugin-syntax-class-static-block": "^7.14.5",
"@babel/plugin-syntax-import-attributes": "^7.24.7",
"@babel/plugin-syntax-import-meta": "^7.10.4",
"@babel/plugin-syntax-json-strings": "^7.8.3",
"@babel/plugin-syntax-logical-assignment-operators": "^7.10.4",
"@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3",
"@babel/plugin-syntax-numeric-separator": "^7.10.4",
"@babel/plugin-syntax-object-rest-spread": "^7.8.3",
"@babel/plugin-syntax-optional-catch-binding": "^7.8.3",
"@babel/plugin-syntax-optional-chaining": "^7.8.3",
"@babel/plugin-syntax-private-property-in-object": "^7.14.5",
"@babel/plugin-syntax-top-level-await": "^7.14.5"
},
"peerDependencies": {
"@babel/core": "^7.0.0"
}
},
"node_modules/babel-preset-jest": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-30.0.0-alpha.6.tgz",
"integrity": "sha512-Xsis7RI2oT2zlyCIEzMtjDiES0wKoQxTUo5BGzx1q3ZemnDE1/7xTC4/lI4eBLmAtwk/hpZLRYwltvbQEvyRWw==",
"dev": true,
"license": "MIT",
"dependencies": {
"babel-plugin-jest-hoist": "30.0.0-alpha.6",
"babel-preset-current-node-syntax": "^1.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
},
"peerDependencies": {
"@babel/core": "^7.11.0"
}
},
"node_modules/balanced-match": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
"integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
},
"node_modules/batch": {
"version": "0.6.1",
"resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz",
"integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==",
"dev": true
},
"node_modules/binary-extensions": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz",
"integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/bluebird": {
"version": "3.7.2",
"resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz",
"integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==",
"dev": true,
"license": "MIT"
},
"node_modules/body-parser": {
"version": "1.20.3",
"resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz",
"integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==",
"dev": true,
"dependencies": {
"bytes": "3.1.2",
"content-type": "~1.0.5",
"debug": "2.6.9",
"depd": "2.0.0",
"destroy": "1.2.0",
"http-errors": "2.0.0",
"iconv-lite": "0.4.24",
"on-finished": "2.4.1",
"qs": "6.13.0",
"raw-body": "2.5.2",
"type-is": "~1.6.18",
"unpipe": "1.0.0"
},
"engines": {
"node": ">= 0.8",
"npm": "1.2.8000 || >= 1.4.16"
}
},
"node_modules/bonjour-service": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.3.0.tgz",
"integrity": "sha512-3YuAUiSkWykd+2Azjgyxei8OWf8thdn8AITIog2M4UICzoqfjlqr64WIjEXZllf/W6vK1goqleSR6brGomxQqA==",
"dev": true,
"license": "MIT",
"dependencies": {
"fast-deep-equal": "^3.1.3",
"multicast-dns": "^7.2.5"
}
},
"node_modules/boolean": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/boolean/-/boolean-3.2.0.tgz",
"integrity": "sha512-d0II/GO9uf9lfUHH2BQsjxzRJZBdsjgsBiW4BvhWk/3qoKwQFjIDVN19PfX8F2D/r9PCMTtLWjYVCFrpeYUzsw==",
"deprecated": "Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.",
"license": "MIT"
},
"node_modules/brace-expansion": {
"version": "1.1.12",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
"integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
"dev": true,
"license": "MIT",
"dependencies": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
}
},
"node_modules/braces": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
"integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
"dev": true,
"dependencies": {
"fill-range": "^7.1.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/browserslist": {
"version": "4.24.2",
"resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.2.tgz",
"integrity": "sha512-ZIc+Q62revdMcqC6aChtW4jz3My3klmCO1fEmINZY/8J3EpBg5/A/D0AKmBveUh6pgoeycoMkVMko84tuYS+Gg==",
"dev": true,
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/browserslist"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/browserslist"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"license": "MIT",
"dependencies": {
"caniuse-lite": "^1.0.30001669",
"electron-to-chromium": "^1.5.41",
"node-releases": "^2.0.18",
"update-browserslist-db": "^1.1.1"
},
"bin": {
"browserslist": "cli.js"
},
"engines": {
"node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
}
},
"node_modules/bser": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz",
"integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==",
"dev": true,
"license": "Apache-2.0",
"dependencies": {
"node-int64": "^0.4.0"
}
},
"node_modules/buffer-from": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz",
"integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==",
"dev": true
},
"node_modules/bundle-name": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/bundle-name/-/bundle-name-4.1.0.tgz",
"integrity": "sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"run-applescript": "^7.0.0"
},
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/bytes": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
"integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/cache-point": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/cache-point/-/cache-point-3.0.1.tgz",
"integrity": "sha512-itTIMLEKbh6Dw5DruXbxAgcyLnh/oPGVLBfTPqBOftASxHe8bAeXy7JkO4F0LvHqht7XqP5O/09h5UcHS2w0FA==",
"dev": true,
"license": "MIT",
"dependencies": {
"array-back": "^6.2.2"
},
"engines": {
"node": ">=12.17"
},
"peerDependencies": {
"@75lb/nature": "latest"
},
"peerDependenciesMeta": {
"@75lb/nature": {
"optional": true
}
}
},
"node_modules/call-bind": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz",
"integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==",
"dev": true,
"dependencies": {
"es-define-property": "^1.0.0",
"es-errors": "^1.3.0",
"function-bind": "^1.1.2",
"get-intrinsic": "^1.2.4",
"set-function-length": "^1.2.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/callsites": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
"integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/camelcase": {
"version": "5.3.1",
"resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
"integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/caniuse-lite": {
"version": "1.0.30001677",
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001677.tgz",
"integrity": "sha512-fmfjsOlJUpMWu+mAAtZZZHz7UEwsUxIIvu1TJfO1HqFQvB/B+ii0xr9B5HpbZY/mC4XZ8SvjHJqtAY6pDPQEog==",
"dev": true,
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/browserslist"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/caniuse-lite"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"license": "CC-BY-4.0"
},
"node_modules/catharsis": {
"version": "0.9.0",
"resolved": "git+ssh://git@github.com/xenova/catharsis.git#dda824bd3b6c08d418e8c8a03f20446cfb25aa1e",
"dev": true,
"license": "MIT",
"dependencies": {
"lodash": "^4.17.21"
},
"engines": {
"node": ">= v18.16.0"
}
},
"node_modules/chalk": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
"integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
"dev": true,
"dependencies": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/chalk?sponsor=1"
}
},
"node_modules/chalk-template": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/chalk-template/-/chalk-template-0.4.0.tgz",
"integrity": "sha512-/ghrgmhfY8RaSdeo43hNXxpoHAtxdbskUHjPpfqUWGttFgycUhYPGx3YZBCnUCvOa7Doivn1IZec3DEGFoMgLg==",
"dev": true,
"license": "MIT",
"dependencies": {
"chalk": "^4.1.2"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/chalk-template?sponsor=1"
}
},
"node_modules/chalk/node_modules/supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dev": true,
"dependencies": {
"has-flag": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/char-regex": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz",
"integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
}
},
"node_modules/chokidar": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz",
"integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==",
"dev": true,
"license": "MIT",
"dependencies": {
"anymatch": "~3.1.2",
"braces": "~3.0.2",
"glob-parent": "~5.1.2",
"is-binary-path": "~2.1.0",
"is-glob": "~4.0.1",
"normalize-path": "~3.0.0",
"readdirp": "~3.6.0"
},
"engines": {
"node": ">= 8.10.0"
},
"funding": {
"url": "https://paulmillr.com/funding/"
},
"optionalDependencies": {
"fsevents": "~2.3.2"
}
},
"node_modules/chownr": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz",
"integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==",
"engines": {
"node": ">=18"
}
},
"node_modules/chrome-trace-event": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz",
"integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==",
"dev": true,
"engines": {
"node": ">=6.0"
}
},
"node_modules/ci-info": {
"version": "3.8.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz",
"integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"engines": {
"node": ">=8"
}
},
"node_modules/cjs-module-lexer": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.1.tgz",
"integrity": "sha512-cuSVIHi9/9E/+821Qjdvngor+xpnlwnuwIyZOaLmHBVdXL+gP+I6QQB9VkO7RI77YIcTV+S1W9AreJ5eN63JBA==",
"dev": true,
"license": "MIT"
},
"node_modules/cliui": {
"version": "8.0.1",
"resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
"integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
"dev": true,
"license": "ISC",
"dependencies": {
"string-width": "^4.2.0",
"strip-ansi": "^6.0.1",
"wrap-ansi": "^7.0.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/clone-deep": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz",
"integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==",
"dev": true,
"dependencies": {
"is-plain-object": "^2.0.4",
"kind-of": "^6.0.2",
"shallow-clone": "^3.0.0"
},
"engines": {
"node": ">=6"
}
},
"node_modules/co": {
"version": "4.6.0",
"resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz",
"integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==",
"dev": true,
"license": "MIT",
"engines": {
"iojs": ">= 1.0.0",
"node": ">= 0.12.0"
}
},
"node_modules/collect-v8-coverage": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz",
"integrity": "sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==",
"dev": true,
"license": "MIT"
},
"node_modules/color": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/color/-/color-4.2.3.tgz",
"integrity": "sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==",
"dependencies": {
"color-convert": "^2.0.1",
"color-string": "^1.9.0"
},
"engines": {
"node": ">=12.5.0"
}
},
"node_modules/color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dependencies": {
"color-name": "~1.1.4"
},
"engines": {
"node": ">=7.0.0"
}
},
"node_modules/color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
},
"node_modules/color-string": {
"version": "1.9.1",
"resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz",
"integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==",
"dependencies": {
"color-name": "^1.0.0",
"simple-swizzle": "^0.2.2"
}
},
"node_modules/colorette": {
"version": "2.0.20",
"resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz",
"integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==",
"dev": true
},
"node_modules/command-line-args": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/command-line-args/-/command-line-args-6.0.1.tgz",
"integrity": "sha512-Jr3eByUjqyK0qd8W0SGFW1nZwqCaNCtbXjRo2cRJC1OYxWl3MZ5t1US3jq+cO4sPavqgw4l9BMGX0CBe+trepg==",
"dev": true,
"license": "MIT",
"dependencies": {
"array-back": "^6.2.2",
"find-replace": "^5.0.2",
"lodash.camelcase": "^4.3.0",
"typical": "^7.2.0"
},
"engines": {
"node": ">=12.20"
},
"peerDependencies": {
"@75lb/nature": "latest"
},
"peerDependenciesMeta": {
"@75lb/nature": {
"optional": true
}
}
},
"node_modules/command-line-usage": {
"version": "7.0.3",
"resolved": "https://registry.npmjs.org/command-line-usage/-/command-line-usage-7.0.3.tgz",
"integrity": "sha512-PqMLy5+YGwhMh1wS04mVG44oqDsgyLRSKJBdOo1bnYhMKBW65gZF1dRp2OZRhiTjgUHljy99qkO7bsctLaw35Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"array-back": "^6.2.2",
"chalk-template": "^0.4.0",
"table-layout": "^4.1.0",
"typical": "^7.1.1"
},
"engines": {
"node": ">=12.20.0"
}
},
"node_modules/commander": {
"version": "2.20.3",
"resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz",
"integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==",
"dev": true
},
"node_modules/common-sequence": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/common-sequence/-/common-sequence-3.0.0.tgz",
"integrity": "sha512-g/CgSYk93y+a1IKm50tKl7kaT/OjjTYVQlEbUlt/49ZLV1mcKpUU7iyDiqTAeLdb4QDtQfq3ako8y8v//fzrWQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12.17"
}
},
"node_modules/compressible": {
"version": "2.0.18",
"resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz",
"integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==",
"dev": true,
"dependencies": {
"mime-db": ">= 1.43.0 < 2"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/compression": {
"version": "1.8.1",
"resolved": "https://registry.npmjs.org/compression/-/compression-1.8.1.tgz",
"integrity": "sha512-9mAqGPHLakhCLeNyxPkK4xVo746zQ/czLH1Ky+vkitMnWfWZps8r0qXuwhwizagCRttsL4lfG4pIOvaWLpAP0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"bytes": "3.1.2",
"compressible": "~2.0.18",
"debug": "2.6.9",
"negotiator": "~0.6.4",
"on-headers": "~1.1.0",
"safe-buffer": "5.2.1",
"vary": "~1.1.2"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/compression/node_modules/negotiator": {
"version": "0.6.4",
"resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.4.tgz",
"integrity": "sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/concat-map": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
"integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==",
"dev": true
},
"node_modules/config-master": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/config-master/-/config-master-3.1.0.tgz",
"integrity": "sha512-n7LBL1zBzYdTpF1mx5DNcZnZn05CWIdsdvtPL4MosvqbBUK3Rq6VWEtGUuF3Y0s9/CIhMejezqlSkP6TnCJ/9g==",
"dev": true,
"dependencies": {
"walk-back": "^2.0.1"
}
},
"node_modules/config-master/node_modules/walk-back": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/walk-back/-/walk-back-2.0.1.tgz",
"integrity": "sha512-Nb6GvBR8UWX1D+Le+xUq0+Q1kFmRBIWVrfLnQAOmcpEzA9oAxwJ9gIr36t9TWYfzvWRvuMtjHiVsJYEkXWaTAQ==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/connect-history-api-fallback": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz",
"integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==",
"dev": true,
"engines": {
"node": ">=0.8"
}
},
"node_modules/content-disposition": {
"version": "0.5.4",
"resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
"integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
"dev": true,
"dependencies": {
"safe-buffer": "5.2.1"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/content-type": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
"integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
"dev": true,
"engines": {
"node": ">= 0.6"
}
},
"node_modules/convert-source-map": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
"integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
"dev": true,
"license": "MIT"
},
"node_modules/cookie": {
"version": "0.7.1",
"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz",
"integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==",
"dev": true,
"engines": {
"node": ">= 0.6"
}
},
"node_modules/cookie-signature": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
"integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==",
"dev": true
},
"node_modules/core-util-is": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz",
"integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==",
"dev": true
},
"node_modules/cross-spawn": {
"version": "7.0.6",
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
"integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
"license": "MIT",
"dependencies": {
"path-key": "^3.1.0",
"shebang-command": "^2.0.0",
"which": "^2.0.1"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/current-module-paths": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/current-module-paths/-/current-module-paths-1.1.2.tgz",
"integrity": "sha512-H4s4arcLx/ugbu1XkkgSvcUZax0L6tXUqnppGniQb8l5VjUKGHoayXE5RiriiPhYDd+kjZnaok1Uig13PKtKYQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12.17"
}
},
"node_modules/debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"dev": true,
"dependencies": {
"ms": "2.0.0"
}
},
"node_modules/dedent": {
"version": "1.5.3",
"resolved": "https://registry.npmjs.org/dedent/-/dedent-1.5.3.tgz",
"integrity": "sha512-NHQtfOOW68WD8lgypbLA5oT+Bt0xXJhiYvoR6SmmNXZfpzOGXwdKWmcwG8N7PwVVWV3eF/68nmD9BaJSsTBhyQ==",
"dev": true,
"license": "MIT",
"peerDependencies": {
"babel-plugin-macros": "^3.1.0"
},
"peerDependenciesMeta": {
"babel-plugin-macros": {
"optional": true
}
}
},
"node_modules/deepmerge": {
"version": "4.3.1",
"resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz",
"integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/default-browser": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/default-browser/-/default-browser-5.2.1.tgz",
"integrity": "sha512-WY/3TUME0x3KPYdRRxEJJvXRHV4PyPoUsxtZa78lwItwRQRHhd2U9xOscaT/YTf8uCXIAjeJOFBVEh/7FtD8Xg==",
"dev": true,
"license": "MIT",
"dependencies": {
"bundle-name": "^4.1.0",
"default-browser-id": "^5.0.0"
},
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/default-browser-id": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/default-browser-id/-/default-browser-id-5.0.0.tgz",
"integrity": "sha512-A6p/pu/6fyBcA1TRz/GqWYPViplrftcW2gZC9q79ngNCKAeR/X3gcEdXQHl4KNXV+3wgIJ1CPkJQ3IHM6lcsyA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/define-data-property": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz",
"integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==",
"dependencies": {
"es-define-property": "^1.0.0",
"es-errors": "^1.3.0",
"gopd": "^1.0.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/define-lazy-prop": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz",
"integrity": "sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/define-properties": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz",
"integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==",
"license": "MIT",
"dependencies": {
"define-data-property": "^1.0.1",
"has-property-descriptors": "^1.0.0",
"object-keys": "^1.1.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/depd": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
"integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
"dev": true,
"engines": {
"node": ">= 0.8"
}
},
"node_modules/destroy": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz",
"integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==",
"dev": true,
"engines": {
"node": ">= 0.8",
"npm": "1.2.8000 || >= 1.4.16"
}
},
"node_modules/detect-libc": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz",
"integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==",
"engines": {
"node": ">=8"
}
},
"node_modules/detect-newline": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz",
"integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/detect-node": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz",
"integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g=="
},
"node_modules/diff-sequences": {
"version": "29.4.3",
"resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.4.3.tgz",
"integrity": "sha512-ofrBgwpPhCD85kMKtE9RYFFq6OC1A89oW2vvgWZNCwxrUpRUILopY7lsYyMDSjc8g6U6aiO0Qubg6r4Wgt5ZnA==",
"dev": true,
"engines": {
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
}
},
"node_modules/dmd": {
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/dmd/-/dmd-7.1.1.tgz",
"integrity": "sha512-Ap2HP6iuOek7eShReDLr9jluNJm9RMZESlt29H/Xs1qrVMkcS9X6m5h1mBC56WMxNiSo0wvjGICmZlYUSFjwZQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"array-back": "^6.2.2",
"cache-point": "^3.0.0",
"common-sequence": "^3.0.0",
"file-set": "^5.2.2",
"handlebars": "^4.7.8",
"marked": "^4.3.0",
"walk-back": "^5.1.1"
},
"engines": {
"node": ">=12.17"
},
"peerDependencies": {
"@75lb/nature": "latest"
},
"peerDependenciesMeta": {
"@75lb/nature": {
"optional": true
}
}
},
"node_modules/dns-packet": {
"version": "5.6.1",
"resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz",
"integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@leichtgewicht/ip-codec": "^2.0.1"
},
"engines": {
"node": ">=6"
}
},
"node_modules/eastasianwidth": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
"integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="
},
"node_modules/ee-first": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
"integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==",
"dev": true
},
"node_modules/electron-to-chromium": {
"version": "1.5.50",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.50.tgz",
"integrity": "sha512-eMVObiUQ2LdgeO1F/ySTXsvqvxb6ZH2zPGaMYsWzRDdOddUa77tdmI0ltg+L16UpbWdhPmuF3wIQYyQq65WfZw==",
"dev": true,
"license": "ISC"
},
"node_modules/emittery": {
"version": "0.13.1",
"resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz",
"integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sindresorhus/emittery?sponsor=1"
}
},
"node_modules/emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
},
"node_modules/encodeurl": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz",
"integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==",
"dev": true,
"engines": {
"node": ">= 0.8"
}
},
"node_modules/enhanced-resolve": {
"version": "5.17.1",
"resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz",
"integrity": "sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg==",
"dev": true,
"dependencies": {
"graceful-fs": "^4.2.4",
"tapable": "^2.2.0"
},
"engines": {
"node": ">=10.13.0"
}
},
"node_modules/entities": {
"version": "4.5.0",
"resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz",
"integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==",
"dev": true,
"license": "BSD-2-Clause",
"engines": {
"node": ">=0.12"
},
"funding": {
"url": "https://github.com/fb55/entities?sponsor=1"
}
},
"node_modules/envinfo": {
"version": "7.8.1",
"resolved": "https://registry.npmjs.org/envinfo/-/envinfo-7.8.1.tgz",
"integrity": "sha512-/o+BXHmB7ocbHEAs6F2EnG0ogybVVUdkRunTT2glZU9XAaGmhqskrvKwqXuDfNjEO0LZKWdejEEpnq8aM0tOaw==",
"dev": true,
"bin": {
"envinfo": "dist/cli.js"
},
"engines": {
"node": ">=4"
}
},
"node_modules/error-ex": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz",
"integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==",
"dev": true,
"license": "MIT",
"dependencies": {
"is-arrayish": "^0.2.1"
}
},
"node_modules/error-ex/node_modules/is-arrayish": {
"version": "0.2.1",
"resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
"integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==",
"dev": true,
"license": "MIT"
},
"node_modules/es-define-property": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz",
"integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==",
"dependencies": {
"get-intrinsic": "^1.2.4"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-errors": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
"integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-module-lexer": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.2.1.tgz",
"integrity": "sha512-9978wrXM50Y4rTMmW5kXIC09ZdXQZqkE4mxhwkd8VbzsGkXGPgV4zWuqQJgCEzYngdo2dYDa0l8xhX4fkSwJSg==",
"dev": true
},
"node_modules/es6-error": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/es6-error/-/es6-error-4.1.1.tgz",
"integrity": "sha512-Um/+FxMr9CISWh0bi5Zv0iOD+4cFh5qLeks1qhAopKVAJw3drgKbKySikp7wGhDL0HPeaja0P5ULZrxLkniUVg==",
"license": "MIT"
},
"node_modules/escalade": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
"integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/escape-html": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
"integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==",
"dev": true
},
"node_modules/escape-string-regexp": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz",
"integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/eslint-scope": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz",
"integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==",
"dev": true,
"dependencies": {
"esrecurse": "^4.3.0",
"estraverse": "^4.1.1"
},
"engines": {
"node": ">=8.0.0"
}
},
"node_modules/esprima": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
"integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
"dev": true,
"license": "BSD-2-Clause",
"bin": {
"esparse": "bin/esparse.js",
"esvalidate": "bin/esvalidate.js"
},
"engines": {
"node": ">=4"
}
},
"node_modules/esrecurse": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
"integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
"dev": true,
"dependencies": {
"estraverse": "^5.2.0"
},
"engines": {
"node": ">=4.0"
}
},
"node_modules/esrecurse/node_modules/estraverse": {
"version": "5.3.0",
"resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
"integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==",
"dev": true,
"engines": {
"node": ">=4.0"
}
},
"node_modules/estraverse": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz",
"integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==",
"dev": true,
"engines": {
"node": ">=4.0"
}
},
"node_modules/etag": {
"version": "1.8.1",
"resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
"integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
"dev": true,
"engines": {
"node": ">= 0.6"
}
},
"node_modules/eventemitter3": {
"version": "4.0.7",
"resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz",
"integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==",
"dev": true
},
"node_modules/events": {
"version": "3.3.0",
"resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz",
"integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==",
"dev": true,
"engines": {
"node": ">=0.8.x"
}
},
"node_modules/execa": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz",
"integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==",
"dev": true,
"dependencies": {
"cross-spawn": "^7.0.3",
"get-stream": "^6.0.0",
"human-signals": "^2.1.0",
"is-stream": "^2.0.0",
"merge-stream": "^2.0.0",
"npm-run-path": "^4.0.1",
"onetime": "^5.1.2",
"signal-exit": "^3.0.3",
"strip-final-newline": "^2.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sindresorhus/execa?sponsor=1"
}
},
"node_modules/exit": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz",
"integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==",
"dev": true,
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/expect": {
"version": "29.6.1",
"resolved": "https://registry.npmjs.org/expect/-/expect-29.6.1.tgz",
"integrity": "sha512-XEdDLonERCU1n9uR56/Stx9OqojaLAQtZf9PrCHH9Hl8YXiEIka3H4NXJ3NOIBmQJTg7+j7buh34PMHfJujc8g==",
"dev": true,
"dependencies": {
"@jest/expect-utils": "^29.6.1",
"@types/node": "*",
"jest-get-type": "^29.4.3",
"jest-matcher-utils": "^29.6.1",
"jest-message-util": "^29.6.1",
"jest-util": "^29.6.1"
},
"engines": {
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
}
},
"node_modules/express": {
"version": "4.21.2",
"resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz",
"integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==",
"dev": true,
"license": "MIT",
"dependencies": {
"accepts": "~1.3.8",
"array-flatten": "1.1.1",
"body-parser": "1.20.3",
"content-disposition": "0.5.4",
"content-type": "~1.0.4",
"cookie": "0.7.1",
"cookie-signature": "1.0.6",
"debug": "2.6.9",
"depd": "2.0.0",
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"finalhandler": "1.3.1",
"fresh": "0.5.2",
"http-errors": "2.0.0",
"merge-descriptors": "1.0.3",
"methods": "~1.1.2",
"on-finished": "2.4.1",
"parseurl": "~1.3.3",
"path-to-regexp": "0.1.12",
"proxy-addr": "~2.0.7",
"qs": "6.13.0",
"range-parser": "~1.2.1",
"safe-buffer": "5.2.1",
"send": "0.19.0",
"serve-static": "1.16.2",
"setprototypeof": "1.2.0",
"statuses": "2.0.1",
"type-is": "~1.6.18",
"utils-merge": "1.0.1",
"vary": "~1.1.2"
},
"engines": {
"node": ">= 0.10.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/express"
}
},
"node_modules/express/node_modules/array-flatten": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
"integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==",
"dev": true
},
"node_modules/fast-deep-equal": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
"dev": true
},
"node_modules/fast-glob": {
"version": "3.3.2",
"resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz",
"integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==",
"dev": true,
"license": "MIT",
"dependencies": {
"@nodelib/fs.stat": "^2.0.2",
"@nodelib/fs.walk": "^1.2.3",
"glob-parent": "^5.1.2",
"merge2": "^1.3.0",
"micromatch": "^4.0.4"
},
"engines": {
"node": ">=8.6.0"
}
},
"node_modules/fast-json-stable-stringify": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
"integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
"dev": true
},
"node_modules/fast-uri": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.0.3.tgz",
"integrity": "sha512-aLrHthzCjH5He4Z2H9YZ+v6Ujb9ocRuW6ZzkJQOrTxleEijANq4v1TsaPaVG1PZcuurEzrLcWRyYBYXD5cEiaw==",
"dev": true,
"license": "BSD-3-Clause"
},
"node_modules/fastest-levenshtein": {
"version": "1.0.16",
"resolved": "https://registry.npmjs.org/fastest-levenshtein/-/fastest-levenshtein-1.0.16.tgz",
"integrity": "sha512-eRnCtTTtGZFpQCwhJiUOuxPQWRXVKYDn0b2PeHfXL6/Zi53SLAzAHfVhVWK2AryC/WH05kGfxhFIPvTF0SXQzg==",
"dev": true,
"engines": {
"node": ">= 4.9.1"
}
},
"node_modules/fastq": {
"version": "1.17.1",
"resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz",
"integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==",
"dev": true,
"license": "ISC",
"dependencies": {
"reusify": "^1.0.4"
}
},
"node_modules/faye-websocket": {
"version": "0.11.4",
"resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz",
"integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==",
"dev": true,
"dependencies": {
"websocket-driver": ">=0.5.1"
},
"engines": {
"node": ">=0.8.0"
}
},
"node_modules/fb-watchman": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz",
"integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==",
"dev": true,
"license": "Apache-2.0",
"dependencies": {
"bser": "2.1.1"
}
},
"node_modules/file-set": {
"version": "5.2.2",
"resolved": "https://registry.npmjs.org/file-set/-/file-set-5.2.2.tgz",
"integrity": "sha512-/KgJI1V/QaDK4enOk/E2xMFk1cTWJghEr7UmWiRZfZ6upt6gQCfMn4jJ7aOm64OKurj4TaVnSSgSDqv5ZKYA3A==",
"dev": true,
"license": "MIT",
"dependencies": {
"array-back": "^6.2.2",
"fast-glob": "^3.3.2"
},
"engines": {
"node": ">=12.17"
},
"peerDependencies": {
"@75lb/nature": "latest"
},
"peerDependenciesMeta": {
"@75lb/nature": {
"optional": true
}
}
},
"node_modules/fill-range": {
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
"integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
"dev": true,
"dependencies": {
"to-regex-range": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/finalhandler": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz",
"integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==",
"dev": true,
"dependencies": {
"debug": "2.6.9",
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"on-finished": "2.4.1",
"parseurl": "~1.3.3",
"statuses": "2.0.1",
"unpipe": "~1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/find-replace": {
"version": "5.0.2",
"resolved": "https://registry.npmjs.org/find-replace/-/find-replace-5.0.2.tgz",
"integrity": "sha512-Y45BAiE3mz2QsrN2fb5QEtO4qb44NcS7en/0y9PEVsg351HsLeVclP8QPMH79Le9sH3rs5RSwJu99W0WPZO43Q==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=14"
},
"peerDependencies": {
"@75lb/nature": "latest"
},
"peerDependenciesMeta": {
"@75lb/nature": {
"optional": true
}
}
},
"node_modules/find-up": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
"integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
"dev": true,
"dependencies": {
"locate-path": "^5.0.0",
"path-exists": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/flatbuffers": {
"version": "25.1.24",
"resolved": "https://registry.npmjs.org/flatbuffers/-/flatbuffers-25.1.24.tgz",
"integrity": "sha512-Ni+KCqYquU30UEgGkrrwpbYtUcUmNuLFcQ5Xdy9DK7WUaji+AAov+Bf12FEYmu0eI15y31oD38utnBexe0cAYA==",
"license": "Apache-2.0"
},
"node_modules/follow-redirects": {
"version": "1.15.6",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz",
"integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==",
"dev": true,
"funding": [
{
"type": "individual",
"url": "https://github.com/sponsors/RubenVerborgh"
}
],
"engines": {
"node": ">=4.0"
},
"peerDependenciesMeta": {
"debug": {
"optional": true
}
}
},
"node_modules/foreground-child": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz",
"integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==",
"dependencies": {
"cross-spawn": "^7.0.0",
"signal-exit": "^4.0.1"
},
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/foreground-child/node_modules/signal-exit": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
"integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/forwarded": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
"integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
"dev": true,
"engines": {
"node": ">= 0.6"
}
},
"node_modules/fresh": {
"version": "0.5.2",
"resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
"integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==",
"dev": true,
"engines": {
"node": ">= 0.6"
}
},
"node_modules/fs.realpath": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
"integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==",
"dev": true
},
"node_modules/function-bind": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
"integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/gensync": {
"version": "1.0.0-beta.2",
"resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
"integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/get-caller-file": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
"integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
"dev": true,
"license": "ISC",
"engines": {
"node": "6.* || 8.* || >= 10.*"
}
},
"node_modules/get-intrinsic": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
"integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
"dependencies": {
"es-errors": "^1.3.0",
"function-bind": "^1.1.2",
"has-proto": "^1.0.1",
"has-symbols": "^1.0.3",
"hasown": "^2.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/get-package-type": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz",
"integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8.0.0"
}
},
"node_modules/get-stream": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz",
"integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==",
"dev": true,
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/glob": {
"version": "7.2.3",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
"integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
"dev": true,
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.1.1",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
},
"engines": {
"node": "*"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/glob-parent": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
"integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
"dev": true,
"license": "ISC",
"dependencies": {
"is-glob": "^4.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/glob-to-regexp": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz",
"integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==",
"dev": true
},
"node_modules/global-agent": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/global-agent/-/global-agent-3.0.0.tgz",
"integrity": "sha512-PT6XReJ+D07JvGoxQMkT6qji/jVNfX/h364XHZOWeRzy64sSFr+xJ5OX7LI3b4MPQzdL4H8Y8M0xzPpsVMwA8Q==",
"license": "BSD-3-Clause",
"dependencies": {
"boolean": "^3.0.1",
"es6-error": "^4.1.1",
"matcher": "^3.0.0",
"roarr": "^2.15.3",
"semver": "^7.3.2",
"serialize-error": "^7.0.1"
},
"engines": {
"node": ">=10.0"
}
},
"node_modules/globals": {
"version": "11.12.0",
"resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
"integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=4"
}
},
"node_modules/globalthis": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.4.tgz",
"integrity": "sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==",
"license": "MIT",
"dependencies": {
"define-properties": "^1.2.1",
"gopd": "^1.0.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/gopd": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz",
"integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==",
"dependencies": {
"get-intrinsic": "^1.1.3"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/graceful-fs": {
"version": "4.2.11",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
"integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==",
"dev": true
},
"node_modules/guid-typescript": {
"version": "1.0.9",
"resolved": "https://registry.npmjs.org/guid-typescript/-/guid-typescript-1.0.9.tgz",
"integrity": "sha512-Y8T4vYhEfwJOTbouREvG+3XDsjr8E3kIr7uf+JZ0BYloFsttiHU0WfvANVsR7TxNUJa/WpCnw/Ino/p+DeBhBQ=="
},
"node_modules/handle-thing": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz",
"integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==",
"dev": true
},
"node_modules/handlebars": {
"version": "4.7.8",
"resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz",
"integrity": "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"minimist": "^1.2.5",
"neo-async": "^2.6.2",
"source-map": "^0.6.1",
"wordwrap": "^1.0.0"
},
"bin": {
"handlebars": "bin/handlebars"
},
"engines": {
"node": ">=0.4.7"
},
"optionalDependencies": {
"uglify-js": "^3.1.4"
}
},
"node_modules/has": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
"integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
"dev": true,
"dependencies": {
"function-bind": "^1.1.1"
},
"engines": {
"node": ">= 0.4.0"
}
},
"node_modules/has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/has-property-descriptors": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz",
"integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==",
"dependencies": {
"es-define-property": "^1.0.0"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-proto": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz",
"integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-symbols": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz",
"integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/hasown": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
"integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
"dependencies": {
"function-bind": "^1.1.2"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/hpack.js": {
"version": "2.1.6",
"resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz",
"integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==",
"dev": true,
"dependencies": {
"inherits": "^2.0.1",
"obuf": "^1.0.0",
"readable-stream": "^2.0.1",
"wbuf": "^1.1.0"
}
},
"node_modules/hpack.js/node_modules/readable-stream": {
"version": "2.3.8",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz",
"integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==",
"dev": true,
"dependencies": {
"core-util-is": "~1.0.0",
"inherits": "~2.0.3",
"isarray": "~1.0.0",
"process-nextick-args": "~2.0.0",
"safe-buffer": "~5.1.1",
"string_decoder": "~1.1.1",
"util-deprecate": "~1.0.1"
}
},
"node_modules/hpack.js/node_modules/safe-buffer": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
"dev": true
},
"node_modules/hpack.js/node_modules/string_decoder": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
"integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
"dev": true,
"dependencies": {
"safe-buffer": "~5.1.0"
}
},
"node_modules/html-escaper": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz",
"integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==",
"dev": true,
"license": "MIT"
},
"node_modules/http-deceiver": {
"version": "1.2.7",
"resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz",
"integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==",
"dev": true
},
"node_modules/http-errors": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz",
"integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
"dev": true,
"dependencies": {
"depd": "2.0.0",
"inherits": "2.0.4",
"setprototypeof": "1.2.0",
"statuses": "2.0.1",
"toidentifier": "1.0.1"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/http-parser-js": {
"version": "0.5.8",
"resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz",
"integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==",
"dev": true
},
"node_modules/http-proxy": {
"version": "1.18.1",
"resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz",
"integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==",
"dev": true,
"dependencies": {
"eventemitter3": "^4.0.0",
"follow-redirects": "^1.0.0",
"requires-port": "^1.0.0"
},
"engines": {
"node": ">=8.0.0"
}
},
"node_modules/http-proxy-middleware": {
"version": "2.0.9",
"resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.9.tgz",
"integrity": "sha512-c1IyJYLYppU574+YI7R4QyX2ystMtVXZwIdzazUIPIJsHuWNd+mho2j+bKoHftndicGj9yh+xjd+l0yj7VeT1Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/http-proxy": "^1.17.8",
"http-proxy": "^1.18.1",
"is-glob": "^4.0.1",
"is-plain-obj": "^3.0.0",
"micromatch": "^4.0.2"
},
"engines": {
"node": ">=12.0.0"
},
"peerDependencies": {
"@types/express": "^4.17.13"
},
"peerDependenciesMeta": {
"@types/express": {
"optional": true
}
}
},
"node_modules/human-signals": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz",
"integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==",
"dev": true,
"engines": {
"node": ">=10.17.0"
}
},
"node_modules/hyperdyperid": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/hyperdyperid/-/hyperdyperid-1.2.0.tgz",
"integrity": "sha512-Y93lCzHYgGWdrJ66yIktxiaGULYc6oGiABxhcO5AufBeOyoIdZF7bIfLaOrbM0iGIOXQQgxxRrFEnb+Y6w1n4A==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10.18"
}
},
"node_modules/iconv-lite": {
"version": "0.4.24",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
"integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
"dev": true,
"dependencies": {
"safer-buffer": ">= 2.1.2 < 3"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/import-local": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz",
"integrity": "sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==",
"dev": true,
"dependencies": {
"pkg-dir": "^4.2.0",
"resolve-cwd": "^3.0.0"
},
"bin": {
"import-local-fixture": "fixtures/cli.js"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/imurmurhash": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
"integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.8.19"
}
},
"node_modules/inflight": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
"integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
"dev": true,
"dependencies": {
"once": "^1.3.0",
"wrappy": "1"
}
},
"node_modules/inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
"dev": true
},
"node_modules/interpret": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/interpret/-/interpret-3.1.1.tgz",
"integrity": "sha512-6xwYfHbajpoF0xLW+iwLkhwgvLoZDfjYfoFNu8ftMoXINzwuymNLd9u/KmwtdT2GbR+/Cz66otEGEVVUHX9QLQ==",
"dev": true,
"engines": {
"node": ">=10.13.0"
}
},
"node_modules/ipaddr.js": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.2.0.tgz",
"integrity": "sha512-Ag3wB2o37wslZS19hZqorUnrnzSkpOVy+IiiDEiTqNubEYpYuHWIf6K4psgN2ZWKExS4xhVCrRVfb/wfW8fWJA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 10"
}
},
"node_modules/is-arrayish": {
"version": "0.3.2",
"resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz",
"integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ=="
},
"node_modules/is-binary-path": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
"integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
"dev": true,
"license": "MIT",
"dependencies": {
"binary-extensions": "^2.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/is-core-module": {
"version": "2.12.0",
"resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.0.tgz",
"integrity": "sha512-RECHCBCd/viahWmwj6enj19sKbHfJrddi/6cBDsNTKbNq0f7VeaUkBo60BqzvPqo/W54ChS62Z5qyun7cfOMqQ==",
"dev": true,
"dependencies": {
"has": "^1.0.3"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-docker": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz",
"integrity": "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==",
"dev": true,
"license": "MIT",
"bin": {
"is-docker": "cli.js"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-extglob": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
"integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-fullwidth-code-point": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
"engines": {
"node": ">=8"
}
},
"node_modules/is-generator-fn": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz",
"integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/is-glob": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
"integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
"dev": true,
"dependencies": {
"is-extglob": "^2.1.1"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-inside-container": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz",
"integrity": "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==",
"dev": true,
"license": "MIT",
"dependencies": {
"is-docker": "^3.0.0"
},
"bin": {
"is-inside-container": "cli.js"
},
"engines": {
"node": ">=14.16"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-network-error": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/is-network-error/-/is-network-error-1.1.0.tgz",
"integrity": "sha512-tUdRRAnhT+OtCZR/LxZelH/C7QtjtFrTu5tXCA8pl55eTUElUHT+GPYV8MBMBvea/j+NxQqVt3LbWMRir7Gx9g==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=16"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-number": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
"dev": true,
"engines": {
"node": ">=0.12.0"
}
},
"node_modules/is-plain-obj": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz",
"integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==",
"dev": true,
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-plain-object": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz",
"integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==",
"dev": true,
"dependencies": {
"isobject": "^3.0.1"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/is-stream": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz",
"integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==",
"dev": true,
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-wsl": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-3.1.0.tgz",
"integrity": "sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw==",
"dev": true,
"license": "MIT",
"dependencies": {
"is-inside-container": "^1.0.0"
},
"engines": {
"node": ">=16"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/isarray": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
"integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==",
"dev": true
},
"node_modules/isexe": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
"integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="
},
"node_modules/isobject": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz",
"integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/istanbul-lib-coverage": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz",
"integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==",
"dev": true,
"license": "BSD-3-Clause",
"engines": {
"node": ">=8"
}
},
"node_modules/istanbul-lib-instrument": {
"version": "6.0.3",
"resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz",
"integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==",
"dev": true,
"license": "BSD-3-Clause",
"dependencies": {
"@babel/core": "^7.23.9",
"@babel/parser": "^7.23.9",
"@istanbuljs/schema": "^0.1.3",
"istanbul-lib-coverage": "^3.2.0",
"semver": "^7.5.4"
},
"engines": {
"node": ">=10"
}
},
"node_modules/istanbul-lib-report": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz",
"integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==",
"dev": true,
"license": "BSD-3-Clause",
"dependencies": {
"istanbul-lib-coverage": "^3.0.0",
"make-dir": "^4.0.0",
"supports-color": "^7.1.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/istanbul-lib-report/node_modules/supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dev": true,
"license": "MIT",
"dependencies": {
"has-flag": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/istanbul-lib-source-maps": {
"version": "5.0.6",
"resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-5.0.6.tgz",
"integrity": "sha512-yg2d+Em4KizZC5niWhQaIomgf5WlL4vOOjZ5xGCmF8SnPE/mDWWXgvRExdcpCgh9lLRRa1/fSYp2ymmbJ1pI+A==",
"dev": true,
"license": "BSD-3-Clause",
"dependencies": {
"@jridgewell/trace-mapping": "^0.3.23",
"debug": "^4.1.1",
"istanbul-lib-coverage": "^3.0.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/istanbul-lib-source-maps/node_modules/debug": {
"version": "4.3.7",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz",
"integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"ms": "^2.1.3"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/istanbul-lib-source-maps/node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"dev": true,
"license": "MIT"
},
"node_modules/istanbul-reports": {
"version": "3.1.7",
"resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.7.tgz",
"integrity": "sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==",
"dev": true,
"license": "BSD-3-Clause",
"dependencies": {
"html-escaper": "^2.0.0",
"istanbul-lib-report": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/jackspeak": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.1.2.tgz",
"integrity": "sha512-kWmLKn2tRtfYMF/BakihVVRzBKOxz4gJMiL2Rj91WnAB5TPZumSH99R/Yf1qE1u4uRimvCSJfm6hnxohXeEXjQ==",
"dependencies": {
"@isaacs/cliui": "^8.0.2"
},
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
},
"optionalDependencies": {
"@pkgjs/parseargs": "^0.11.0"
}
},
"node_modules/jest": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest/-/jest-30.0.0-alpha.6.tgz",
"integrity": "sha512-9T3nAcIAcEpCX2MdxcjG2IDfG/0tjumnCkVNGh+AKkRXcWF4Er5jLROKvXsgXUJCmr/nMqLF6LG0GrDJ0kjFag==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/core": "30.0.0-alpha.6",
"@jest/types": "30.0.0-alpha.6",
"import-local": "^3.0.2",
"jest-cli": "30.0.0-alpha.6"
},
"bin": {
"jest": "bin/jest.js"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
},
"peerDependencies": {
"node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0"
},
"peerDependenciesMeta": {
"node-notifier": {
"optional": true
}
}
},
"node_modules/jest-changed-files": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-30.0.0-alpha.6.tgz",
"integrity": "sha512-Fmyt6W27L4fRBl/gReFC4WU+3XIqB7ySHu+a9QxrERapfCb43o7y81TCvTwJHSw5dxGzXLOObVB0tRMDWMafnw==",
"dev": true,
"license": "MIT",
"dependencies": {
"execa": "^5.0.0",
"jest-util": "30.0.0-alpha.6",
"p-limit": "^3.1.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-changed-files/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-changed-files/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-changed-files/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/jest-changed-files/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/jest-changed-files/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-changed-files/node_modules/p-limit": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
"integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"yocto-queue": "^0.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/jest-changed-files/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/jest-circus": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-30.0.0-alpha.6.tgz",
"integrity": "sha512-1C62WeTyWinn6zR61syYKe5yqVbV+ftf21vOgj8AtTxGfMUAlGCpeZ5zh4Kc9Qk7r/PiPiHWZtgZmeT4oe9Dug==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/environment": "30.0.0-alpha.6",
"@jest/expect": "30.0.0-alpha.6",
"@jest/test-result": "30.0.0-alpha.6",
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"co": "^4.6.0",
"dedent": "^1.0.0",
"is-generator-fn": "^2.0.0",
"jest-each": "30.0.0-alpha.6",
"jest-matcher-utils": "30.0.0-alpha.6",
"jest-message-util": "30.0.0-alpha.6",
"jest-runtime": "30.0.0-alpha.6",
"jest-snapshot": "30.0.0-alpha.6",
"jest-util": "30.0.0-alpha.6",
"p-limit": "^3.1.0",
"pretty-format": "30.0.0-alpha.6",
"pure-rand": "^6.0.0",
"slash": "^3.0.0",
"stack-utils": "^2.0.3"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-circus/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-circus/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-circus/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/jest-circus/node_modules/ansi-styles": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/jest-circus/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/jest-circus/node_modules/diff-sequences": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-30.0.0-alpha.6.tgz",
"integrity": "sha512-DVGt3/yzbneMUTuupsMqyfSXMnU2fE0lVsC9uFsJmRpluvSi7ZhrS0GX5tnMna6Ta788FGfOUx+irI/+cAZ4EA==",
"dev": true,
"license": "MIT",
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-circus/node_modules/jest-diff": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-30.0.0-alpha.6.tgz",
"integrity": "sha512-43j1DoYwVKrkbB67a2gC0ijjIY9biF0JSPXv7H6zlOkzNlqYg8hSDzrurLNo6zGKatW4JSBLE79LmXPJPj1m6A==",
"dev": true,
"license": "MIT",
"dependencies": {
"chalk": "^4.0.0",
"diff-sequences": "30.0.0-alpha.6",
"jest-get-type": "30.0.0-alpha.6",
"pretty-format": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-circus/node_modules/jest-get-type": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-30.0.0-alpha.6.tgz",
"integrity": "sha512-lJEoQdCY4ICN6+T0lJ9BODKuqPOEpCv2NnJsEO1nmsK0fbWZmN/pgOPHVqLfK8i3jZpUmgupJ1w8r36mc8iiBQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-circus/node_modules/jest-matcher-utils": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-30.0.0-alpha.6.tgz",
"integrity": "sha512-jaq7+HznsK54G0qzu96ZwfMEKHmlPiDqg6qG2p/hVQzr6Y/qVMRh8abI9Y1lX6SSXkr+S9mPAkmOsuJNLTLYmQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"chalk": "^4.0.0",
"jest-diff": "30.0.0-alpha.6",
"jest-get-type": "30.0.0-alpha.6",
"pretty-format": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-circus/node_modules/jest-message-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-XAGJqkrBo7m3bFxWqiNqL0PyAWGf1XHR6bTve90MjBKJuIzhJsounGTzBNUw8JoU7Uzcj5Z6ZmEhaE3CDnwjfw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/code-frame": "^7.12.13",
"@jest/types": "30.0.0-alpha.6",
"@types/stack-utils": "^2.0.0",
"chalk": "^4.0.0",
"graceful-fs": "^4.2.9",
"micromatch": "^4.0.7",
"pretty-format": "30.0.0-alpha.6",
"slash": "^3.0.0",
"stack-utils": "^2.0.3"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-circus/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-circus/node_modules/p-limit": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
"integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"yocto-queue": "^0.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/jest-circus/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/jest-circus/node_modules/pretty-format": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.0.0-alpha.6.tgz",
"integrity": "sha512-xkeffkZoqQmRrcNewpOsUCKNOl+CkPqjt3Ld749uz1S7/O7GuPNPv2fZk3v/1U/FE8/B4Zz0llVL80MKON1tOQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/schemas": "30.0.0-alpha.6",
"ansi-styles": "^5.0.0",
"react-is": "^18.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-cli": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-30.0.0-alpha.6.tgz",
"integrity": "sha512-3VYzI2KgpMNAsf+LdRAQtAbhH3IDyFnT36U6URXot+2JWwoCGQQ6w4HIfqyOSlH4aejKgTPSfxki2shRPDFtlQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/core": "30.0.0-alpha.6",
"@jest/test-result": "30.0.0-alpha.6",
"@jest/types": "30.0.0-alpha.6",
"chalk": "^4.0.0",
"exit": "^0.1.2",
"import-local": "^3.0.2",
"jest-config": "30.0.0-alpha.6",
"jest-util": "30.0.0-alpha.6",
"jest-validate": "30.0.0-alpha.6",
"yargs": "^17.3.1"
},
"bin": {
"jest": "bin/jest.js"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
},
"peerDependencies": {
"node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0"
},
"peerDependenciesMeta": {
"node-notifier": {
"optional": true
}
}
},
"node_modules/jest-cli/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-cli/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-cli/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/jest-cli/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/jest-cli/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-cli/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/jest-config": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-config/-/jest-config-30.0.0-alpha.6.tgz",
"integrity": "sha512-Tq9rH1mg9+nlIhh3efGwMSogFVKZ9z7c6P33ZlK74iJlnqqIAKYERZL2nNmNC5+5p8uxlTPSFZfBz9O8NGKotw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/core": "^7.11.6",
"@jest/pattern": "30.0.0-alpha.6",
"@jest/test-sequencer": "30.0.0-alpha.6",
"@jest/types": "30.0.0-alpha.6",
"babel-jest": "30.0.0-alpha.6",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"deepmerge": "^4.2.2",
"glob": "^10.3.10",
"graceful-fs": "^4.2.9",
"jest-circus": "30.0.0-alpha.6",
"jest-docblock": "30.0.0-alpha.6",
"jest-environment-node": "30.0.0-alpha.6",
"jest-get-type": "30.0.0-alpha.6",
"jest-regex-util": "30.0.0-alpha.6",
"jest-resolve": "30.0.0-alpha.6",
"jest-runner": "30.0.0-alpha.6",
"jest-util": "30.0.0-alpha.6",
"jest-validate": "30.0.0-alpha.6",
"micromatch": "^4.0.7",
"parse-json": "^5.2.0",
"pretty-format": "30.0.0-alpha.6",
"slash": "^3.0.0",
"strip-json-comments": "^3.1.1"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
},
"peerDependencies": {
"@types/node": "*",
"esbuild-register": ">=3.4.0",
"ts-node": ">=9.0.0"
},
"peerDependenciesMeta": {
"@types/node": {
"optional": true
},
"esbuild-register": {
"optional": true
},
"ts-node": {
"optional": true
}
}
},
"node_modules/jest-config/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-config/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-config/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/jest-config/node_modules/ansi-styles": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/jest-config/node_modules/brace-expansion": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
"integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"balanced-match": "^1.0.0"
}
},
"node_modules/jest-config/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/jest-config/node_modules/glob": {
"version": "10.4.5",
"resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz",
"integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==",
"dev": true,
"license": "ISC",
"dependencies": {
"foreground-child": "^3.1.0",
"jackspeak": "^3.1.2",
"minimatch": "^9.0.4",
"minipass": "^7.1.2",
"package-json-from-dist": "^1.0.0",
"path-scurry": "^1.11.1"
},
"bin": {
"glob": "dist/esm/bin.mjs"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/jest-config/node_modules/jest-get-type": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-30.0.0-alpha.6.tgz",
"integrity": "sha512-lJEoQdCY4ICN6+T0lJ9BODKuqPOEpCv2NnJsEO1nmsK0fbWZmN/pgOPHVqLfK8i3jZpUmgupJ1w8r36mc8iiBQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-config/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-config/node_modules/minimatch": {
"version": "9.0.5",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
"integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
"dev": true,
"license": "ISC",
"dependencies": {
"brace-expansion": "^2.0.1"
},
"engines": {
"node": ">=16 || 14 >=14.17"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/jest-config/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/jest-config/node_modules/pretty-format": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.0.0-alpha.6.tgz",
"integrity": "sha512-xkeffkZoqQmRrcNewpOsUCKNOl+CkPqjt3Ld749uz1S7/O7GuPNPv2fZk3v/1U/FE8/B4Zz0llVL80MKON1tOQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/schemas": "30.0.0-alpha.6",
"ansi-styles": "^5.0.0",
"react-is": "^18.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-diff": {
"version": "29.6.1",
"resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.6.1.tgz",
"integrity": "sha512-FsNCvinvl8oVxpNLttNQX7FAq7vR+gMDGj90tiP7siWw1UdakWUGqrylpsYrpvj908IYckm5Y0Q7azNAozU1Kg==",
"dev": true,
"dependencies": {
"chalk": "^4.0.0",
"diff-sequences": "^29.4.3",
"jest-get-type": "^29.4.3",
"pretty-format": "^29.6.1"
},
"engines": {
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
}
},
"node_modules/jest-docblock": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-30.0.0-alpha.6.tgz",
"integrity": "sha512-KXRLgRo7/rF1wqxQupsFCZa6wOp1qrDg4GdSXKfIHODYQb0dpi4rYaYA8xV5l2g9KwYc9/zV7l1tPe9TOr27ew==",
"dev": true,
"license": "MIT",
"dependencies": {
"detect-newline": "^3.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-each": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-each/-/jest-each-30.0.0-alpha.6.tgz",
"integrity": "sha512-snLI2JNYkoBMlZRrNk67XiauUy+uEzRCszKdj+cqHyZ4/MU8fz7gCxbn3g0zmiGUxr0RX0534UxMjc82Sk++tg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"chalk": "^4.0.0",
"jest-get-type": "30.0.0-alpha.6",
"jest-util": "30.0.0-alpha.6",
"pretty-format": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-each/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-each/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-each/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/jest-each/node_modules/ansi-styles": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/jest-each/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/jest-each/node_modules/jest-get-type": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-30.0.0-alpha.6.tgz",
"integrity": "sha512-lJEoQdCY4ICN6+T0lJ9BODKuqPOEpCv2NnJsEO1nmsK0fbWZmN/pgOPHVqLfK8i3jZpUmgupJ1w8r36mc8iiBQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-each/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-each/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/jest-each/node_modules/pretty-format": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.0.0-alpha.6.tgz",
"integrity": "sha512-xkeffkZoqQmRrcNewpOsUCKNOl+CkPqjt3Ld749uz1S7/O7GuPNPv2fZk3v/1U/FE8/B4Zz0llVL80MKON1tOQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/schemas": "30.0.0-alpha.6",
"ansi-styles": "^5.0.0",
"react-is": "^18.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-environment-node": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-30.0.0-alpha.6.tgz",
"integrity": "sha512-UN9W3dFzO150Bqj1x+1pq7dMUqw/QhpqhdtmC3B1P6GD9eKEMFGuRw3EButx5SGzrZOqRNlF+tUNC8CoWGW2Og==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/environment": "30.0.0-alpha.6",
"@jest/fake-timers": "30.0.0-alpha.6",
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"jest-mock": "30.0.0-alpha.6",
"jest-util": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-environment-node/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-environment-node/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-environment-node/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/jest-environment-node/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/jest-environment-node/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-environment-node/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/jest-get-type": {
"version": "29.4.3",
"resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.4.3.tgz",
"integrity": "sha512-J5Xez4nRRMjk8emnTpWrlkyb9pfRQQanDrvWHhsR1+VUfbwxi30eVcZFlcdGInRibU4G5LwHXpI7IRHU0CY+gg==",
"dev": true,
"engines": {
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
}
},
"node_modules/jest-haste-map": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-30.0.0-alpha.6.tgz",
"integrity": "sha512-NR/Kw8HyOkuWIdT8ynsp9KnsTDvWnlz8WSOmtQxySTIzOWbZaeJ2FJi9LoDL6+vhKpdlLfUvhgZVtnFJSLCzew==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"anymatch": "^3.0.3",
"fb-watchman": "^2.0.0",
"graceful-fs": "^4.2.9",
"jest-regex-util": "30.0.0-alpha.6",
"jest-util": "30.0.0-alpha.6",
"jest-worker": "30.0.0-alpha.6",
"micromatch": "^4.0.7",
"walker": "^1.0.8"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
},
"optionalDependencies": {
"fsevents": "^2.3.2"
}
},
"node_modules/jest-haste-map/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-haste-map/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-haste-map/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/jest-haste-map/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/jest-haste-map/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-haste-map/node_modules/jest-worker": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-30.0.0-alpha.6.tgz",
"integrity": "sha512-qlzX7zFT/QdUV/LWsJwZBlaIBaJ+E2VH3d1gArGVP+9hUHGpJkEzCSBK7yuZrkt+M/U0Jre5+maPRmkinEF4DA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/node": "*",
"@ungap/structured-clone": "^1.2.0",
"jest-util": "30.0.0-alpha.6",
"merge-stream": "^2.0.0",
"supports-color": "^8.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-haste-map/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/jest-leak-detector": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-30.0.0-alpha.6.tgz",
"integrity": "sha512-a6fh/6h6dCDyj+aplGqkajVqzmi+qYHs5X8orMZv+u56++gUezJZJf8GCiQqw2vtxcsWVPUuQXa3kF33tAYzNQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"jest-get-type": "30.0.0-alpha.6",
"pretty-format": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-leak-detector/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-leak-detector/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/jest-leak-detector/node_modules/ansi-styles": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/jest-leak-detector/node_modules/jest-get-type": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-30.0.0-alpha.6.tgz",
"integrity": "sha512-lJEoQdCY4ICN6+T0lJ9BODKuqPOEpCv2NnJsEO1nmsK0fbWZmN/pgOPHVqLfK8i3jZpUmgupJ1w8r36mc8iiBQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-leak-detector/node_modules/pretty-format": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.0.0-alpha.6.tgz",
"integrity": "sha512-xkeffkZoqQmRrcNewpOsUCKNOl+CkPqjt3Ld749uz1S7/O7GuPNPv2fZk3v/1U/FE8/B4Zz0llVL80MKON1tOQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/schemas": "30.0.0-alpha.6",
"ansi-styles": "^5.0.0",
"react-is": "^18.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-matcher-utils": {
"version": "29.6.1",
"resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.6.1.tgz",
"integrity": "sha512-SLaztw9d2mfQQKHmJXKM0HCbl2PPVld/t9Xa6P9sgiExijviSp7TnZZpw2Fpt+OI3nwUO/slJbOfzfUMKKC5QA==",
"dev": true,
"dependencies": {
"chalk": "^4.0.0",
"jest-diff": "^29.6.1",
"jest-get-type": "^29.4.3",
"pretty-format": "^29.6.1"
},
"engines": {
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
}
},
"node_modules/jest-message-util": {
"version": "29.6.1",
"resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.6.1.tgz",
"integrity": "sha512-KoAW2zAmNSd3Gk88uJ56qXUWbFk787QKmjjJVOjtGFmmGSZgDBrlIL4AfQw1xyMYPNVD7dNInfIbur9B2rd/wQ==",
"dev": true,
"dependencies": {
"@babel/code-frame": "^7.12.13",
"@jest/types": "^29.6.1",
"@types/stack-utils": "^2.0.0",
"chalk": "^4.0.0",
"graceful-fs": "^4.2.9",
"micromatch": "^4.0.4",
"pretty-format": "^29.6.1",
"slash": "^3.0.0",
"stack-utils": "^2.0.3"
},
"engines": {
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
}
},
"node_modules/jest-mock": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-30.0.0-alpha.6.tgz",
"integrity": "sha512-ezW02IXiKyFYAgDuxfAlONWULitSaB66t411fq2BJxQtgyMGtv59CsnhgbKb0gQp+9vig5MO5ytDCUPalTbarg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"jest-util": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-mock/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-mock/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-mock/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/jest-mock/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/jest-mock/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-mock/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/jest-pnp-resolver": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz",
"integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6"
},
"peerDependencies": {
"jest-resolve": "*"
},
"peerDependenciesMeta": {
"jest-resolve": {
"optional": true
}
}
},
"node_modules/jest-regex-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-XcsAVaqc69QyMz1/FChyhWSoAMaKcDPhFOuWJz/H51LppsyZRAJPXkPnMopsS+qfut8cggExr9QLcsYaX6hqqA==",
"dev": true,
"license": "MIT",
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-resolve": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-30.0.0-alpha.6.tgz",
"integrity": "sha512-0EyeId+RFng52qHvuxOzKjZd2uDF/2Hdzpzt54+biGgY/VVAvf8mYE9UV7g6154Ozpq6KLztSqqMCfPgVs4CbA==",
"dev": true,
"license": "MIT",
"dependencies": {
"chalk": "^4.0.0",
"graceful-fs": "^4.2.9",
"jest-haste-map": "30.0.0-alpha.6",
"jest-pnp-resolver": "^1.2.2",
"jest-util": "30.0.0-alpha.6",
"jest-validate": "30.0.0-alpha.6",
"resolve": "^1.20.0",
"resolve.exports": "^2.0.0",
"slash": "^3.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-resolve-dependencies": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-30.0.0-alpha.6.tgz",
"integrity": "sha512-G+st0nBR4FNIvVCHq8YNJBiG6t7u0+cxM099lbtOoJNJU+ZTdIxSyzPnnmp/C+YHd1QOlDNlplvL+xe1KHhPUA==",
"dev": true,
"license": "MIT",
"dependencies": {
"jest-regex-util": "30.0.0-alpha.6",
"jest-snapshot": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-resolve/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-resolve/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-resolve/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/jest-resolve/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/jest-resolve/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-resolve/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/jest-runner": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-30.0.0-alpha.6.tgz",
"integrity": "sha512-SoADy4YnspMpXLNnRCXNIoinm1N5SMci+iF6Y29Duv3wnWhcL14XjEOcyUKBB+AIL52YwouLeUHkCyCspbBk1Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/console": "30.0.0-alpha.6",
"@jest/environment": "30.0.0-alpha.6",
"@jest/test-result": "30.0.0-alpha.6",
"@jest/transform": "30.0.0-alpha.6",
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"emittery": "^0.13.1",
"graceful-fs": "^4.2.9",
"jest-docblock": "30.0.0-alpha.6",
"jest-environment-node": "30.0.0-alpha.6",
"jest-haste-map": "30.0.0-alpha.6",
"jest-leak-detector": "30.0.0-alpha.6",
"jest-message-util": "30.0.0-alpha.6",
"jest-resolve": "30.0.0-alpha.6",
"jest-runtime": "30.0.0-alpha.6",
"jest-util": "30.0.0-alpha.6",
"jest-watcher": "30.0.0-alpha.6",
"jest-worker": "30.0.0-alpha.6",
"p-limit": "^3.1.0",
"source-map-support": "0.5.13"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-runner/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-runner/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-runner/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/jest-runner/node_modules/ansi-styles": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/jest-runner/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/jest-runner/node_modules/jest-message-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-XAGJqkrBo7m3bFxWqiNqL0PyAWGf1XHR6bTve90MjBKJuIzhJsounGTzBNUw8JoU7Uzcj5Z6ZmEhaE3CDnwjfw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/code-frame": "^7.12.13",
"@jest/types": "30.0.0-alpha.6",
"@types/stack-utils": "^2.0.0",
"chalk": "^4.0.0",
"graceful-fs": "^4.2.9",
"micromatch": "^4.0.7",
"pretty-format": "30.0.0-alpha.6",
"slash": "^3.0.0",
"stack-utils": "^2.0.3"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-runner/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-runner/node_modules/jest-worker": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-30.0.0-alpha.6.tgz",
"integrity": "sha512-qlzX7zFT/QdUV/LWsJwZBlaIBaJ+E2VH3d1gArGVP+9hUHGpJkEzCSBK7yuZrkt+M/U0Jre5+maPRmkinEF4DA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/node": "*",
"@ungap/structured-clone": "^1.2.0",
"jest-util": "30.0.0-alpha.6",
"merge-stream": "^2.0.0",
"supports-color": "^8.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-runner/node_modules/p-limit": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
"integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"yocto-queue": "^0.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/jest-runner/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/jest-runner/node_modules/pretty-format": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.0.0-alpha.6.tgz",
"integrity": "sha512-xkeffkZoqQmRrcNewpOsUCKNOl+CkPqjt3Ld749uz1S7/O7GuPNPv2fZk3v/1U/FE8/B4Zz0llVL80MKON1tOQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/schemas": "30.0.0-alpha.6",
"ansi-styles": "^5.0.0",
"react-is": "^18.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-runner/node_modules/source-map-support": {
"version": "0.5.13",
"resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz",
"integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==",
"dev": true,
"license": "MIT",
"dependencies": {
"buffer-from": "^1.0.0",
"source-map": "^0.6.0"
}
},
"node_modules/jest-runtime": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-30.0.0-alpha.6.tgz",
"integrity": "sha512-p7w7DSFFzwHyR4HsNXca/p32VpL9MLT1c71+VplFJIEgeRHvyqxrARentlul6uJniwtlqvZrVVf5baCQ5a5GUw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/environment": "30.0.0-alpha.6",
"@jest/fake-timers": "30.0.0-alpha.6",
"@jest/globals": "30.0.0-alpha.6",
"@jest/source-map": "30.0.0-alpha.6",
"@jest/test-result": "30.0.0-alpha.6",
"@jest/transform": "30.0.0-alpha.6",
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"cjs-module-lexer": "^1.0.0",
"collect-v8-coverage": "^1.0.0",
"glob": "^10.3.10",
"graceful-fs": "^4.2.9",
"jest-haste-map": "30.0.0-alpha.6",
"jest-message-util": "30.0.0-alpha.6",
"jest-mock": "30.0.0-alpha.6",
"jest-regex-util": "30.0.0-alpha.6",
"jest-resolve": "30.0.0-alpha.6",
"jest-snapshot": "30.0.0-alpha.6",
"jest-util": "30.0.0-alpha.6",
"slash": "^3.0.0",
"strip-bom": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-runtime/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-runtime/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-runtime/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/jest-runtime/node_modules/ansi-styles": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/jest-runtime/node_modules/brace-expansion": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
"integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"balanced-match": "^1.0.0"
}
},
"node_modules/jest-runtime/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/jest-runtime/node_modules/glob": {
"version": "10.4.5",
"resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz",
"integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==",
"dev": true,
"license": "ISC",
"dependencies": {
"foreground-child": "^3.1.0",
"jackspeak": "^3.1.2",
"minimatch": "^9.0.4",
"minipass": "^7.1.2",
"package-json-from-dist": "^1.0.0",
"path-scurry": "^1.11.1"
},
"bin": {
"glob": "dist/esm/bin.mjs"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/jest-runtime/node_modules/jest-message-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-XAGJqkrBo7m3bFxWqiNqL0PyAWGf1XHR6bTve90MjBKJuIzhJsounGTzBNUw8JoU7Uzcj5Z6ZmEhaE3CDnwjfw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/code-frame": "^7.12.13",
"@jest/types": "30.0.0-alpha.6",
"@types/stack-utils": "^2.0.0",
"chalk": "^4.0.0",
"graceful-fs": "^4.2.9",
"micromatch": "^4.0.7",
"pretty-format": "30.0.0-alpha.6",
"slash": "^3.0.0",
"stack-utils": "^2.0.3"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-runtime/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-runtime/node_modules/minimatch": {
"version": "9.0.5",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
"integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
"dev": true,
"license": "ISC",
"dependencies": {
"brace-expansion": "^2.0.1"
},
"engines": {
"node": ">=16 || 14 >=14.17"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/jest-runtime/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/jest-runtime/node_modules/pretty-format": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.0.0-alpha.6.tgz",
"integrity": "sha512-xkeffkZoqQmRrcNewpOsUCKNOl+CkPqjt3Ld749uz1S7/O7GuPNPv2fZk3v/1U/FE8/B4Zz0llVL80MKON1tOQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/schemas": "30.0.0-alpha.6",
"ansi-styles": "^5.0.0",
"react-is": "^18.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-snapshot": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-30.0.0-alpha.6.tgz",
"integrity": "sha512-YCBUxSNJ9YGch3tyQdxQkOUitbmXahHL6UhSQeSMERFfX1UMrHyEDHggglocCUg4G3jdU8YzshxOJ/oaR6Ph8w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/core": "^7.11.6",
"@babel/generator": "^7.7.2",
"@babel/plugin-syntax-jsx": "^7.7.2",
"@babel/plugin-syntax-typescript": "^7.7.2",
"@babel/types": "^7.3.3",
"@jest/expect-utils": "30.0.0-alpha.6",
"@jest/snapshot-utils": "30.0.0-alpha.6",
"@jest/transform": "30.0.0-alpha.6",
"@jest/types": "30.0.0-alpha.6",
"babel-preset-current-node-syntax": "^1.0.0",
"chalk": "^4.0.0",
"expect": "30.0.0-alpha.6",
"graceful-fs": "^4.2.9",
"jest-diff": "30.0.0-alpha.6",
"jest-get-type": "30.0.0-alpha.6",
"jest-matcher-utils": "30.0.0-alpha.6",
"jest-message-util": "30.0.0-alpha.6",
"jest-util": "30.0.0-alpha.6",
"pretty-format": "30.0.0-alpha.6",
"semver": "^7.5.3",
"synckit": "^0.9.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-snapshot/node_modules/@jest/expect-utils": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-30.0.0-alpha.6.tgz",
"integrity": "sha512-QMySMhaCUl0ZQd7Tx5X3fVWY5jtQxZNrTll0OyavdQ70ZTLgk0kU9K+XovcMWO26MK9R5EX7bBgD/j7w9hUM4w==",
"dev": true,
"license": "MIT",
"dependencies": {
"jest-get-type": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-snapshot/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-snapshot/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-snapshot/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/jest-snapshot/node_modules/ansi-styles": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/jest-snapshot/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/jest-snapshot/node_modules/diff-sequences": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-30.0.0-alpha.6.tgz",
"integrity": "sha512-DVGt3/yzbneMUTuupsMqyfSXMnU2fE0lVsC9uFsJmRpluvSi7ZhrS0GX5tnMna6Ta788FGfOUx+irI/+cAZ4EA==",
"dev": true,
"license": "MIT",
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-snapshot/node_modules/expect": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/expect/-/expect-30.0.0-alpha.6.tgz",
"integrity": "sha512-WVi2V4iHKw/vHEyye00Q9CSZz7KHDbJkJyteUI8kTih9jiyMl3bIk7wLYFcY9D1Blnadlyb5w5NBuNjQBow99g==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/expect-utils": "30.0.0-alpha.6",
"jest-get-type": "30.0.0-alpha.6",
"jest-matcher-utils": "30.0.0-alpha.6",
"jest-message-util": "30.0.0-alpha.6",
"jest-mock": "30.0.0-alpha.6",
"jest-util": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-snapshot/node_modules/jest-diff": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-30.0.0-alpha.6.tgz",
"integrity": "sha512-43j1DoYwVKrkbB67a2gC0ijjIY9biF0JSPXv7H6zlOkzNlqYg8hSDzrurLNo6zGKatW4JSBLE79LmXPJPj1m6A==",
"dev": true,
"license": "MIT",
"dependencies": {
"chalk": "^4.0.0",
"diff-sequences": "30.0.0-alpha.6",
"jest-get-type": "30.0.0-alpha.6",
"pretty-format": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-snapshot/node_modules/jest-get-type": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-30.0.0-alpha.6.tgz",
"integrity": "sha512-lJEoQdCY4ICN6+T0lJ9BODKuqPOEpCv2NnJsEO1nmsK0fbWZmN/pgOPHVqLfK8i3jZpUmgupJ1w8r36mc8iiBQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-snapshot/node_modules/jest-matcher-utils": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-30.0.0-alpha.6.tgz",
"integrity": "sha512-jaq7+HznsK54G0qzu96ZwfMEKHmlPiDqg6qG2p/hVQzr6Y/qVMRh8abI9Y1lX6SSXkr+S9mPAkmOsuJNLTLYmQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"chalk": "^4.0.0",
"jest-diff": "30.0.0-alpha.6",
"jest-get-type": "30.0.0-alpha.6",
"pretty-format": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-snapshot/node_modules/jest-message-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-XAGJqkrBo7m3bFxWqiNqL0PyAWGf1XHR6bTve90MjBKJuIzhJsounGTzBNUw8JoU7Uzcj5Z6ZmEhaE3CDnwjfw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/code-frame": "^7.12.13",
"@jest/types": "30.0.0-alpha.6",
"@types/stack-utils": "^2.0.0",
"chalk": "^4.0.0",
"graceful-fs": "^4.2.9",
"micromatch": "^4.0.7",
"pretty-format": "30.0.0-alpha.6",
"slash": "^3.0.0",
"stack-utils": "^2.0.3"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-snapshot/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-snapshot/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/jest-snapshot/node_modules/pretty-format": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.0.0-alpha.6.tgz",
"integrity": "sha512-xkeffkZoqQmRrcNewpOsUCKNOl+CkPqjt3Ld749uz1S7/O7GuPNPv2fZk3v/1U/FE8/B4Zz0llVL80MKON1tOQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/schemas": "30.0.0-alpha.6",
"ansi-styles": "^5.0.0",
"react-is": "^18.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-util": {
"version": "29.6.1",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.6.1.tgz",
"integrity": "sha512-NRFCcjc+/uO3ijUVyNOQJluf8PtGCe/W6cix36+M3cTFgiYqFOOW5MgN4JOOcvbUhcKTYVd1CvHz/LWi8d16Mg==",
"dev": true,
"dependencies": {
"@jest/types": "^29.6.1",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^3.2.0",
"graceful-fs": "^4.2.9",
"picomatch": "^2.2.3"
},
"engines": {
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
}
},
"node_modules/jest-validate": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-30.0.0-alpha.6.tgz",
"integrity": "sha512-sINLwCenOUeJVzS5p+o1NhwKsY0de5Es0J7bsaSuZJQGRY67W20idceflr+aZ2akrKgvvqU8Tsg6lkFQyq+a6Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"camelcase": "^6.2.0",
"chalk": "^4.0.0",
"jest-get-type": "30.0.0-alpha.6",
"leven": "^3.1.0",
"pretty-format": "30.0.0-alpha.6"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-validate/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-validate/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-validate/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/jest-validate/node_modules/ansi-styles": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/jest-validate/node_modules/camelcase": {
"version": "6.3.0",
"resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz",
"integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/jest-validate/node_modules/jest-get-type": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-30.0.0-alpha.6.tgz",
"integrity": "sha512-lJEoQdCY4ICN6+T0lJ9BODKuqPOEpCv2NnJsEO1nmsK0fbWZmN/pgOPHVqLfK8i3jZpUmgupJ1w8r36mc8iiBQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-validate/node_modules/pretty-format": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.0.0-alpha.6.tgz",
"integrity": "sha512-xkeffkZoqQmRrcNewpOsUCKNOl+CkPqjt3Ld749uz1S7/O7GuPNPv2fZk3v/1U/FE8/B4Zz0llVL80MKON1tOQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/schemas": "30.0.0-alpha.6",
"ansi-styles": "^5.0.0",
"react-is": "^18.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-watcher": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-30.0.0-alpha.6.tgz",
"integrity": "sha512-+zL1y3GSJG8EOxVSc2p0dndis0rNDcwKTs4b1bpNTI0XneeTiZlCpRBNYI+sqBl/eZtJBrQdiBRSYz7kJqg7NQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/test-result": "30.0.0-alpha.6",
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"ansi-escapes": "^4.2.1",
"chalk": "^4.0.0",
"emittery": "^0.13.1",
"jest-util": "30.0.0-alpha.6",
"string-length": "^4.0.1"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-watcher/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-watcher/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-watcher/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/jest-watcher/node_modules/ci-info": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.0.0.tgz",
"integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/sibiraj-s"
}
],
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/jest-watcher/node_modules/jest-util": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-30.0.0-alpha.6.tgz",
"integrity": "sha512-JlimakOVDyoMC8TEG+knoufxUqLG+Btihf1G8o5sHxz54C6oL54Wetfepp+Nhuj/1hSL0sQtkovvjlEycf9i0w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/types": "30.0.0-alpha.6",
"@types/node": "*",
"chalk": "^4.0.0",
"ci-info": "^4.0.0",
"graceful-fs": "^4.2.9",
"picomatch": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest-watcher/node_modules/picomatch": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/jest-worker": {
"version": "27.5.1",
"resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz",
"integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==",
"dev": true,
"dependencies": {
"@types/node": "*",
"merge-stream": "^2.0.0",
"supports-color": "^8.0.0"
},
"engines": {
"node": ">= 10.13.0"
}
},
"node_modules/jest/node_modules/@jest/schemas": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.0-alpha.6.tgz",
"integrity": "sha512-Ukr3kR/VsBq8+JHU92xArhSJeFQHVHs5T1laPO00GrrNzv3DvoHn3/EVVagGn9CHbLeAyJHXFRHYxq3+520kiA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@sinclair/typebox": "^0.33.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest/node_modules/@jest/types": {
"version": "30.0.0-alpha.6",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-30.0.0-alpha.6.tgz",
"integrity": "sha512-qUjAm8uvIR7oExn/Fp7/bvn58HSZng5itQDM9x0vaxXWxxGH/8MDmqX/h7OUBz9ka+KfYRaTxe4Y6wiM8+nphw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jest/pattern": "30.0.0-alpha.6",
"@jest/schemas": "30.0.0-alpha.6",
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^17.0.8",
"chalk": "^4.0.0"
},
"engines": {
"node": "^16.10.0 || ^18.12.0 || >=20.0.0"
}
},
"node_modules/jest/node_modules/@sinclair/typebox": {
"version": "0.33.17",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.33.17.tgz",
"integrity": "sha512-75232GRx3wp3P7NP+yc4nRK3XUAnaQShxTAzapgmQrgs0QvSq0/mOJGoZXRpH15cFCKyys+4laCPbBselqJ5Ag==",
"dev": true,
"license": "MIT"
},
"node_modules/js-tokens": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
"integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
"dev": true,
"license": "MIT"
},
"node_modules/js-yaml": {
"version": "3.14.1",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
"integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
"dev": true,
"license": "MIT",
"dependencies": {
"argparse": "^1.0.7",
"esprima": "^4.0.0"
},
"bin": {
"js-yaml": "bin/js-yaml.js"
}
},
"node_modules/js-yaml/node_modules/argparse": {
"version": "1.0.10",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
"integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
"dev": true,
"license": "MIT",
"dependencies": {
"sprintf-js": "~1.0.2"
}
},
"node_modules/js2xmlparser": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/js2xmlparser/-/js2xmlparser-4.0.2.tgz",
"integrity": "sha512-6n4D8gLlLf1n5mNLQPRfViYzu9RATblzPEtm1SthMX1Pjao0r9YI9nw7ZIfRxQMERS87mcswrg+r/OYrPRX6jA==",
"dev": true,
"license": "Apache-2.0",
"dependencies": {
"xmlcreate": "^2.0.4"
}
},
"node_modules/jsdoc": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/jsdoc/-/jsdoc-4.0.4.tgz",
"integrity": "sha512-zeFezwyXeG4syyYHbvh1A967IAqq/67yXtXvuL5wnqCkFZe8I0vKfm+EO+YEvLguo6w9CDUbrAXVtJSHh2E8rw==",
"dev": true,
"license": "Apache-2.0",
"dependencies": {
"@babel/parser": "^7.20.15",
"@jsdoc/salty": "^0.2.1",
"@types/markdown-it": "^14.1.1",
"bluebird": "^3.7.2",
"catharsis": "^0.9.0",
"escape-string-regexp": "^2.0.0",
"js2xmlparser": "^4.0.2",
"klaw": "^3.0.0",
"markdown-it": "^14.1.0",
"markdown-it-anchor": "^8.6.7",
"marked": "^4.0.10",
"mkdirp": "^1.0.4",
"requizzle": "^0.2.3",
"strip-json-comments": "^3.1.0",
"underscore": "~1.13.2"
},
"bin": {
"jsdoc": "jsdoc.js"
},
"engines": {
"node": ">=12.0.0"
}
},
"node_modules/jsdoc-api": {
"version": "9.3.4",
"resolved": "https://registry.npmjs.org/jsdoc-api/-/jsdoc-api-9.3.4.tgz",
"integrity": "sha512-di8lggLACEttpyAZ6WjKKafUP4wC4prAGjt40nMl7quDpp2nD7GmLt6/WxhRu9Q6IYoAAySsNeidBXYVAMwlqg==",
"dev": true,
"license": "MIT",
"dependencies": {
"array-back": "^6.2.2",
"cache-point": "^3.0.0",
"current-module-paths": "^1.1.2",
"file-set": "^5.2.2",
"jsdoc": "^4.0.4",
"object-to-spawn-args": "^2.0.1",
"walk-back": "^5.1.1"
},
"engines": {
"node": ">=12.17"
},
"peerDependencies": {
"@75lb/nature": "latest"
},
"peerDependenciesMeta": {
"@75lb/nature": {
"optional": true
}
}
},
"node_modules/jsdoc-parse": {
"version": "6.2.4",
"resolved": "https://registry.npmjs.org/jsdoc-parse/-/jsdoc-parse-6.2.4.tgz",
"integrity": "sha512-MQA+lCe3ioZd0uGbyB3nDCDZcKgKC7m/Ivt0LgKZdUoOlMJxUWJQ3WI6GeyHp9ouznKaCjlp7CU9sw5k46yZTw==",
"dev": true,
"license": "MIT",
"dependencies": {
"array-back": "^6.2.2",
"find-replace": "^5.0.1",
"lodash.omit": "^4.5.0",
"sort-array": "^5.0.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/jsdoc-to-markdown": {
"version": "9.1.1",
"resolved": "https://registry.npmjs.org/jsdoc-to-markdown/-/jsdoc-to-markdown-9.1.1.tgz",
"integrity": "sha512-QqYVSo58iHXpD5Jwi1u4AFeuMcQp4jfk7SmWzvXKc3frM9Kop17/OHudmi0phzkT/K137Rlroc9Q0y+95XpUsw==",
"dev": true,
"license": "MIT",
"dependencies": {
"array-back": "^6.2.2",
"command-line-args": "^6.0.1",
"command-line-usage": "^7.0.3",
"config-master": "^3.1.0",
"dmd": "^7.1.1",
"jsdoc-api": "^9.3.4",
"jsdoc-parse": "^6.2.4",
"walk-back": "^5.1.1"
},
"bin": {
"jsdoc2md": "bin/cli.js"
},
"engines": {
"node": ">=12.17"
},
"peerDependencies": {
"@75lb/nature": "latest"
},
"peerDependenciesMeta": {
"@75lb/nature": {
"optional": true
}
}
},
"node_modules/jsesc": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.0.2.tgz",
"integrity": "sha512-xKqzzWXDttJuOcawBt4KnKHHIf5oQ/Cxax+0PWFG+DFDgHNAdi+TXECADI+RYiFUMmx8792xsMbbgXj4CwnP4g==",
"dev": true,
"license": "MIT",
"bin": {
"jsesc": "bin/jsesc"
},
"engines": {
"node": ">=6"
}
},
"node_modules/json-parse-even-better-errors": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz",
"integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==",
"dev": true
},
"node_modules/json-schema-traverse": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
"integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
"dev": true
},
"node_modules/json-stringify-safe": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz",
"integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==",
"license": "ISC"
},
"node_modules/json5": {
"version": "2.2.3",
"resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
"integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
"dev": true,
"license": "MIT",
"bin": {
"json5": "lib/cli.js"
},
"engines": {
"node": ">=6"
}
},
"node_modules/kind-of": {
"version": "6.0.3",
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
"integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/klaw": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/klaw/-/klaw-3.0.0.tgz",
"integrity": "sha512-0Fo5oir+O9jnXu5EefYbVK+mHMBeEVEy2cmctR1O1NECcCkPRreJKrS6Qt/j3KC2C148Dfo9i3pCmCMsdqGr0g==",
"dev": true,
"license": "MIT",
"dependencies": {
"graceful-fs": "^4.1.9"
}
},
"node_modules/launch-editor": {
"version": "2.9.1",
"resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.9.1.tgz",
"integrity": "sha512-Gcnl4Bd+hRO9P9icCP/RVVT2o8SFlPXofuCxvA2SaZuH45whSvf5p8x5oih5ftLiVhEI4sp5xDY+R+b3zJBh5w==",
"dev": true,
"license": "MIT",
"dependencies": {
"picocolors": "^1.0.0",
"shell-quote": "^1.8.1"
}
},
"node_modules/leven": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz",
"integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/lines-and-columns": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
"integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==",
"dev": true,
"license": "MIT"
},
"node_modules/linkify-it": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-5.0.0.tgz",
"integrity": "sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"uc.micro": "^2.0.0"
}
},
"node_modules/loader-runner": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz",
"integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==",
"dev": true,
"engines": {
"node": ">=6.11.5"
}
},
"node_modules/locate-path": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
"integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
"dev": true,
"dependencies": {
"p-locate": "^4.1.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/lodash": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
"dev": true
},
"node_modules/lodash.camelcase": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz",
"integrity": "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==",
"dev": true,
"license": "MIT"
},
"node_modules/lodash.omit": {
"version": "4.5.0",
"resolved": "https://registry.npmjs.org/lodash.omit/-/lodash.omit-4.5.0.tgz",
"integrity": "sha512-XeqSp49hNGmlkj2EJlfrQFIzQ6lXdNro9sddtQzcJY8QaoC2GO0DT7xaIokHeyM+mIT0mPMlPvkYzg2xCuHdZg==",
"dev": true,
"license": "MIT"
},
"node_modules/long": {
"version": "5.2.3",
"resolved": "https://registry.npmjs.org/long/-/long-5.2.3.tgz",
"integrity": "sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q=="
},
"node_modules/lru-cache": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
"integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
"dev": true,
"license": "ISC",
"dependencies": {
"yallist": "^3.0.2"
}
},
"node_modules/make-dir": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz",
"integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==",
"dev": true,
"license": "MIT",
"dependencies": {
"semver": "^7.5.3"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/makeerror": {
"version": "1.0.12",
"resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz",
"integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==",
"dev": true,
"license": "BSD-3-Clause",
"dependencies": {
"tmpl": "1.0.5"
}
},
"node_modules/markdown-it": {
"version": "14.1.0",
"resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-14.1.0.tgz",
"integrity": "sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==",
"dev": true,
"license": "MIT",
"dependencies": {
"argparse": "^2.0.1",
"entities": "^4.4.0",
"linkify-it": "^5.0.0",
"mdurl": "^2.0.0",
"punycode.js": "^2.3.1",
"uc.micro": "^2.1.0"
},
"bin": {
"markdown-it": "bin/markdown-it.mjs"
}
},
"node_modules/markdown-it-anchor": {
"version": "8.6.7",
"resolved": "https://registry.npmjs.org/markdown-it-anchor/-/markdown-it-anchor-8.6.7.tgz",
"integrity": "sha512-FlCHFwNnutLgVTflOYHPW2pPcl2AACqVzExlkGQNsi4CJgqOHN7YTgDd4LuhgN1BFO3TS0vLAruV1Td6dwWPJA==",
"dev": true,
"license": "Unlicense",
"peerDependencies": {
"@types/markdown-it": "*",
"markdown-it": "*"
}
},
"node_modules/marked": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/marked/-/marked-4.3.0.tgz",
"integrity": "sha512-PRsaiG84bK+AMvxziE/lCFss8juXjNaWzVbN5tXAm4XjeaS9NAHhop+PjQxz2A9h8Q4M/xGmzP8vqNwy6JeK0A==",
"dev": true,
"license": "MIT",
"bin": {
"marked": "bin/marked.js"
},
"engines": {
"node": ">= 12"
}
},
"node_modules/matcher": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/matcher/-/matcher-3.0.0.tgz",
"integrity": "sha512-OkeDaAZ/bQCxeFAozM55PKcKU0yJMPGifLwV4Qgjitu+5MoAfSQN4lsLJeXZ1b8w0x+/Emda6MZgXS1jvsapng==",
"license": "MIT",
"dependencies": {
"escape-string-regexp": "^4.0.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/matcher/node_modules/escape-string-regexp": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
"integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/mdurl": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/mdurl/-/mdurl-2.0.0.tgz",
"integrity": "sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==",
"dev": true,
"license": "MIT"
},
"node_modules/media-typer": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
"integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
"dev": true,
"engines": {
"node": ">= 0.6"
}
},
"node_modules/memfs": {
"version": "4.14.1",
"resolved": "https://registry.npmjs.org/memfs/-/memfs-4.14.1.tgz",
"integrity": "sha512-Fq5CMEth+2iprLJ5mNizRcWuiwRZYjNkUD0zKk224jZunE9CRacTRDK8QLALbMBlNX2y3nY6lKZbesCwDwacig==",
"dev": true,
"license": "Apache-2.0",
"dependencies": {
"@jsonjoy.com/json-pack": "^1.0.3",
"@jsonjoy.com/util": "^1.3.0",
"tree-dump": "^1.0.1",
"tslib": "^2.0.0"
},
"engines": {
"node": ">= 4.0.0"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/streamich"
}
},
"node_modules/merge-descriptors": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz",
"integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==",
"dev": true,
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/merge-stream": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
"integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==",
"dev": true
},
"node_modules/merge2": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
"integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 8"
}
},
"node_modules/methods": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
"integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==",
"dev": true,
"engines": {
"node": ">= 0.6"
}
},
"node_modules/micromatch": {
"version": "4.0.8",
"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
"integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
"dev": true,
"dependencies": {
"braces": "^3.0.3",
"picomatch": "^2.3.1"
},
"engines": {
"node": ">=8.6"
}
},
"node_modules/mime": {
"version": "1.6.0",
"resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
"integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==",
"dev": true,
"bin": {
"mime": "cli.js"
},
"engines": {
"node": ">=4"
}
},
"node_modules/mime-db": {
"version": "1.52.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
"dev": true,
"engines": {
"node": ">= 0.6"
}
},
"node_modules/mime-types": {
"version": "2.1.35",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"dev": true,
"dependencies": {
"mime-db": "1.52.0"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/mimic-fn": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz",
"integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/minimalistic-assert": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz",
"integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==",
"dev": true
},
"node_modules/minimatch": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
"integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
"dev": true,
"dependencies": {
"brace-expansion": "^1.1.7"
},
"engines": {
"node": "*"
}
},
"node_modules/minimist": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz",
"integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==",
"dev": true,
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/minipass": {
"version": "7.1.2",
"resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz",
"integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==",
"engines": {
"node": ">=16 || 14 >=14.17"
}
},
"node_modules/minizlib": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.1.tgz",
"integrity": "sha512-umcy022ILvb5/3Djuu8LWeqUa8D68JaBzlttKeMWen48SjabqS3iY5w/vzeMzMUNhLDifyhbOwKDSznB1vvrwg==",
"dependencies": {
"minipass": "^7.0.4",
"rimraf": "^5.0.5"
},
"engines": {
"node": ">= 18"
}
},
"node_modules/minizlib/node_modules/brace-expansion": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
"integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"license": "MIT",
"dependencies": {
"balanced-match": "^1.0.0"
}
},
"node_modules/minizlib/node_modules/glob": {
"version": "10.4.1",
"resolved": "https://registry.npmjs.org/glob/-/glob-10.4.1.tgz",
"integrity": "sha512-2jelhlq3E4ho74ZyVLN03oKdAZVUa6UDZzFLVH1H7dnoax+y9qyaq8zBkfDIggjniU19z0wU18y16jMB2eyVIw==",
"dependencies": {
"foreground-child": "^3.1.0",
"jackspeak": "^3.1.2",
"minimatch": "^9.0.4",
"minipass": "^7.1.2",
"path-scurry": "^1.11.1"
},
"bin": {
"glob": "dist/esm/bin.mjs"
},
"engines": {
"node": ">=16 || 14 >=14.18"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/minizlib/node_modules/minimatch": {
"version": "9.0.4",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.4.tgz",
"integrity": "sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw==",
"dependencies": {
"brace-expansion": "^2.0.1"
},
"engines": {
"node": ">=16 || 14 >=14.17"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/minizlib/node_modules/rimraf": {
"version": "5.0.7",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-5.0.7.tgz",
"integrity": "sha512-nV6YcJo5wbLW77m+8KjH8aB/7/rxQy9SZ0HY5shnwULfS+9nmTtVXAJET5NdZmCzA4fPI/Hm1wo/Po/4mopOdg==",
"dependencies": {
"glob": "^10.3.7"
},
"bin": {
"rimraf": "dist/esm/bin.mjs"
},
"engines": {
"node": ">=14.18"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/mkdirp": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz",
"integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==",
"dev": true,
"license": "MIT",
"bin": {
"mkdirp": "bin/cmd.js"
},
"engines": {
"node": ">=10"
}
},
"node_modules/ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
"dev": true
},
"node_modules/multicast-dns": {
"version": "7.2.5",
"resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz",
"integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==",
"dev": true,
"license": "MIT",
"dependencies": {
"dns-packet": "^5.2.2",
"thunky": "^1.0.2"
},
"bin": {
"multicast-dns": "cli.js"
}
},
"node_modules/natural-compare": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
"integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==",
"dev": true,
"license": "MIT"
},
"node_modules/negotiator": {
"version": "0.6.3",
"resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
"integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
"dev": true,
"engines": {
"node": ">= 0.6"
}
},
"node_modules/neo-async": {
"version": "2.6.2",
"resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz",
"integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==",
"dev": true
},
"node_modules/node-forge": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz",
"integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==",
"dev": true,
"license": "(BSD-3-Clause OR GPL-2.0)",
"engines": {
"node": ">= 6.13.0"
}
},
"node_modules/node-int64": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz",
"integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==",
"dev": true,
"license": "MIT"
},
"node_modules/node-releases": {
"version": "2.0.18",
"resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz",
"integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==",
"dev": true
},
"node_modules/normalize-path": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
"integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/npm-run-path": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz",
"integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==",
"dev": true,
"dependencies": {
"path-key": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/object-inspect": {
"version": "1.13.2",
"resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz",
"integrity": "sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==",
"dev": true,
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/object-keys": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
"integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/object-to-spawn-args": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/object-to-spawn-args/-/object-to-spawn-args-2.0.1.tgz",
"integrity": "sha512-6FuKFQ39cOID+BMZ3QaphcC8Y4cw6LXBLyIgPU+OhIYwviJamPAn+4mITapnSBQrejB+NNp+FMskhD8Cq+Ys3w==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8.0.0"
}
},
"node_modules/obuf": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz",
"integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==",
"dev": true
},
"node_modules/on-finished": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
"integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
"dev": true,
"dependencies": {
"ee-first": "1.1.1"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/on-headers": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.1.0.tgz",
"integrity": "sha512-737ZY3yNnXy37FHkQxPzt4UZ2UWPWiCZWLvFZ4fu5cueciegX0zGPnrlY6bwRg4FdQOe9YU8MkmJwGhoMybl8A==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/once": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
"dev": true,
"dependencies": {
"wrappy": "1"
}
},
"node_modules/onetime": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
"integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==",
"dev": true,
"dependencies": {
"mimic-fn": "^2.1.0"
},
"engines": {
"node": ">=6"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/onnxruntime-common": {
"version": "1.21.0",
"resolved": "https://registry.npmjs.org/onnxruntime-common/-/onnxruntime-common-1.21.0.tgz",
"integrity": "sha512-Q632iLLrtCAVOTO65dh2+mNbQir/QNTVBG3h/QdZBpns7mZ0RYbLRBgGABPbpU9351AgYy7SJf1WaeVwMrBFPQ==",
"license": "MIT"
},
"node_modules/onnxruntime-node": {
"version": "1.21.0",
"resolved": "https://registry.npmjs.org/onnxruntime-node/-/onnxruntime-node-1.21.0.tgz",
"integrity": "sha512-NeaCX6WW2L8cRCSqy3bInlo5ojjQqu2fD3D+9W5qb5irwxhEyWKXeH2vZ8W9r6VxaMPUan+4/7NDwZMtouZxEw==",
"hasInstallScript": true,
"license": "MIT",
"os": [
"win32",
"darwin",
"linux"
],
"dependencies": {
"global-agent": "^3.0.0",
"onnxruntime-common": "1.21.0",
"tar": "^7.0.1"
}
},
"node_modules/onnxruntime-web": {
"version": "1.22.0-dev.20250409-89f8206ba4",
"resolved": "https://registry.npmjs.org/onnxruntime-web/-/onnxruntime-web-1.22.0-dev.20250409-89f8206ba4.tgz",
"integrity": "sha512-0uS76OPgH0hWCPrFKlL8kYVV7ckM7t/36HfbgoFw6Nd0CZVVbQC4PkrR8mBX8LtNUFZO25IQBqV2Hx2ho3FlbQ==",
"license": "MIT",
"dependencies": {
"flatbuffers": "^25.1.24",
"guid-typescript": "^1.0.9",
"long": "^5.2.3",
"onnxruntime-common": "1.22.0-dev.20250409-89f8206ba4",
"platform": "^1.3.6",
"protobufjs": "^7.2.4"
}
},
"node_modules/onnxruntime-web/node_modules/onnxruntime-common": {
"version": "1.22.0-dev.20250409-89f8206ba4",
"resolved": "https://registry.npmjs.org/onnxruntime-common/-/onnxruntime-common-1.22.0-dev.20250409-89f8206ba4.tgz",
"integrity": "sha512-vDJMkfCfb0b1A836rgHj+ORuZf4B4+cc2bASQtpeoJLueuFc5DuYwjIZUBrSvx/fO5IrLjLz+oTrB3pcGlhovQ==",
"license": "MIT"
},
"node_modules/open": {
"version": "10.1.0",
"resolved": "https://registry.npmjs.org/open/-/open-10.1.0.tgz",
"integrity": "sha512-mnkeQ1qP5Ue2wd+aivTD3NHd/lZ96Lu0jgf0pwktLPtx6cTZiH7tyeGRRHs0zX0rbrahXPnXlUnbeXyaBBuIaw==",
"dev": true,
"license": "MIT",
"dependencies": {
"default-browser": "^5.2.1",
"define-lazy-prop": "^3.0.0",
"is-inside-container": "^1.0.0",
"is-wsl": "^3.1.0"
},
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/p-limit": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
"integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
"dev": true,
"dependencies": {
"p-try": "^2.0.0"
},
"engines": {
"node": ">=6"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/p-locate": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
"integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
"dev": true,
"dependencies": {
"p-limit": "^2.2.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/p-retry": {
"version": "6.2.1",
"resolved": "https://registry.npmjs.org/p-retry/-/p-retry-6.2.1.tgz",
"integrity": "sha512-hEt02O4hUct5wtwg4H4KcWgDdm+l1bOaEy/hWzd8xtXB9BqxTWBBhb+2ImAtH4Cv4rPjV76xN3Zumqk3k3AhhQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/retry": "0.12.2",
"is-network-error": "^1.0.0",
"retry": "^0.13.1"
},
"engines": {
"node": ">=16.17"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/p-try": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz",
"integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/package-json-from-dist": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz",
"integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==",
"dev": true,
"license": "BlueOak-1.0.0"
},
"node_modules/parse-json": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz",
"integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/code-frame": "^7.0.0",
"error-ex": "^1.3.1",
"json-parse-even-better-errors": "^2.3.0",
"lines-and-columns": "^1.1.6"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/parseurl": {
"version": "1.3.3",
"resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
"integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
"dev": true,
"engines": {
"node": ">= 0.8"
}
},
"node_modules/path-exists": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
"integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/path-is-absolute": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
"integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/path-key": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
"integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
"engines": {
"node": ">=8"
}
},
"node_modules/path-parse": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
"integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==",
"dev": true
},
"node_modules/path-scurry": {
"version": "1.11.1",
"resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz",
"integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==",
"dependencies": {
"lru-cache": "^10.2.0",
"minipass": "^5.0.0 || ^6.0.2 || ^7.0.0"
},
"engines": {
"node": ">=16 || 14 >=14.18"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/path-scurry/node_modules/lru-cache": {
"version": "10.2.2",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.2.2.tgz",
"integrity": "sha512-9hp3Vp2/hFQUiIwKo8XCeFVnrg8Pk3TYNPIR7tJADKi5YfcF7vEaK7avFHTlSy3kOKYaJQaalfEo6YuXdceBOQ==",
"engines": {
"node": "14 || >=16.14"
}
},
"node_modules/path-to-regexp": {
"version": "0.1.12",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
"integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==",
"dev": true,
"license": "MIT"
},
"node_modules/picocolors": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
"integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
"dev": true,
"license": "ISC"
},
"node_modules/picomatch": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
"dev": true,
"engines": {
"node": ">=8.6"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/pirates": {
"version": "4.0.6",
"resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz",
"integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 6"
}
},
"node_modules/pkg-dir": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz",
"integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==",
"dev": true,
"dependencies": {
"find-up": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/platform": {
"version": "1.3.6",
"resolved": "https://registry.npmjs.org/platform/-/platform-1.3.6.tgz",
"integrity": "sha512-fnWVljUchTro6RiCFvCXBbNhJc2NijN7oIQxbwsyL0buWJPG85v81ehlHI9fXrJsMNgTofEoWIQeClKpgxFLrg=="
},
"node_modules/prettier": {
"version": "3.4.2",
"resolved": "https://registry.npmjs.org/prettier/-/prettier-3.4.2.tgz",
"integrity": "sha512-e9MewbtFo+Fevyuxn/4rrcDAaq0IYxPGLvObpQjiZBMAzB9IGmzlnG9RZy3FFas+eBMu2vA0CszMeduow5dIuQ==",
"dev": true,
"license": "MIT",
"bin": {
"prettier": "bin/prettier.cjs"
},
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/prettier/prettier?sponsor=1"
}
},
"node_modules/pretty-format": {
"version": "29.6.1",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.6.1.tgz",
"integrity": "sha512-7jRj+yXO0W7e4/tSJKoR7HRIHLPPjtNaUGG2xxKQnGvPNRkgWcQ0AZX6P4KBRJN4FcTBWb3sa7DVUJmocYuoog==",
"dev": true,
"dependencies": {
"@jest/schemas": "^29.6.0",
"ansi-styles": "^5.0.0",
"react-is": "^18.0.0"
},
"engines": {
"node": "^14.15.0 || ^16.10.0 || >=18.0.0"
}
},
"node_modules/pretty-format/node_modules/ansi-styles": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
"dev": true,
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/process-nextick-args": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
"integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==",
"dev": true
},
"node_modules/protobufjs": {
"version": "7.2.6",
"resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.2.6.tgz",
"integrity": "sha512-dgJaEDDL6x8ASUZ1YqWciTRrdOuYNzoOf27oHNfdyvKqHr5i0FV7FSLU+aIeFjyFgVxrpTOtQUi0BLLBymZaBw==",
"hasInstallScript": true,
"dependencies": {
"@protobufjs/aspromise": "^1.1.2",
"@protobufjs/base64": "^1.1.2",
"@protobufjs/codegen": "^2.0.4",
"@protobufjs/eventemitter": "^1.1.0",
"@protobufjs/fetch": "^1.1.0",
"@protobufjs/float": "^1.0.2",
"@protobufjs/inquire": "^1.1.0",
"@protobufjs/path": "^1.1.2",
"@protobufjs/pool": "^1.1.0",
"@protobufjs/utf8": "^1.1.0",
"@types/node": ">=13.7.0",
"long": "^5.0.0"
},
"engines": {
"node": ">=12.0.0"
}
},
"node_modules/proxy-addr": {
"version": "2.0.7",
"resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
"integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
"dev": true,
"dependencies": {
"forwarded": "0.2.0",
"ipaddr.js": "1.9.1"
},
"engines": {
"node": ">= 0.10"
}
},
"node_modules/proxy-addr/node_modules/ipaddr.js": {
"version": "1.9.1",
"resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
"integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
"dev": true,
"engines": {
"node": ">= 0.10"
}
},
"node_modules/punycode": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz",
"integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/punycode.js": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/punycode.js/-/punycode.js-2.3.1.tgz",
"integrity": "sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/pure-rand": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz",
"integrity": "sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==",
"dev": true,
"funding": [
{
"type": "individual",
"url": "https://github.com/sponsors/dubzzz"
},
{
"type": "opencollective",
"url": "https://opencollective.com/fast-check"
}
],
"license": "MIT"
},
"node_modules/qs": {
"version": "6.13.0",
"resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz",
"integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==",
"dev": true,
"dependencies": {
"side-channel": "^1.0.6"
},
"engines": {
"node": ">=0.6"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/queue-microtask": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
"integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT"
},
"node_modules/randombytes": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz",
"integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==",
"dev": true,
"dependencies": {
"safe-buffer": "^5.1.0"
}
},
"node_modules/range-parser": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
"integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
"dev": true,
"engines": {
"node": ">= 0.6"
}
},
"node_modules/raw-body": {
"version": "2.5.2",
"resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz",
"integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==",
"dev": true,
"dependencies": {
"bytes": "3.1.2",
"http-errors": "2.0.0",
"iconv-lite": "0.4.24",
"unpipe": "1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/react-is": {
"version": "18.2.0",
"resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz",
"integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==",
"dev": true
},
"node_modules/readable-stream": {
"version": "3.6.1",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.1.tgz",
"integrity": "sha512-+rQmrWMYGA90yenhTYsLWAsLsqVC8osOw6PKE1HDYiO0gdPeKe/xDHNzIAIn4C91YQ6oenEhfYqqc1883qHbjQ==",
"dev": true,
"dependencies": {
"inherits": "^2.0.3",
"string_decoder": "^1.1.1",
"util-deprecate": "^1.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/readdirp": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
"integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
"dev": true,
"license": "MIT",
"dependencies": {
"picomatch": "^2.2.1"
},
"engines": {
"node": ">=8.10.0"
}
},
"node_modules/rechoir": {
"version": "0.8.0",
"resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.8.0.tgz",
"integrity": "sha512-/vxpCXddiX8NGfGO/mTafwjq4aFa/71pvamip0++IQk3zG8cbCj0fifNPrjjF1XMXUne91jL9OoxmdykoEtifQ==",
"dev": true,
"dependencies": {
"resolve": "^1.20.0"
},
"engines": {
"node": ">= 10.13.0"
}
},
"node_modules/require-directory": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
"integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/require-from-string": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
"integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/requires-port": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz",
"integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==",
"dev": true
},
"node_modules/requizzle": {
"version": "0.2.4",
"resolved": "https://registry.npmjs.org/requizzle/-/requizzle-0.2.4.tgz",
"integrity": "sha512-JRrFk1D4OQ4SqovXOgdav+K8EAhSB/LJZqCz8tbX0KObcdeM15Ss59ozWMBWmmINMagCwmqn4ZNryUGpBsl6Jw==",
"dev": true,
"license": "MIT",
"dependencies": {
"lodash": "^4.17.21"
}
},
"node_modules/resolve": {
"version": "1.22.2",
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.2.tgz",
"integrity": "sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g==",
"dev": true,
"dependencies": {
"is-core-module": "^2.11.0",
"path-parse": "^1.0.7",
"supports-preserve-symlinks-flag": "^1.0.0"
},
"bin": {
"resolve": "bin/resolve"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/resolve-cwd": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz",
"integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==",
"dev": true,
"dependencies": {
"resolve-from": "^5.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/resolve-from": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz",
"integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/resolve.exports": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.2.tgz",
"integrity": "sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
}
},
"node_modules/retry": {
"version": "0.13.1",
"resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz",
"integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 4"
}
},
"node_modules/reusify": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz",
"integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==",
"dev": true,
"license": "MIT",
"engines": {
"iojs": ">=1.0.0",
"node": ">=0.10.0"
}
},
"node_modules/roarr": {
"version": "2.15.4",
"resolved": "https://registry.npmjs.org/roarr/-/roarr-2.15.4.tgz",
"integrity": "sha512-CHhPh+UNHD2GTXNYhPWLnU8ONHdI+5DI+4EYIAOaiD63rHeYlZvyh8P+in5999TTSFgUYuKUAjzRI4mdh/p+2A==",
"license": "BSD-3-Clause",
"dependencies": {
"boolean": "^3.0.1",
"detect-node": "^2.0.4",
"globalthis": "^1.0.1",
"json-stringify-safe": "^5.0.1",
"semver-compare": "^1.0.0",
"sprintf-js": "^1.1.2"
},
"engines": {
"node": ">=8.0"
}
},
"node_modules/roarr/node_modules/sprintf-js": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.3.tgz",
"integrity": "sha512-Oo+0REFV59/rz3gfJNKQiBlwfHaSESl1pcGyABQsnnIfWOFt6JNj5gCog2U6MLZ//IGYD+nA8nI+mTShREReaA==",
"license": "BSD-3-Clause"
},
"node_modules/run-applescript": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/run-applescript/-/run-applescript-7.0.0.tgz",
"integrity": "sha512-9by4Ij99JUr/MCFBUkDKLWK3G9HVXmabKz9U5MlIAIuvuzkiOicRYs8XJLxX+xahD+mLiiCYDqF9dKAgtzKP1A==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/run-parallel": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
"integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT",
"dependencies": {
"queue-microtask": "^1.2.2"
}
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"dev": true,
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
"dev": true
},
"node_modules/schema-utils": {
"version": "3.3.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz",
"integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==",
"dev": true,
"dependencies": {
"@types/json-schema": "^7.0.8",
"ajv": "^6.12.5",
"ajv-keywords": "^3.5.2"
},
"engines": {
"node": ">= 10.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
}
},
"node_modules/select-hose": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz",
"integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg==",
"dev": true
},
"node_modules/selfsigned": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.4.1.tgz",
"integrity": "sha512-th5B4L2U+eGLq1TVh7zNRGBapioSORUeymIydxgFpwww9d2qyKvtuPU2jJuHvYAwwqi2Y596QBL3eEqcPEYL8Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/node-forge": "^1.3.0",
"node-forge": "^1"
},
"engines": {
"node": ">=10"
}
},
"node_modules/semver": {
"version": "7.7.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz",
"integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==",
"license": "ISC",
"bin": {
"semver": "bin/semver.js"
},
"engines": {
"node": ">=10"
}
},
"node_modules/semver-compare": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/semver-compare/-/semver-compare-1.0.0.tgz",
"integrity": "sha512-YM3/ITh2MJ5MtzaM429anh+x2jiLVjqILF4m4oyQB18W7Ggea7BfqdH/wGMK7dDiMghv/6WG7znWMwUDzJiXow==",
"license": "MIT"
},
"node_modules/send": {
"version": "0.19.0",
"resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz",
"integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==",
"dev": true,
"dependencies": {
"debug": "2.6.9",
"depd": "2.0.0",
"destroy": "1.2.0",
"encodeurl": "~1.0.2",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"fresh": "0.5.2",
"http-errors": "2.0.0",
"mime": "1.6.0",
"ms": "2.1.3",
"on-finished": "2.4.1",
"range-parser": "~1.2.1",
"statuses": "2.0.1"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/send/node_modules/encodeurl": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
"integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==",
"dev": true,
"engines": {
"node": ">= 0.8"
}
},
"node_modules/send/node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"dev": true
},
"node_modules/serialize-error": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/serialize-error/-/serialize-error-7.0.1.tgz",
"integrity": "sha512-8I8TjW5KMOKsZQTvoxjuSIa7foAwPWGOts+6o7sgjz41/qMD9VQHEDxi6PBvK2l0MXUmqZyNpUK+T2tQaaElvw==",
"license": "MIT",
"dependencies": {
"type-fest": "^0.13.1"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/serialize-error/node_modules/type-fest": {
"version": "0.13.1",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.13.1.tgz",
"integrity": "sha512-34R7HTnG0XIJcBSn5XhDd7nNFPRcXYRZrBB2O2jdKqYODldSzBAqzsWoZYYvduky73toYS/ESqxPvkDf/F0XMg==",
"license": "(MIT OR CC0-1.0)",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/serialize-javascript": {
"version": "6.0.2",
"resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz",
"integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==",
"dev": true,
"dependencies": {
"randombytes": "^2.1.0"
}
},
"node_modules/serve-index": {
"version": "1.9.1",
"resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz",
"integrity": "sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==",
"dev": true,
"dependencies": {
"accepts": "~1.3.4",
"batch": "0.6.1",
"debug": "2.6.9",
"escape-html": "~1.0.3",
"http-errors": "~1.6.2",
"mime-types": "~2.1.17",
"parseurl": "~1.3.2"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/serve-index/node_modules/depd": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
"integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==",
"dev": true,
"engines": {
"node": ">= 0.6"
}
},
"node_modules/serve-index/node_modules/http-errors": {
"version": "1.6.3",
"resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
"integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==",
"dev": true,
"dependencies": {
"depd": "~1.1.2",
"inherits": "2.0.3",
"setprototypeof": "1.1.0",
"statuses": ">= 1.4.0 < 2"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/serve-index/node_modules/inherits": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
"integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==",
"dev": true
},
"node_modules/serve-index/node_modules/setprototypeof": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz",
"integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==",
"dev": true
},
"node_modules/serve-index/node_modules/statuses": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz",
"integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==",
"dev": true,
"engines": {
"node": ">= 0.6"
}
},
"node_modules/serve-static": {
"version": "1.16.2",
"resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz",
"integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==",
"dev": true,
"dependencies": {
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"parseurl": "~1.3.3",
"send": "0.19.0"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/set-function-length": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz",
"integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==",
"dev": true,
"dependencies": {
"define-data-property": "^1.1.4",
"es-errors": "^1.3.0",
"function-bind": "^1.1.2",
"get-intrinsic": "^1.2.4",
"gopd": "^1.0.1",
"has-property-descriptors": "^1.0.2"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/setprototypeof": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz",
"integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==",
"dev": true
},
"node_modules/shallow-clone": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz",
"integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==",
"dev": true,
"dependencies": {
"kind-of": "^6.0.2"
},
"engines": {
"node": ">=8"
}
},
"node_modules/sharp": {
"version": "0.34.1",
"resolved": "https://registry.npmjs.org/sharp/-/sharp-0.34.1.tgz",
"integrity": "sha512-1j0w61+eVxu7DawFJtnfYcvSv6qPFvfTaqzTQ2BLknVhHTwGS8sc63ZBF4rzkWMBVKybo4S5OBtDdZahh2A1xg==",
"hasInstallScript": true,
"license": "Apache-2.0",
"dependencies": {
"color": "^4.2.3",
"detect-libc": "^2.0.3",
"semver": "^7.7.1"
},
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-darwin-arm64": "0.34.1",
"@img/sharp-darwin-x64": "0.34.1",
"@img/sharp-libvips-darwin-arm64": "1.1.0",
"@img/sharp-libvips-darwin-x64": "1.1.0",
"@img/sharp-libvips-linux-arm": "1.1.0",
"@img/sharp-libvips-linux-arm64": "1.1.0",
"@img/sharp-libvips-linux-ppc64": "1.1.0",
"@img/sharp-libvips-linux-s390x": "1.1.0",
"@img/sharp-libvips-linux-x64": "1.1.0",
"@img/sharp-libvips-linuxmusl-arm64": "1.1.0",
"@img/sharp-libvips-linuxmusl-x64": "1.1.0",
"@img/sharp-linux-arm": "0.34.1",
"@img/sharp-linux-arm64": "0.34.1",
"@img/sharp-linux-s390x": "0.34.1",
"@img/sharp-linux-x64": "0.34.1",
"@img/sharp-linuxmusl-arm64": "0.34.1",
"@img/sharp-linuxmusl-x64": "0.34.1",
"@img/sharp-wasm32": "0.34.1",
"@img/sharp-win32-ia32": "0.34.1",
"@img/sharp-win32-x64": "0.34.1"
}
},
"node_modules/shebang-command": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
"integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
"dependencies": {
"shebang-regex": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/shebang-regex": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
"integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
"engines": {
"node": ">=8"
}
},
"node_modules/shell-quote": {
"version": "1.8.2",
"resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.2.tgz",
"integrity": "sha512-AzqKpGKjrj7EM6rKVQEPpB288oCfnrEIuyoT9cyF4nmGa7V8Zk6f7RRqYisX8X9m+Q7bd632aZW4ky7EhbQztA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/side-channel": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz",
"integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==",
"dev": true,
"dependencies": {
"call-bind": "^1.0.7",
"es-errors": "^1.3.0",
"get-intrinsic": "^1.2.4",
"object-inspect": "^1.13.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/signal-exit": {
"version": "3.0.7",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
"integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
"dev": true
},
"node_modules/simple-swizzle": {
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz",
"integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==",
"dependencies": {
"is-arrayish": "^0.3.1"
}
},
"node_modules/slash": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
"integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/sockjs": {
"version": "0.3.24",
"resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz",
"integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==",
"dev": true,
"dependencies": {
"faye-websocket": "^0.11.3",
"uuid": "^8.3.2",
"websocket-driver": "^0.7.4"
}
},
"node_modules/sort-array": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/sort-array/-/sort-array-5.0.0.tgz",
"integrity": "sha512-Sg9MzajSGprcSrMIxsXyNT0e0JB47RJRfJspC+7co4Z5BdNsNl8FmWI+lXEpyKq+vkMG6pHgAhqyCO+bkDTfFQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"array-back": "^6.2.2",
"typical": "^7.1.1"
},
"engines": {
"node": ">=12.17"
},
"peerDependencies": {
"@75lb/nature": "^0.1.1"
},
"peerDependenciesMeta": {
"@75lb/nature": {
"optional": true
}
}
},
"node_modules/source-map": {
"version": "0.6.1",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
"integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
"dev": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/source-map-support": {
"version": "0.5.21",
"resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz",
"integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==",
"dev": true,
"dependencies": {
"buffer-from": "^1.0.0",
"source-map": "^0.6.0"
}
},
"node_modules/spdy": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz",
"integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==",
"dev": true,
"dependencies": {
"debug": "^4.1.0",
"handle-thing": "^2.0.0",
"http-deceiver": "^1.2.7",
"select-hose": "^2.0.0",
"spdy-transport": "^3.0.0"
},
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/spdy-transport": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz",
"integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==",
"dev": true,
"dependencies": {
"debug": "^4.1.0",
"detect-node": "^2.0.4",
"hpack.js": "^2.1.6",
"obuf": "^1.1.2",
"readable-stream": "^3.0.6",
"wbuf": "^1.7.3"
}
},
"node_modules/spdy-transport/node_modules/debug": {
"version": "4.3.4",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
"integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
"dev": true,
"dependencies": {
"ms": "2.1.2"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/spdy-transport/node_modules/ms": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
"dev": true
},
"node_modules/spdy/node_modules/debug": {
"version": "4.3.4",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
"integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
"dev": true,
"dependencies": {
"ms": "2.1.2"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/spdy/node_modules/ms": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
"dev": true
},
"node_modules/sprintf-js": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
"integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==",
"dev": true,
"license": "BSD-3-Clause"
},
"node_modules/stack-utils": {
"version": "2.0.6",
"resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz",
"integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==",
"dev": true,
"dependencies": {
"escape-string-regexp": "^2.0.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/statuses": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
"integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
"dev": true,
"engines": {
"node": ">= 0.8"
}
},
"node_modules/string_decoder": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz",
"integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==",
"dev": true,
"dependencies": {
"safe-buffer": "~5.2.0"
}
},
"node_modules/string-length": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz",
"integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"char-regex": "^1.0.2",
"strip-ansi": "^6.0.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/string-width": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/string-width-cjs": {
"name": "string-width",
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/strip-ansi": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dependencies": {
"ansi-regex": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/strip-ansi-cjs": {
"name": "strip-ansi",
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dependencies": {
"ansi-regex": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/strip-bom": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz",
"integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/strip-final-newline": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz",
"integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/strip-json-comments": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
"integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/supports-color": {
"version": "8.1.1",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz",
"integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==",
"dev": true,
"dependencies": {
"has-flag": "^4.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/supports-color?sponsor=1"
}
},
"node_modules/supports-preserve-symlinks-flag": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
"integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
"dev": true,
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/synckit": {
"version": "0.9.2",
"resolved": "https://registry.npmjs.org/synckit/-/synckit-0.9.2.tgz",
"integrity": "sha512-vrozgXDQwYO72vHjUb/HnFbQx1exDjoKzqx23aXEg2a9VIg2TSFZ8FmeZpTjUCFMYw7mpX4BE2SFu8wI7asYsw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@pkgr/core": "^0.1.0",
"tslib": "^2.6.2"
},
"engines": {
"node": "^14.18.0 || >=16.0.0"
},
"funding": {
"url": "https://opencollective.com/unts"
}
},
"node_modules/table-layout": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/table-layout/-/table-layout-4.1.1.tgz",
"integrity": "sha512-iK5/YhZxq5GO5z8wb0bY1317uDF3Zjpha0QFFLA8/trAoiLbQD0HUbMesEaxyzUgDxi2QlcbM8IvqOlEjgoXBA==",
"dev": true,
"license": "MIT",
"dependencies": {
"array-back": "^6.2.2",
"wordwrapjs": "^5.1.0"
},
"engines": {
"node": ">=12.17"
}
},
"node_modules/tapable": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz",
"integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==",
"dev": true,
"engines": {
"node": ">=6"
}
},
"node_modules/tar": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/tar/-/tar-7.2.0.tgz",
"integrity": "sha512-hctwP0Nb4AB60bj8WQgRYaMOuJYRAPMGiQUAotms5igN8ppfQM+IvjQ5HcKu1MaZh2Wy2KWVTe563Yj8dfc14w==",
"dependencies": {
"@isaacs/fs-minipass": "^4.0.0",
"chownr": "^3.0.0",
"minipass": "^7.1.0",
"minizlib": "^3.0.1",
"mkdirp": "^3.0.1",
"yallist": "^5.0.0"
},
"engines": {
"node": ">=18"
}
},
"node_modules/tar/node_modules/mkdirp": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz",
"integrity": "sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==",
"bin": {
"mkdirp": "dist/cjs/src/bin.js"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/tar/node_modules/yallist": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz",
"integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==",
"engines": {
"node": ">=18"
}
},
"node_modules/terser": {
"version": "5.31.6",
"resolved": "https://registry.npmjs.org/terser/-/terser-5.31.6.tgz",
"integrity": "sha512-PQ4DAriWzKj+qgehQ7LK5bQqCFNMmlhjR2PFFLuqGCpuCAauxemVBWwWOxo3UIwWQx8+Pr61Df++r76wDmkQBg==",
"dev": true,
"dependencies": {
"@jridgewell/source-map": "^0.3.3",
"acorn": "^8.8.2",
"commander": "^2.20.0",
"source-map-support": "~0.5.20"
},
"bin": {
"terser": "bin/terser"
},
"engines": {
"node": ">=10"
}
},
"node_modules/terser-webpack-plugin": {
"version": "5.3.10",
"resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz",
"integrity": "sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==",
"dev": true,
"dependencies": {
"@jridgewell/trace-mapping": "^0.3.20",
"jest-worker": "^27.4.5",
"schema-utils": "^3.1.1",
"serialize-javascript": "^6.0.1",
"terser": "^5.26.0"
},
"engines": {
"node": ">= 10.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
},
"peerDependencies": {
"webpack": "^5.1.0"
},
"peerDependenciesMeta": {
"@swc/core": {
"optional": true
},
"esbuild": {
"optional": true
},
"uglify-js": {
"optional": true
}
}
},
"node_modules/test-exclude": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz",
"integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==",
"dev": true,
"license": "ISC",
"dependencies": {
"@istanbuljs/schema": "^0.1.2",
"glob": "^7.1.4",
"minimatch": "^3.0.4"
},
"engines": {
"node": ">=8"
}
},
"node_modules/thingies": {
"version": "1.21.0",
"resolved": "https://registry.npmjs.org/thingies/-/thingies-1.21.0.tgz",
"integrity": "sha512-hsqsJsFMsV+aD4s3CWKk85ep/3I9XzYV/IXaSouJMYIoDlgyi11cBhsqYe9/geRfB0YIikBQg6raRaM+nIMP9g==",
"dev": true,
"license": "Unlicense",
"engines": {
"node": ">=10.18"
},
"peerDependencies": {
"tslib": "^2"
}
},
"node_modules/thunky": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz",
"integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==",
"dev": true,
"license": "MIT"
},
"node_modules/tmpl": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz",
"integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==",
"dev": true,
"license": "BSD-3-Clause"
},
"node_modules/to-regex-range": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
"integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
"dev": true,
"dependencies": {
"is-number": "^7.0.0"
},
"engines": {
"node": ">=8.0"
}
},
"node_modules/toidentifier": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",
"integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==",
"dev": true,
"engines": {
"node": ">=0.6"
}
},
"node_modules/tree-dump": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/tree-dump/-/tree-dump-1.0.2.tgz",
"integrity": "sha512-dpev9ABuLWdEubk+cIaI9cHwRNNDjkBBLXTwI4UCUFdQ5xXKqNXoK4FEciw/vxf+NQ7Cb7sGUyeUtORvHIdRXQ==",
"dev": true,
"license": "Apache-2.0",
"engines": {
"node": ">=10.0"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/streamich"
},
"peerDependencies": {
"tslib": "2"
}
},
"node_modules/tslib": {
"version": "2.6.3",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.3.tgz",
"integrity": "sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==",
"devOptional": true
},
"node_modules/type-detect": {
"version": "4.0.8",
"resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz",
"integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=4"
}
},
"node_modules/type-fest": {
"version": "0.21.3",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz",
"integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==",
"dev": true,
"license": "(MIT OR CC0-1.0)",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/type-is": {
"version": "1.6.18",
"resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
"integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
"dev": true,
"dependencies": {
"media-typer": "0.3.0",
"mime-types": "~2.1.24"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/typescript": {
"version": "5.8.2",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.2.tgz",
"integrity": "sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==",
"dev": true,
"license": "Apache-2.0",
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
},
"engines": {
"node": ">=14.17"
}
},
"node_modules/typical": {
"version": "7.3.0",
"resolved": "https://registry.npmjs.org/typical/-/typical-7.3.0.tgz",
"integrity": "sha512-ya4mg/30vm+DOWfBg4YK3j2WD6TWtRkCbasOJr40CseYENzCUby/7rIvXA99JGsQHeNxLbnXdyLLxKSv3tauFw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12.17"
}
},
"node_modules/uc.micro": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-2.1.0.tgz",
"integrity": "sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==",
"dev": true,
"license": "MIT"
},
"node_modules/uglify-js": {
"version": "3.19.3",
"resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.3.tgz",
"integrity": "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==",
"dev": true,
"license": "BSD-2-Clause",
"optional": true,
"bin": {
"uglifyjs": "bin/uglifyjs"
},
"engines": {
"node": ">=0.8.0"
}
},
"node_modules/underscore": {
"version": "1.13.7",
"resolved": "https://registry.npmjs.org/underscore/-/underscore-1.13.7.tgz",
"integrity": "sha512-GMXzWtsc57XAtguZgaQViUOzs0KTkk8ojr3/xAxXLITqf/3EMwxC0inyETfDFjH/Krbhuep0HNbbjI9i/q3F3g==",
"dev": true,
"license": "MIT"
},
"node_modules/undici-types": {
"version": "6.20.0",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.20.0.tgz",
"integrity": "sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==",
"license": "MIT"
},
"node_modules/unpipe": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
"integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==",
"dev": true,
"engines": {
"node": ">= 0.8"
}
},
"node_modules/update-browserslist-db": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.1.tgz",
"integrity": "sha512-R8UzCaa9Az+38REPiJ1tXlImTJXlVfgHZsglwBD/k6nj76ctsH1E3q4doGrukiLQd3sGQYu56r5+lo5r94l29A==",
"dev": true,
"funding": [
{
"type": "opencollective",
"url": "https://opencollective.com/browserslist"
},
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/browserslist"
},
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"license": "MIT",
"dependencies": {
"escalade": "^3.2.0",
"picocolors": "^1.1.0"
},
"bin": {
"update-browserslist-db": "cli.js"
},
"peerDependencies": {
"browserslist": ">= 4.21.0"
}
},
"node_modules/uri-js": {
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
"integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
"dev": true,
"dependencies": {
"punycode": "^2.1.0"
}
},
"node_modules/util-deprecate": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
"integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==",
"dev": true
},
"node_modules/utils-merge": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
"integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==",
"dev": true,
"engines": {
"node": ">= 0.4.0"
}
},
"node_modules/uuid": {
"version": "8.3.2",
"resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz",
"integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==",
"dev": true,
"bin": {
"uuid": "dist/bin/uuid"
}
},
"node_modules/v8-to-istanbul": {
"version": "9.3.0",
"resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz",
"integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==",
"dev": true,
"license": "ISC",
"dependencies": {
"@jridgewell/trace-mapping": "^0.3.12",
"@types/istanbul-lib-coverage": "^2.0.1",
"convert-source-map": "^2.0.0"
},
"engines": {
"node": ">=10.12.0"
}
},
"node_modules/vary": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
"integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==",
"dev": true,
"engines": {
"node": ">= 0.8"
}
},
"node_modules/walk-back": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/walk-back/-/walk-back-5.1.1.tgz",
"integrity": "sha512-e/FRLDVdZQWFrAzU6Hdvpm7D7m2ina833gIKLptQykRK49mmCYHLHq7UqjPDbxbKLZkTkW1rFqbengdE3sLfdw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12.17"
}
},
"node_modules/walker": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz",
"integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==",
"dev": true,
"license": "Apache-2.0",
"dependencies": {
"makeerror": "1.0.12"
}
},
"node_modules/watchpack": {
"version": "2.4.2",
"resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.2.tgz",
"integrity": "sha512-TnbFSbcOCcDgjZ4piURLCbJ3nJhznVh9kw6F6iokjiFPl8ONxe9A6nMDVXDiNbrSfLILs6vB07F7wLBrwPYzJw==",
"dev": true,
"dependencies": {
"glob-to-regexp": "^0.4.1",
"graceful-fs": "^4.1.2"
},
"engines": {
"node": ">=10.13.0"
}
},
"node_modules/wavefile": {
"version": "11.0.0",
"resolved": "https://registry.npmjs.org/wavefile/-/wavefile-11.0.0.tgz",
"integrity": "sha512-/OBiAALgWU24IG7sC84cDO/KfFuvajWc5Uec0oV2zrpOOZZDgGdOwHwgEzOrwh8jkubBk7PtZfQBIcI1OaE5Ng==",
"dev": true,
"bin": {
"wavefile": "bin/wavefile.js"
},
"engines": {
"node": ">=8"
}
},
"node_modules/wbuf": {
"version": "1.7.3",
"resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz",
"integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==",
"dev": true,
"dependencies": {
"minimalistic-assert": "^1.0.0"
}
},
"node_modules/webpack": {
"version": "5.97.1",
"resolved": "https://registry.npmjs.org/webpack/-/webpack-5.97.1.tgz",
"integrity": "sha512-EksG6gFY3L1eFMROS/7Wzgrii5mBAFe4rIr3r2BTfo7bcc+DWwFZ4OJ/miOuHJO/A85HwyI4eQ0F6IKXesO7Fg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/eslint-scope": "^3.7.7",
"@types/estree": "^1.0.6",
"@webassemblyjs/ast": "^1.14.1",
"@webassemblyjs/wasm-edit": "^1.14.1",
"@webassemblyjs/wasm-parser": "^1.14.1",
"acorn": "^8.14.0",
"browserslist": "^4.24.0",
"chrome-trace-event": "^1.0.2",
"enhanced-resolve": "^5.17.1",
"es-module-lexer": "^1.2.1",
"eslint-scope": "5.1.1",
"events": "^3.2.0",
"glob-to-regexp": "^0.4.1",
"graceful-fs": "^4.2.11",
"json-parse-even-better-errors": "^2.3.1",
"loader-runner": "^4.2.0",
"mime-types": "^2.1.27",
"neo-async": "^2.6.2",
"schema-utils": "^3.2.0",
"tapable": "^2.1.1",
"terser-webpack-plugin": "^5.3.10",
"watchpack": "^2.4.1",
"webpack-sources": "^3.2.3"
},
"bin": {
"webpack": "bin/webpack.js"
},
"engines": {
"node": ">=10.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
},
"peerDependenciesMeta": {
"webpack-cli": {
"optional": true
}
}
},
"node_modules/webpack-cli": {
"version": "5.1.4",
"resolved": "https://registry.npmjs.org/webpack-cli/-/webpack-cli-5.1.4.tgz",
"integrity": "sha512-pIDJHIEI9LR0yxHXQ+Qh95k2EvXpWzZ5l+d+jIo+RdSm9MiHfzazIxwwni/p7+x4eJZuvG1AJwgC4TNQ7NRgsg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@discoveryjs/json-ext": "^0.5.0",
"@webpack-cli/configtest": "^2.1.1",
"@webpack-cli/info": "^2.0.2",
"@webpack-cli/serve": "^2.0.5",
"colorette": "^2.0.14",
"commander": "^10.0.1",
"cross-spawn": "^7.0.3",
"envinfo": "^7.7.3",
"fastest-levenshtein": "^1.0.12",
"import-local": "^3.0.2",
"interpret": "^3.1.1",
"rechoir": "^0.8.0",
"webpack-merge": "^5.7.3"
},
"bin": {
"webpack-cli": "bin/cli.js"
},
"engines": {
"node": ">=14.15.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
},
"peerDependencies": {
"webpack": "5.x.x"
},
"peerDependenciesMeta": {
"@webpack-cli/generators": {
"optional": true
},
"webpack-bundle-analyzer": {
"optional": true
},
"webpack-dev-server": {
"optional": true
}
}
},
"node_modules/webpack-cli/node_modules/commander": {
"version": "10.0.1",
"resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz",
"integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==",
"dev": true,
"engines": {
"node": ">=14"
}
},
"node_modules/webpack-dev-middleware": {
"version": "7.4.2",
"resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-7.4.2.tgz",
"integrity": "sha512-xOO8n6eggxnwYpy1NlzUKpvrjfJTvae5/D6WOK0S2LSo7vjmo5gCM1DbLUmFqrMTJP+W/0YZNctm7jasWvLuBA==",
"dev": true,
"license": "MIT",
"dependencies": {
"colorette": "^2.0.10",
"memfs": "^4.6.0",
"mime-types": "^2.1.31",
"on-finished": "^2.4.1",
"range-parser": "^1.2.1",
"schema-utils": "^4.0.0"
},
"engines": {
"node": ">= 18.12.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
},
"peerDependencies": {
"webpack": "^5.0.0"
},
"peerDependenciesMeta": {
"webpack": {
"optional": true
}
}
},
"node_modules/webpack-dev-middleware/node_modules/ajv": {
"version": "8.17.1",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz",
"integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==",
"dev": true,
"license": "MIT",
"dependencies": {
"fast-deep-equal": "^3.1.3",
"fast-uri": "^3.0.1",
"json-schema-traverse": "^1.0.0",
"require-from-string": "^2.0.2"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/epoberezkin"
}
},
"node_modules/webpack-dev-middleware/node_modules/ajv-keywords": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz",
"integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==",
"dev": true,
"license": "MIT",
"dependencies": {
"fast-deep-equal": "^3.1.3"
},
"peerDependencies": {
"ajv": "^8.8.2"
}
},
"node_modules/webpack-dev-middleware/node_modules/json-schema-traverse": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==",
"dev": true,
"license": "MIT"
},
"node_modules/webpack-dev-middleware/node_modules/schema-utils": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz",
"integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/json-schema": "^7.0.9",
"ajv": "^8.9.0",
"ajv-formats": "^2.1.1",
"ajv-keywords": "^5.1.0"
},
"engines": {
"node": ">= 12.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
}
},
"node_modules/webpack-dev-server": {
"version": "5.2.2",
"resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-5.2.2.tgz",
"integrity": "sha512-QcQ72gh8a+7JO63TAx/6XZf/CWhgMzu5m0QirvPfGvptOusAxG12w2+aua1Jkjr7hzaWDnJ2n6JFeexMHI+Zjg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/bonjour": "^3.5.13",
"@types/connect-history-api-fallback": "^1.5.4",
"@types/express": "^4.17.21",
"@types/express-serve-static-core": "^4.17.21",
"@types/serve-index": "^1.9.4",
"@types/serve-static": "^1.15.5",
"@types/sockjs": "^0.3.36",
"@types/ws": "^8.5.10",
"ansi-html-community": "^0.0.8",
"bonjour-service": "^1.2.1",
"chokidar": "^3.6.0",
"colorette": "^2.0.10",
"compression": "^1.7.4",
"connect-history-api-fallback": "^2.0.0",
"express": "^4.21.2",
"graceful-fs": "^4.2.6",
"http-proxy-middleware": "^2.0.9",
"ipaddr.js": "^2.1.0",
"launch-editor": "^2.6.1",
"open": "^10.0.3",
"p-retry": "^6.2.0",
"schema-utils": "^4.2.0",
"selfsigned": "^2.4.1",
"serve-index": "^1.9.1",
"sockjs": "^0.3.24",
"spdy": "^4.0.2",
"webpack-dev-middleware": "^7.4.2",
"ws": "^8.18.0"
},
"bin": {
"webpack-dev-server": "bin/webpack-dev-server.js"
},
"engines": {
"node": ">= 18.12.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
},
"peerDependencies": {
"webpack": "^5.0.0"
},
"peerDependenciesMeta": {
"webpack": {
"optional": true
},
"webpack-cli": {
"optional": true
}
}
},
"node_modules/webpack-dev-server/node_modules/ajv": {
"version": "8.17.1",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz",
"integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==",
"dev": true,
"license": "MIT",
"dependencies": {
"fast-deep-equal": "^3.1.3",
"fast-uri": "^3.0.1",
"json-schema-traverse": "^1.0.0",
"require-from-string": "^2.0.2"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/epoberezkin"
}
},
"node_modules/webpack-dev-server/node_modules/ajv-keywords": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz",
"integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==",
"dev": true,
"license": "MIT",
"dependencies": {
"fast-deep-equal": "^3.1.3"
},
"peerDependencies": {
"ajv": "^8.8.2"
}
},
"node_modules/webpack-dev-server/node_modules/json-schema-traverse": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==",
"dev": true,
"license": "MIT"
},
"node_modules/webpack-dev-server/node_modules/schema-utils": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz",
"integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/json-schema": "^7.0.9",
"ajv": "^8.9.0",
"ajv-formats": "^2.1.1",
"ajv-keywords": "^5.1.0"
},
"engines": {
"node": ">= 12.13.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/webpack"
}
},
"node_modules/webpack-merge": {
"version": "5.8.0",
"resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.8.0.tgz",
"integrity": "sha512-/SaI7xY0831XwP6kzuwhKWVKDP9t1QY1h65lAFLbZqMPIuYcD9QAW4u9STIbU9kaJbPBB/geU/gLr1wDjOhQ+Q==",
"dev": true,
"dependencies": {
"clone-deep": "^4.0.1",
"wildcard": "^2.0.0"
},
"engines": {
"node": ">=10.0.0"
}
},
"node_modules/webpack-sources": {
"version": "3.2.3",
"resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz",
"integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==",
"dev": true,
"engines": {
"node": ">=10.13.0"
}
},
"node_modules/websocket-driver": {
"version": "0.7.4",
"resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz",
"integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==",
"dev": true,
"dependencies": {
"http-parser-js": ">=0.5.1",
"safe-buffer": ">=5.1.0",
"websocket-extensions": ">=0.1.1"
},
"engines": {
"node": ">=0.8.0"
}
},
"node_modules/websocket-extensions": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz",
"integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==",
"dev": true,
"engines": {
"node": ">=0.8.0"
}
},
"node_modules/which": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
"dependencies": {
"isexe": "^2.0.0"
},
"bin": {
"node-which": "bin/node-which"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/wildcard": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.0.tgz",
"integrity": "sha512-JcKqAHLPxcdb9KM49dufGXn2x3ssnfjbcaQdLlfZsL9rH9wgDQjUtDxbo8NE0F6SFvydeu1VhZe7hZuHsB2/pw==",
"dev": true
},
"node_modules/wordwrap": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz",
"integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==",
"dev": true,
"license": "MIT"
},
"node_modules/wordwrapjs": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/wordwrapjs/-/wordwrapjs-5.1.0.tgz",
"integrity": "sha512-JNjcULU2e4KJwUNv6CHgI46UvDGitb6dGryHajXTDiLgg1/RiGoPSDw4kZfYnwGtEXf2ZMeIewDQgFGzkCB2Sg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12.17"
}
},
"node_modules/wrap-ansi": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
"integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-styles": "^4.0.0",
"string-width": "^4.1.0",
"strip-ansi": "^6.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/wrap-ansi-cjs": {
"name": "wrap-ansi",
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
"integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
"dependencies": {
"ansi-styles": "^4.0.0",
"string-width": "^4.1.0",
"strip-ansi": "^6.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
"dev": true
},
"node_modules/write-file-atomic": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-5.0.1.tgz",
"integrity": "sha512-+QU2zd6OTD8XWIJCbffaiQeH9U73qIqafo1x6V1snCWYGJf6cVE0cDR4D8xRzcEnfI21IFrUPzPGtcPf8AC+Rw==",
"dev": true,
"license": "ISC",
"dependencies": {
"imurmurhash": "^0.1.4",
"signal-exit": "^4.0.1"
},
"engines": {
"node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
"node_modules/write-file-atomic/node_modules/signal-exit": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
"integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
"dev": true,
"license": "ISC",
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/ws": {
"version": "8.18.0",
"resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz",
"integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10.0.0"
},
"peerDependencies": {
"bufferutil": "^4.0.1",
"utf-8-validate": ">=5.0.2"
},
"peerDependenciesMeta": {
"bufferutil": {
"optional": true
},
"utf-8-validate": {
"optional": true
}
}
},
"node_modules/xmlcreate": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/xmlcreate/-/xmlcreate-2.0.4.tgz",
"integrity": "sha512-nquOebG4sngPmGPICTS5EnxqhKbCmz5Ox5hsszI2T6U5qdrJizBc+0ilYSEjTSzU0yZcmvppztXe/5Al5fUwdg==",
"dev": true,
"license": "Apache-2.0"
},
"node_modules/y18n": {
"version": "5.0.8",
"resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
"integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
"dev": true,
"license": "ISC",
"engines": {
"node": ">=10"
}
},
"node_modules/yallist": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
"integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
"dev": true,
"license": "ISC"
},
"node_modules/yargs": {
"version": "17.7.2",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
"integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
"dev": true,
"license": "MIT",
"dependencies": {
"cliui": "^8.0.1",
"escalade": "^3.1.1",
"get-caller-file": "^2.0.5",
"require-directory": "^2.1.1",
"string-width": "^4.2.3",
"y18n": "^5.0.5",
"yargs-parser": "^21.1.1"
},
"engines": {
"node": ">=12"
}
},
"node_modules/yargs-parser": {
"version": "21.1.1",
"resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
"integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
"dev": true,
"license": "ISC",
"engines": {
"node": ">=12"
}
},
"node_modules/yocto-queue": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",
"integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
}
}
}
| transformers.js/package-lock.json/0 | {
"file_path": "transformers.js/package-lock.json",
"repo_id": "transformers.js",
"token_count": 251206
} | 331 |
import onnx
from typing import Optional, Union
from pathlib import Path
import os
import logging

logger = logging.getLogger(__name__)

# https://github.com/onnx/onnx/pull/6556
MAXIMUM_PROTOBUF = 2147483648  # 2GiB

def strict_check_model(model_or_path: Union[onnx.ModelProto, str, Path]):
    try:
        onnx.checker.check_model(model_or_path, full_check=True)
    except Exception as e:
        # Tolerate custom/contrib operators that the ONNX checker does not know about.
        if "No Op registered for" in str(e):
            pass
        else:
            raise e

def check_and_save_model(model: onnx.ModelProto, save_path: Optional[Union[str, Path]]):
    # For large models, a path must be provided instead of a ModelProto:
    # https://github.com/onnx/onnx/blob/main/docs/PythonAPIOverview.md#checking-a-large-onnx-model-2gb
    if model.ByteSize() < MAXIMUM_PROTOBUF:
        # For the try/except, refer to https://github.com/microsoft/onnxruntime/issues/14768
        strict_check_model(model)
        if save_path:
            # Overwrite any previous export at this location.
            save_path = Path(save_path).as_posix()
            external_file_name = os.path.basename(save_path) + "_data"
            # path/to/model.onnx_data
            external_path = os.path.join(os.path.dirname(save_path), external_file_name)
            if save_path.endswith(".onnx") and os.path.isfile(save_path):
                os.remove(save_path)
                if os.path.isfile(external_path):
                    # The new model may be below the maximum protobuf size, overwriting a model
                    # that was larger. Hence this os.remove.
                    os.remove(external_path)
            onnx.save(
                model,
                save_path,
                convert_attribute=True,
            )
    elif save_path is not None:
        # path/to/model.onnx
        save_path = Path(save_path).as_posix()
        external_file_name = os.path.basename(save_path) + "_data"
        # path/to/model.onnx_data
        external_path = os.path.join(os.path.dirname(save_path), external_file_name)
        if save_path.endswith(".onnx") and os.path.isfile(save_path):
            os.remove(save_path)
            if os.path.isfile(external_path):
                os.remove(external_path)
        onnx.save(
            model,
            save_path,
            save_as_external_data=True,
            all_tensors_to_one_file=True,
            location=external_file_name,
            convert_attribute=True,
        )
    else:
        logger.info(
            "Merged ONNX model exceeds 2GB and no `save_path` was given, so the model will not be checked."
        )
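

# --- Added usage sketch (not part of the original file) ---
# A minimal, hypothetical example of the intended call pattern. The input and
# output paths below are assumptions; onnx.load resolves external data by default.
if __name__ == "__main__":
    merged = onnx.load("merged_model.onnx")
    check_and_save_model(merged, save_path="output/model.onnx")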
| transformers.js/scripts/utils.py/0 | {
"file_path": "transformers.js/scripts/utils.py",
"repo_id": "transformers.js",
"token_count": 1216
} | 332 |
import { GITHUB_ISSUE_URL, IMAGE_PROCESSOR_NAME } from '../../utils/constants.js';
import { getModelJSON } from '../../utils/hub.js';
import { ImageProcessor } from '../../base/image_processors_utils.js';

import * as AllImageProcessors from '../image_processors.js';

export class AutoImageProcessor {

    /** @type {typeof ImageProcessor.from_pretrained} */
    static async from_pretrained(pretrained_model_name_or_path, options={}) {
        const preprocessorConfig = await getModelJSON(pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, true, options);

        // Determine image processor class
        const key = preprocessorConfig.image_processor_type ?? preprocessorConfig.feature_extractor_type;
        let image_processor_class = AllImageProcessors[key?.replace(/Fast$/, '')];

        if (!image_processor_class) {
            if (key !== undefined) {
                // Only log a warning if the class is not found and the key is set.
                console.warn(`Image processor type '${key}' not found, assuming base ImageProcessor. Please report this at ${GITHUB_ISSUE_URL}.`)
            }
            image_processor_class = ImageProcessor;
        }

        // Instantiate image processor
        return new image_processor_class(preprocessorConfig);
    }
}
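
// --- Added usage sketch (not part of the original file) ---
// A minimal, hypothetical example; the model id and image URL are assumptions — any
// Hugging Face repo containing a preprocessor_config.json should resolve the same way:
//
//   import { RawImage } from '../../utils/image.js';
//   const processor = await AutoImageProcessor.from_pretrained('Xenova/vit-base-patch16-224');
//   const image = await RawImage.read('https://example.com/cat.jpg');
//   const inputs = await processor(image);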
| transformers.js/src/models/auto/image_processing_auto.js/0 | {
"file_path": "transformers.js/src/models/auto/image_processing_auto.js",
"repo_id": "transformers.js",
"token_count": 475
} | 333 |
export * from './audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js';
export * from './encodec/feature_extraction_encodec.js';
export * from './clap/feature_extraction_clap.js';
export * from './dac/feature_extraction_dac.js';
export * from './gemma3n/feature_extraction_gemma3n.js';
export * from './moonshine/feature_extraction_moonshine.js';
export * from './pyannote/feature_extraction_pyannote.js';
export * from './seamless_m4t/feature_extraction_seamless_m4t.js';
export * from './snac/feature_extraction_snac.js';
export * from './speecht5/feature_extraction_speecht5.js';
export * from './wav2vec2/feature_extraction_wav2vec2.js';
export * from './wespeaker/feature_extraction_wespeaker.js';
export * from './whisper/feature_extraction_whisper.js';
// For legacy support, ImageFeatureExtractor is an alias for ImageProcessor
export { ImageProcessor as ImageFeatureExtractor } from "../base/image_processors_utils.js";
| transformers.js/src/models/feature_extractors.js/0 | {
"file_path": "transformers.js/src/models/feature_extractors.js",
"repo_id": "transformers.js",
"token_count": 336
} | 334 |
import { MaskFormerImageProcessor } from "../maskformer/image_processing_maskformer.js";
// NOTE: extends MaskFormerImageProcessor
export class Mask2FormerImageProcessor extends MaskFormerImageProcessor { }
| transformers.js/src/models/mask2former/image_processing_mask2former.js/0 | {
"file_path": "transformers.js/src/models/mask2former/image_processing_mask2former.js",
"repo_id": "transformers.js",
"token_count": 53
} | 335 |
import { Processor } from "../../base/processing_utils.js";
import { AutoImageProcessor } from "../auto/image_processing_auto.js";
import { AutoTokenizer } from "../../tokenizers.js";
import { RawImage } from "../../utils/image.js";

const IMAGE_TOKEN = "<|image|>";
const IMAGE_TOKEN_PATTERN = /<\|image_\d+\|>/g;

export class Phi3VProcessor extends Processor {
    static image_processor_class = AutoImageProcessor
    static tokenizer_class = AutoTokenizer

    /**
     *
     * @param {string|string[]} text
     * @param {RawImage|RawImage[]} images
     * @param { { padding?: boolean, truncation?: boolean, num_crops?: number } | undefined } options
     * @returns {Promise<any>}
     */
    async _call(text, images = null, {
        padding = true,
        truncation = true,
        num_crops = null,
    } = {}) {
        if (!Array.isArray(text)) {
            text = [text];
        }

        let text_inputs, image_inputs;
        if (images) {
            image_inputs = await this.image_processor(images, { num_crops });
            const { num_img_tokens } = image_inputs;

            // The original implementation adds a bos_token before the image tokens
            // TODO: Check if this affects performance, since it looks like a bug in the original implementation
            const prompt_chunks = text.map((t, i) => t.split(IMAGE_TOKEN_PATTERN).join(IMAGE_TOKEN.repeat(num_img_tokens[i])));
            text_inputs = this.tokenizer(prompt_chunks, { padding, truncation });

            // The model expects image tokens to be negative, so we negate the image token ids
            const image_token_id = this.tokenizer.model.convert_tokens_to_ids([IMAGE_TOKEN])[0];
            text_inputs.input_ids.map_(id => (id == image_token_id) ? -id : id);
        } else {
            text_inputs = this.tokenizer(text);
        }

        return {
            ...text_inputs,
            ...image_inputs,
        }
    }
}
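
// --- Added usage sketch (not part of the original file) ---
// A hypothetical call pattern; the model id and image URL are assumptions. Note the
// numbered placeholder in the prompt, which IMAGE_TOKEN_PATTERN above matches:
//
//   const processor = await Phi3VProcessor.from_pretrained('onnx-community/Phi-3.5-vision-instruct');
//   const image = await RawImage.read('https://example.com/photo.jpg');
//   const inputs = await processor('<|image_1|>\nDescribe this image.', image);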
| transformers.js/src/models/phi3_v/processing_phi3_v.js/0 | {
"file_path": "transformers.js/src/models/phi3_v/processing_phi3_v.js",
"repo_id": "transformers.js",
"token_count": 827
} | 336 |