text
stringlengths
5
631k
id
stringlengths
14
178
metadata
dict
__index_level_0__
int64
0
647
# Models [[autodoc]] timm.create_model [[autodoc]] timm.list_models
pytorch-image-models/hfdocs/source/reference/models.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/reference/models.mdx", "repo_id": "pytorch-image-models", "token_count": 29 }
257
""" NaFlex (NaViT + FlexiViT) Transforms and Collation Implements PyTorch versions of the transforms described in the NaViT and FlexiViT papers: - NaViT: https://arxiv.org/abs/2307.14995 - FlexiViT: https://arxiv.org/abs/2212.08013 Enables variable resolution/aspect ratio image handling with efficient patching. Hacked together by / Copyright 2025, Ross Wightman, Hugging Face """ import math import random import warnings from typing import Dict, List, Optional, Sequence, Tuple, Union import torch from PIL import Image from torchvision import transforms from torchvision.transforms import functional as F from torchvision.transforms.functional import InterpolationMode from .transforms import str_to_interp_mode, crop_or_pad, center_crop_or_pad def get_image_size_for_seq( image_hw: Tuple[int, int], patch_size: Union[int, Tuple[int, int]] = 16, max_seq_len: int = 1024, divisible_by_patch: bool = True, max_ratio: Optional[float] = None, eps: float = 1e-5, ) -> Tuple[float, Tuple[int, int]]: """Determine scaling ratio and image size for sequence length constraint. Calculates the scaling ratio needed so that when image_hw is scaled, the total number of resulting patches does not exceed max_seq_len. Args: image_hw: Original image dimensions (height, width). patch_size: Patch dimensions. If int, patches are square. max_seq_len: Maximum allowed sequence length. divisible_by_patch: Whether resulting dimensions must be divisible by patch_size. max_ratio: Optional cap on scaling ratio to prevent excessive upsampling. eps: Convergence threshold for binary search. Returns: Tuple of (ratio, target_hw) where ratio is the scaling factor and target_hw is the resulting (height, width) after scaling. 
""" # Handle patch size input, extract patch_h, patch_w if isinstance(patch_size, int): patch_h, patch_w = patch_size, patch_size else: # Assume it's a tuple/list: (patch_h, patch_w) if len(patch_size) != 2: raise ValueError("patch_size tuple must have exactly two elements (patch_h, patch_w).") patch_h, patch_w = patch_size # Safety checks if patch_h <= 0 or patch_w <= 0: raise ValueError("patch_size dimensions must be positive.") def prepare_target_hw(ratio): """Scale image_hw by ratio and optionally round dimensions to multiples of patch_h, patch_w.""" scaled_h = image_hw[0] * ratio scaled_w = image_hw[1] * ratio # If we need the result to be divisible by patch_size if divisible_by_patch: scaled_h = patch_h * math.ceil(scaled_h / patch_h) scaled_w = patch_w * math.ceil(scaled_w / patch_w) # Ensure at least one patch in each dimension scaled_h = int(max(scaled_h, patch_h)) scaled_w = int(max(scaled_w, patch_w)) return scaled_h, scaled_w def is_feasible(ratio): """Check if scaling by 'ratio' keeps patch count within max_seq_len.""" t_h, t_w = prepare_target_hw(ratio) # Each dimension is already a multiple of patch_h, patch_w if divisible_by_patch=True. # Use integer division to count patches. 
num_patches_h = t_h // patch_h num_patches_w = t_w // patch_w seq_len = num_patches_h * num_patches_w return seq_len <= max_seq_len # Binary search boundaries lb = eps / 10.0 rb = 100.0 # Standard binary search loop while (rb - lb) >= eps: mid = (lb + rb) / 2.0 if is_feasible(mid): lb = mid else: rb = mid # The final ratio from the binary search ratio = lb # If max_ratio is provided, clamp it to prevent upsampling beyond that threshold if max_ratio is not None: ratio = min(ratio, max_ratio) # Final checks if ratio <= eps: raise ValueError("Binary search failed - image might be too large?") if ratio >= 100.0: raise ValueError("Binary search failed - image might be too small?") # Prepare the final target dimensions with the possibly clamped ratio target_hw = prepare_target_hw(ratio) return ratio, target_hw _RANDOM_INTERPOLATION = (str_to_interp_mode('bilinear'), str_to_interp_mode('bicubic')) class ResizeToSequence(torch.nn.Module): """Resize image to fit within a maximum sequence length constraint when patchified. This maintains aspect ratio while ensuring the resulting image, when divided into patches, will not exceed the specified maximum sequence length. """ def __init__( self, patch_size: int, max_seq_len: int = 1024, divisible_by_patch: bool = True, max_ratio: Optional[float] = None, interpolation: Union[str, InterpolationMode, Tuple[InterpolationMode, ...]] = 'bicubic', ) -> None: """Initialize ResizeToSequence transform. Args: patch_size: Size of patches. max_seq_len: Maximum sequence length constraint. divisible_by_patch: Whether dimensions must be divisible by patch_size. max_ratio: Optional cap on scaling ratio. interpolation: Interpolation method or methods. 
""" super().__init__() self.patch_size = patch_size self.max_seq_len = max_seq_len self.divisible_by_patch = divisible_by_patch self.max_ratio = max_ratio if isinstance(interpolation, str): if interpolation == 'random': self.interpolation = _RANDOM_INTERPOLATION else: self.interpolation = str_to_interp_mode(interpolation) else: self.interpolation = interpolation def forward(self, img: torch.Tensor) -> torch.Tensor: """Resize image to maintain aspect ratio and fit sequence constraint. Args: img: Input image tensor. Returns: Resized image tensor. """ _, h, w = transforms.functional.get_dimensions(img) _, target_hw = get_image_size_for_seq( (h, w), self.patch_size, self.max_seq_len, divisible_by_patch=self.divisible_by_patch, max_ratio=self.max_ratio, ) if isinstance(self.interpolation, (tuple, list)): interpolation = random.choice(self.interpolation) else: interpolation = self.interpolation resized_img = transforms.functional.resize(img, target_hw, interpolation=interpolation, antialias=True) return resized_img class ResizeKeepRatioToSequence(torch.nn.Module): """ Resize and Keep Aspect Ratio, adapted to fit sequence length constraints. 
""" def __init__( self, patch_size=16, max_sequence_len=1024, divisible_by_patch=True, longest=0., interpolation='bilinear', random_scale_prob=0., random_scale_range=(0.85, 1.05), random_scale_area=False, random_aspect_prob=0., random_aspect_range=(0.9, 1.11), max_ratio=None, ): """ Args: patch_size: Size of patches (int or tuple of (patch_h, patch_w)) max_sequence_len: Maximum allowed sequence length for the resulting image divisible_by_patch: If True, ensure dimensions are divisible by patch_size longest: Float between 0-1 where 0=shortest side, 1=longest side determines scale interpolation: Interpolation method for resizing random_scale_prob: Probability of applying random scaling random_scale_range: Range for random scaling factor (min, max) random_scale_area: If True, scale factors affect area (√ factor) random_aspect_prob: Probability of applying random aspect ratio jittering random_aspect_range: Range for random aspect ratio (min, max) max_ratio: Maximum allowed scaling ratio """ super().__init__() self.patch_size = patch_size self.max_sequence_len = max_sequence_len self.divisible_by_patch = divisible_by_patch self.longest = float(longest) if interpolation == 'random': self.interpolation = _RANDOM_INTERPOLATION else: self.interpolation = str_to_interp_mode(interpolation) self.random_scale_prob = random_scale_prob self.random_scale_range = random_scale_range self.random_scale_area = random_scale_area self.random_aspect_prob = random_aspect_prob self.random_aspect_range = random_aspect_range self.max_ratio = max_ratio @staticmethod def get_params( img, patch_size, max_sequence_len, divisible_by_patch, longest, random_scale_prob=0., random_scale_range=(1.0, 1.33), random_scale_area=False, random_aspect_prob=0., random_aspect_range=(0.9, 1.11), max_ratio=None, ): """Get parameters for resizing.""" # Get image dimensions img_h, img_w = F.get_dimensions(img)[1:] # Step 1: Get the maximum allowed dimensions from sequence length constraint _, target_hw = 
get_image_size_for_seq( (img_h, img_w), patch_size, max_sequence_len, divisible_by_patch, max_ratio, ) target_h, target_w = target_hw # Calculate ratio based on sequence constraint ratio_h = target_h / img_h ratio_w = target_w / img_w # Apply longest blending ratio = max(ratio_h, ratio_w) * longest + min(ratio_h, ratio_w) * (1. - longest) # Apply random scaling if random_scale_prob > 0 and random.random() < random_scale_prob: ratio_factor = random.uniform(random_scale_range[0], random_scale_range[1]) if random_scale_area: # Make ratio factor equivalent to area change ratio_factor = 1. / math.sqrt(ratio_factor) ratio_factor = (ratio_factor, ratio_factor) else: ratio_factor = (1., 1.) # Apply random aspect if random_aspect_prob > 0 and random.random() < random_aspect_prob: log_aspect = (math.log(random_aspect_range[0]), math.log(random_aspect_range[1])) aspect_factor = math.exp(random.uniform(*log_aspect)) aspect_factor = math.sqrt(aspect_factor) # Apply aspect ratio jittering ratio_factor = (ratio_factor[0] / aspect_factor, ratio_factor[1] * aspect_factor) # Calculate final dimensions size = [round(dim * ratio * f) for dim, f in zip((img_h, img_w), ratio_factor)] # Ensure dimensions satisfy sequence constraint and are divisible by patch size if isinstance(patch_size, int): ph, pw = patch_size, patch_size else: ph, pw = patch_size # Ensure dimensions are at least one patch size[0] = max(size[0], ph) size[1] = max(size[1], pw) # Make divisible by patch size if needed if divisible_by_patch: size[0] = ph * math.ceil(size[0] / ph) size[1] = pw * math.ceil(size[1] / pw) # Verify we haven't exceeded sequence length num_patches_h = size[0] // ph num_patches_w = size[1] // pw seq_len = num_patches_h * num_patches_w if seq_len > max_sequence_len: # Scale back down to fit sequence constraint scale_back = math.sqrt(max_sequence_len / seq_len) size[0] = int(size[0] * scale_back) size[1] = int(size[1] * scale_back) # Ensure divisible by patch size after scaling back if 
divisible_by_patch: size[0] = ph * math.ceil(size[0] / ph) size[1] = pw * math.ceil(size[1] / pw) return size def forward(self, img): """ Resize the image with aspect ratio preservation and sequence length constraints. """ size = self.get_params( img, self.patch_size, self.max_sequence_len, self.divisible_by_patch, self.longest, self.random_scale_prob, self.random_scale_range, self.random_scale_area, self.random_aspect_prob, self.random_aspect_range, self.max_ratio, ) if isinstance(self.interpolation, (tuple, list)): interpolation = random.choice(self.interpolation) else: interpolation = self.interpolation return F.resize(img, size, interpolation) def __repr__(self): interpolate_str = "random" if isinstance(self.interpolation, (tuple, list)) else str(self.interpolation) return (f"{self.__class__.__name__}(patch_size={self.patch_size}, " f"max_sequence_len={self.max_sequence_len}, " f"longest={self.longest:.3f}, " f"random_scale_prob={self.random_scale_prob:.3f}, " f"random_aspect_prob={self.random_aspect_prob:.3f})") class CenterCropToSequence(torch.nn.Module): """Center crop the image such that the resulting patch sequence length meets constraints.""" def __init__( self, patch_size: int, max_seq_len: int, divisible_by_patch: bool = True, fill: Union[int, Tuple[int, int, int]] = 0, padding_mode: str = 'constant' ): super().__init__() self.patch_size = patch_size self.max_seq_len = max_seq_len self.divisible_by_patch = divisible_by_patch self.fill = fill self.padding_mode = padding_mode def forward(self, img): """Center crop the image to maintain aspect ratio and fit sequence constraint.""" _, h, w = transforms.functional.get_dimensions(img) _, target_hw = get_image_size_for_seq( (h, w), self.patch_size, self.max_seq_len, self.divisible_by_patch ) # Use center crop return center_crop_or_pad(img, target_hw, fill=self.fill, padding_mode=self.padding_mode) class RandomCropToSequence(torch.nn.Module): """Randomly crop and/or pad the image to fit sequence length 
constraints. This maintains aspect ratio while ensuring the resulting image, when divided into patches, will not exceed the specified maximum sequence length. Similar to CentralCropToSequence but with randomized positioning. """ def __init__( self, patch_size: int, max_sequence_len: int, divisible_by_patch: bool = True, fill: Union[int, Tuple[int, int, int]] = 0, padding_mode: str = 'constant' ): """ Args: patch_size: Size of patches (int or tuple of (patch_h, patch_w)) max_sequence_len: Maximum allowed sequence length for the resulting image divisible_by_patch: If True, resulting image dimensions will be multiples of patch_size fill: Fill value for padding padding_mode: Padding mode ('constant', 'edge', 'reflect', 'symmetric') """ super().__init__() self.patch_size = patch_size self.max_sequence_len = max_sequence_len self.divisible_by_patch = divisible_by_patch self.fill = fill self.padding_mode = padding_mode @staticmethod def get_params(img, target_size): """Get random position for crop/pad.""" _, image_height, image_width = transforms.functional.get_dimensions(img) delta_height = image_height - target_size[0] delta_width = image_width - target_size[1] # Handle both positive (crop) and negative (pad) deltas if delta_height == 0: top = 0 else: top = int(math.copysign(random.randint(0, abs(delta_height)), delta_height)) if delta_width == 0: left = 0 else: left = int(math.copysign(random.randint(0, abs(delta_width)), delta_width)) return top, left def forward(self, img): """Randomly crop or pad the image to maintain aspect ratio and fit sequence constraint.""" # Get current dimensions _, img_h, img_w = transforms.functional.get_dimensions(img) # Calculate target dimensions that satisfy sequence length # We use max_ratio=1.0 to prevent upscaling - we only want to crop or maintain current size _, target_hw = get_image_size_for_seq( (img_h, img_w), self.patch_size, self.max_sequence_len, self.divisible_by_patch, max_ratio=1.0 # Prevent upscaling ) # Get random 
position for crop/pad top, left = self.get_params(img, target_hw) # Apply crop or pad return crop_or_pad( img, top=top, left=left, height=target_hw[0], width=target_hw[1], fill=self.fill, padding_mode=self.padding_mode, ) def __repr__(self) -> str: return (f"{self.__class__.__name__}(patch_size={self.patch_size}, " f"max_sequence_len={self.max_sequence_len}, " f"divisible_by_patch={self.divisible_by_patch})") def _validate_range(value, name, length=2): # Validate type and length if not isinstance(value, Sequence) or len(value) != length: raise ValueError(f"{name} should be a sequence of length {length}.") # Validate order if value[0] > value[1]: warnings.warn(f"{name.capitalize()} range reversed. Swapping.") return value[1], value[0] return value class RandomResizedCropToSequence(torch.nn.Module): """ Randomly crop the input image to a subregion with varying area and aspect ratio (relative to the original), then resize that crop to a target size. The target size is determined such that patchifying the resized image (with `patch_size`) does not exceed `max_seq_len` patches, while maintaining the aspect ratio of the crop. This combines aspects of torchvision's RandomResizedCrop with sequence length constraints. Args: patch_size (int or tuple[int, int]): Patch dimensions (patch_h, patch_w) for sequence length calculation. max_seq_len (int): Maximum number of patches allowed in the final image. scale (tuple[float, float]): Range (min, max) of area fraction of the original image to crop. ratio (tuple[float, float]): Range (min, max) of aspect ratio *multipliers* for the crop, relative to the original image's aspect ratio. E.g., (0.75, 1.333) means the crop's aspect ratio will be sampled between 0.75*orig_ar and 1.333*orig_ar. Uses log-uniform sampling. interpolation (str or InterpolationMode): Interpolation mode for resizing. Can be 'bilinear', 'bicubic', 'nearest', or 'random' (chooses between bilinear and bicubic). Defaults to 'bicubic'. 
divisible_by_patch (bool): If True, the final image height and width will be multiples of the respective patch dimensions. Defaults to True. max_ratio (float, optional): An optional upper limit on the scaling ratio applied during resizing. Prevents excessive upsampling of the initial crop. `max_ratio=1.0` prevents any upsampling beyond the cropped size. Defaults to None (no limit). final_scale_range (tuple[float, float], optional): If provided, applies an *additional* random scaling factor to the final target size. The factor is sampled uniformly from this range, and multiplied by the size determined by `get_image_size_for_seq`. E.g., (0.8, 1.0) means the final size will be between 80% and 100% of the maximum feasible size. Defaults to None (use maximum feasible size). attempts (int): Number of attempts to sample a valid crop geometry before falling back to a center crop strategy. Defaults to 10. """ def __init__( self, patch_size: Union[int, Tuple[int, int]] = 16, max_seq_len: int = 1024, scale: Tuple[float, float] = (0.08, 1.0), ratio: Tuple[float, float] = (.8, 1.25), interpolation: Union[str, InterpolationMode] = 'bicubic', divisible_by_patch: bool = True, max_ratio: Optional[float] = None, final_scale_range: Optional[Tuple[float, float]] = None, attempts: int = 10, ): super().__init__() if isinstance(patch_size, int): self.patch_h, self.patch_w = patch_size, patch_size else: # Assume it's a tuple/list: (patch_h, patch_w) if len(patch_size) != 2: raise ValueError("patch_size tuple must have exactly two elements (patch_h, patch_w).") self.patch_h, self.patch_w = patch_size self.max_seq_len = max_seq_len self.scale = scale self.ratio = ratio self.divisible_by_patch = divisible_by_patch self.max_ratio = max_ratio self.final_scale_range = final_scale_range self.attempts = attempts if isinstance(interpolation, str): if interpolation == 'random': self.interpolation = _RANDOM_INTERPOLATION else: self.interpolation = str_to_interp_mode(interpolation) else: 
self.interpolation = interpolation # Validate scale and ratio self.scale = _validate_range(self.scale, "scale") self.ratio = _validate_range(self.ratio, "ratio") # Validate final_scale_range if provided if self.final_scale_range is not None: self.final_scale_range = _validate_range(self.final_scale_range, "final_scale_range") # Additional validation for final_scale_range values if not (0.0 <= self.final_scale_range[0] <= self.final_scale_range[1] <= 1.0): warnings.warn("final_scale_range values should ideally be between 0.0 and 1.0.") @staticmethod def get_params( img: torch.Tensor, scale: Tuple[float, float], ratio: Tuple[float, float], crop_attempts: int = 10, patch_h: int = 16, patch_w: int = 16, max_seq_len: int = 1024, divisible_by_patch: bool = True, max_ratio: Optional[float] = None, final_scale_range: Optional[Tuple[float, float]] = None, interpolation: Union[List[InterpolationMode], InterpolationMode] = _RANDOM_INTERPOLATION, ) -> Tuple[Tuple[int, int, int, int], Tuple[int, int], InterpolationMode]: """ Get parameters for a random sized crop relative to image aspect ratio. 
""" _, height, width = F.get_dimensions(img) if height <= 0 or width <= 0: raise ValueError(f"Input image must have positive dimensions, got H={height}, W={width}") area = height * width orig_aspect = width / height log_ratio = (math.log(ratio[0]), math.log(ratio[1])) for _ in range(crop_attempts): target_area = area * random.uniform(scale[0], scale[1]) aspect_ratio_factor = math.exp(random.uniform(log_ratio[0], log_ratio[1])) aspect_ratio = orig_aspect * aspect_ratio_factor # Calculate target dimensions for the crop # target_area = crop_w * crop_h, aspect_ratio = crop_w / crop_h # => crop_h = sqrt(target_area / aspect_ratio) # => crop_w = sqrt(target_area * aspect_ratio) crop_h = int(round(math.sqrt(target_area / aspect_ratio))) crop_w = int(round(math.sqrt(target_area * aspect_ratio))) if 0 < crop_w <= width and 0 < crop_h <= height: top = random.randint(0, height - crop_h) left = random.randint(0, width - crop_w) break else: # Fallback strategy, use center crop trying to respect ratio range min_aspect_ratio = orig_aspect * ratio[0] max_aspect_ratio = orig_aspect * ratio[1] if orig_aspect < min_aspect_ratio: # Original is narrower than target min, clamp width crop_w = width crop_h = min(int(round(crop_w / min_aspect_ratio)), height) elif orig_aspect > max_aspect_ratio: # Original is wider than target max, clamp height crop_h = height crop_w = min(int(round(crop_h * max_aspect_ratio)), width) else: # Aspect ratio is within range, take the largest possible crop (full image) crop_w = width crop_h = height # Ensure valid dimensions after fallback calculation crop_h = max(1, crop_h) crop_w = max(1, crop_w) top = (height - crop_h) // 2 left = (width - crop_w) // 2 # Determine max feasible size for scaling of the *cropped* region feasible_ratio, feasible_size = get_image_size_for_seq( (crop_h, crop_w), patch_size=(patch_h, patch_w), # Pass as tuple max_seq_len=max_seq_len, divisible_by_patch=divisible_by_patch, max_ratio=max_ratio, ) # Optionally apply final scale 
randomization final_size = feasible_size if final_scale_range is not None: min_sc, max_sc = final_scale_range scale_factor = random.uniform(min_sc, max_sc) scale_factor = min(max(scale_factor, 0.0), 1.0) # Clamp factor just in case # Calculate raw scaled size # Note: feasible_ratio already accounts for max_ratio clamp if any raw_h = crop_h * feasible_ratio * scale_factor raw_w = crop_w * feasible_ratio * scale_factor # Re-apply divisibility constraint if needed if divisible_by_patch: # Use ceil to avoid going under minimum patch size target_h = patch_h * math.ceil(raw_h / patch_h) target_w = patch_w * math.ceil(raw_w / patch_w) else: target_h = int(round(raw_h)) target_w = int(round(raw_w)) # Ensure final size is at least one patch dimension target_h = max(target_h, patch_h) target_w = max(target_w, patch_w) final_size = (target_h, target_w) # Final check: Ensure this randomized size still fits max_seq_len # (It should, as we scaled down, but rounding might theoretically push it over) num_patches_h = final_size[0] // patch_h num_patches_w = final_size[1] // patch_w if (num_patches_h * num_patches_w) > max_seq_len: # If it exceeds, revert to the original feasible_size (safest) final_size = feasible_size warnings.warn(f"Final scale randomization ({scale_factor:.2f}) resulted in size {final_size} exceeding max_seq_len={max_seq_len} after rounding. 
Reverting to feasible size {feasible_size}.") # Select interpolation mode if isinstance(interpolation, (tuple, list)): interpolation = random.choice(interpolation) else: interpolation = interpolation return (top, left, crop_h, crop_w), final_size, interpolation def forward(self, img: torch.Tensor) -> torch.Tensor: # Sample crop, resize, and interpolation parameters crop_params, final_size, interpolation = self.get_params( img, scale=self.scale, ratio=self.ratio, crop_attempts=self.attempts, patch_h=self.patch_h, patch_w=self.patch_w, divisible_by_patch=self.divisible_by_patch, max_seq_len=self.max_seq_len, final_scale_range=self.final_scale_range, interpolation=self.interpolation, ) top, left, crop_h, crop_w = crop_params output = F.resized_crop( img, top=top, left=left, height=crop_h, width=crop_w, size=final_size, interpolation=interpolation, antialias=True, ) return output def __repr__(self) -> str: if isinstance(self.interpolation, (tuple, list)): interpolate_str = ', '.join(str(m).split('.')[-1] for m in self.interpolation) else: interpolate_str = str(self.interpolation) format_string = self.__class__.__name__ + '(' format_string += f"patch_size=({self.patch_h}, {self.patch_w})" format_string += f", max_seq_len={self.max_seq_len}" format_string += f", scale={self.scale}" format_string += f", ratio={self.ratio}" format_string += f", interpolation=[{interpolate_str}]" format_string += f", divisible_by_patch={self.divisible_by_patch}" format_string += f", max_ratio={self.max_ratio}" format_string += f", final_scale_range={self.final_scale_range}" format_string += f", attempts={self.attempts}" format_string += ')' return format_string def patchify_image( img: torch.Tensor, patch_size: Tuple[int, int], pad: bool = True, include_info: bool = True, flatten_patches: bool = True, ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: c, h, w = img.shape ph, pw = patch_size # Ensure the image is divisible by patch size if pad and (h % ph != 0 or w % 
pw != 0): pad_h = (ph - h % ph) % ph # amount to add on bottom pad_w = (pw - w % pw) % pw # amount to add on right img = torch.nn.functional.pad(img, (0, pad_w, 0, pad_h)) c, h, w = img.shape # Calculate number of patches in each dimension nh, nw = h // ph, w // pw # Reshape image to patches patches = img.view(c, nh, ph, nw, pw).permute(1, 3, 2, 4, 0) # [nh, nw, ph, pw, c] -> [nh * nw, ph * pw * c] or [nh * nw, ph, pw, c] patches = patches.reshape(-1, ph * pw * c) if flatten_patches else patches.reshape(-1, ph, pw, c) if include_info: # Create coordinate indices y_idx, x_idx = torch.meshgrid(torch.arange(nh), torch.arange(nw), indexing='ij') # Stack into a single coords tensor [N, 2] with (y, x) order coord = torch.stack([y_idx.reshape(-1), x_idx.reshape(-1)], dim=1) # Create type indicators (all 1s for regular patches) valid = torch.ones(nh * nw, dtype=torch.bool) return patches, coord, valid return patches class Patchify(torch.nn.Module): """Transform an image into patches with corresponding coordinates and type indicators.""" def __init__( self, patch_size: Union[int, Tuple[int, int]], flatten_patches: bool = True ): super().__init__() self.patch_size = patch_size if isinstance(patch_size, tuple) else (patch_size, patch_size) self.flatten_patches = flatten_patches def forward(self, img): """ Args: img: A PIL Image or tensor of shape [C, H, W] Returns: A dictionary containing: - patches: Tensor of shape [N, P*P*C] if flatten_patches=True, or [N, Ph, Pw, C] if flatten_patches=False - patch_coord: Tensor of shape [N, 2] with (y, x) coordinates - patch_valid: Valid indicator (all 1s for non-padding patches) """ if isinstance(img, Image.Image): # Convert PIL Image to tensor [C, H, W] img = transforms.functional.to_tensor(img) patches, coord, valid = patchify_image(img, self.patch_size, flatten_patches=self.flatten_patches) return { 'patches': patches, 'patch_coord': coord, 'patch_valid': valid, }
pytorch-image-models/timm/data/naflex_transforms.py/0
{ "file_path": "pytorch-image-models/timm/data/naflex_transforms.py", "repo_id": "pytorch-image-models", "token_count": 14531 }
258
""" Tensorflow Preprocessing Adapter Allows use of Tensorflow preprocessing pipeline in PyTorch Transform Copyright of original Tensorflow code below. Hacked together by / Copyright 2020 Ross Wightman """ # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ImageNet preprocessing for MnasNet.""" import tensorflow.compat.v1 as tf import numpy as np IMAGE_SIZE = 224 CROP_PADDING = 32 tf.compat.v1.disable_eager_execution() def distorted_bounding_box_crop(image_bytes, bbox, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0), max_attempts=100, scope=None): """Generates cropped_image using one of the bboxes randomly distorted. See `tf.image.sample_distorted_bounding_box` for more documentation. Args: image_bytes: `Tensor` of binary image data. bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` where each coordinate is [0, 1) and the coordinates are arranged as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole image. min_object_covered: An optional `float`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. aspect_ratio_range: An optional list of `float`s. The cropped area of the image must have an aspect ratio = width / height within this range. area_range: An optional list of `float`s. 
The cropped area of the image must contain a fraction of the supplied image within in this range. max_attempts: An optional `int`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. scope: Optional `str` for name scope. Returns: cropped image `Tensor` """ with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]): shape = tf.image.extract_jpeg_shape(image_bytes) sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( shape, bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True) bbox_begin, bbox_size, _ = sample_distorted_bounding_box # Crop the image to the specified bounding box. offset_y, offset_x, _ = tf.unstack(bbox_begin) target_height, target_width, _ = tf.unstack(bbox_size) crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) return image def _at_least_x_are_equal(a, b, x): """At least `x` of `a` and `b` `Tensors` are equal.""" match = tf.equal(a, b) match = tf.cast(match, tf.int32) return tf.greater_equal(tf.reduce_sum(match), x) def _decode_and_random_crop(image_bytes, image_size, resize_method): """Make a random crop of image_size.""" bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) image = distorted_bounding_box_crop( image_bytes, bbox, min_object_covered=0.1, aspect_ratio_range=(3. / 4, 4. 
/ 3.), area_range=(0.08, 1.0), max_attempts=10, scope=None)
    # If the random crop degenerated (at least 3 dims of the original shape
    # survived unchanged), fall back to a deterministic center crop.
    original_shape = tf.image.extract_jpeg_shape(image_bytes)
    bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)
    image = tf.cond(
        bad,
        # NOTE(review): _decode_and_center_crop is defined below with a third
        # `resize_method` parameter that is not passed here; this fallback
        # branch would raise a TypeError if ever taken -- confirm and fix.
        lambda: _decode_and_center_crop(image_bytes, image_size),
        lambda: tf.image.resize([image], [image_size, image_size], resize_method)[0])

    return image


def _decode_and_center_crop(image_bytes, image_size, resize_method):
    """Crops to center of image with padding then scales image_size.

    Args:
        image_bytes: scalar string `Tensor` holding raw JPEG bytes.
        image_size: int, output height/width of the square result.
        resize_method: `tf.image.ResizeMethod` used for the final resize.

    Returns:
        A float `Tensor` of shape [image_size, image_size, 3].
    """
    shape = tf.image.extract_jpeg_shape(image_bytes)
    image_height = shape[0]
    image_width = shape[1]

    # Crop the largest centered square such that, after resizing to
    # `image_size`, an implicit CROP_PADDING pixel border is trimmed away.
    padded_center_crop_size = tf.cast(
        ((image_size / (image_size + CROP_PADDING)) *
         tf.cast(tf.minimum(image_height, image_width), tf.float32)),
        tf.int32)

    offset_height = ((image_height - padded_center_crop_size) + 1) // 2
    offset_width = ((image_width - padded_center_crop_size) + 1) // 2
    crop_window = tf.stack([
        offset_height, offset_width, padded_center_crop_size, padded_center_crop_size])
    image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
    image = tf.image.resize([image], [image_size, image_size], resize_method)[0]

    return image


def _flip(image):
    """Random horizontal image flip."""
    image = tf.image.random_flip_left_right(image)
    return image


def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):
    """Preprocesses the given image for training.

    Applies a random crop, random horizontal flip, and resize to a square.

    Args:
        image_bytes: `Tensor` representing an image binary of arbitrary size.
        use_bfloat16: `bool` for whether to use bfloat16.
        image_size: image size.
        interpolation: image interpolation method ('bicubic' or 'bilinear').

    Returns:
        A preprocessed image `Tensor`.
    """
    resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR
    image = _decode_and_random_crop(image_bytes, image_size, resize_method)
    image = _flip(image)
    image = tf.reshape(image, [image_size, image_size, 3])
    image = tf.image.convert_image_dtype(
        image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
    return image


def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):
    """Preprocesses the given image for evaluation.

    Deterministic center crop + resize; no augmentation.

    Args:
        image_bytes: `Tensor` representing an image binary of arbitrary size.
        use_bfloat16: `bool` for whether to use bfloat16.
        image_size: image size.
        interpolation: image interpolation method ('bicubic' or 'bilinear').

    Returns:
        A preprocessed image `Tensor`.
    """
    resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR
    image = _decode_and_center_crop(image_bytes, image_size, resize_method)
    image = tf.reshape(image, [image_size, image_size, 3])
    image = tf.image.convert_image_dtype(
        image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
    return image


def preprocess_image(image_bytes, is_training=False, use_bfloat16=False, image_size=IMAGE_SIZE, interpolation='bicubic'):
    """Preprocesses the given image.

    Dispatches to the train or eval preprocessing pipeline.

    Args:
        image_bytes: `Tensor` representing an image binary of arbitrary size.
        is_training: `bool` for whether the preprocessing is for training.
        use_bfloat16: `bool` for whether to use bfloat16.
        image_size: image size.
        interpolation: image interpolation method

    Returns:
        A preprocessed image `Tensor` with value range of [0, 255].
    """
    if is_training:
        return preprocess_for_train(image_bytes, use_bfloat16, image_size, interpolation)
    else:
        return preprocess_for_eval(image_bytes, use_bfloat16, image_size, interpolation)


class TfPreprocessTransform:
    """TensorFlow 1.x preprocessing wrapped as a torchvision-style transform.

    Builds the preprocessing graph once on CPU at construction and lazily
    creates a tf.Session on first call. Input is raw JPEG bytes; output is a
    uint8 numpy array in CHW order.
    """

    def __init__(self, is_training=False, size=224, interpolation='bicubic'):
        self.is_training = is_training
        self.size = size[0] if isinstance(size, tuple) else size  # square output only
        self.interpolation = interpolation
        self._image_bytes = None  # placeholder created in _build_tf_graph
        self.process_image = self._build_tf_graph()
        self.sess = None  # created lazily on first __call__

    def _build_tf_graph(self):
        """Build the TF graph: bytes placeholder -> preprocessed image node."""
        with tf.device('/cpu:0'):
            self._image_bytes = tf.placeholder(
                shape=[],
                dtype=tf.string,
            )
            img = preprocess_image(
                self._image_bytes, self.is_training, False, self.size, self.interpolation)
        return img

    def __call__(self, image_bytes):
        """Run the graph on raw JPEG bytes; return uint8 CHW numpy array."""
        if self.sess is None:
            self.sess = tf.Session()
        img = self.sess.run(self.process_image, feed_dict={self._image_bytes: image_bytes})
        # Graph outputs float in [0, 255]; quantize back to uint8 for PyTorch side.
        img = img.round().clip(0, 255).astype(np.uint8)
        if img.ndim < 3:
            img = np.expand_dims(img, axis=-1)
        img = np.rollaxis(img, 2)  # HWC to CHW
        return img
pytorch-image-models/timm/data/tf_preprocessing.py/0
{ "file_path": "pytorch-image-models/timm/data/tf_preprocessing.py", "repo_id": "pytorch-image-models", "token_count": 3775 }
259
""" PyTorch Conditionally Parameterized Convolution (CondConv) Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference (https://arxiv.org/abs/1904.04971) Hacked together by / Copyright 2020 Ross Wightman """ import math from functools import partial import torch from torch import nn as nn from torch.nn import functional as F from ._fx import register_notrace_module from .helpers import to_2tuple from .conv2d_same import conv2d_same from .padding import get_padding_value def get_condconv_initializer(initializer, num_experts, expert_shape): def condconv_initializer(weight): """CondConv initializer function.""" num_params = math.prod(expert_shape) if (len(weight.shape) != 2 or weight.shape[0] != num_experts or weight.shape[1] != num_params): raise (ValueError( 'CondConv variables must have shape [num_experts, num_params]')) for i in range(num_experts): initializer(weight[i].view(expert_shape)) return condconv_initializer @register_notrace_module class CondConv2d(nn.Module): """ Conditionally Parameterized Convolution Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion: https://github.com/pytorch/pytorch/issues/17983 """ __constants__ = ['in_channels', 'out_channels', 'dynamic_padding'] def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): super(CondConv2d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = to_2tuple(kernel_size) self.stride = to_2tuple(stride) padding_val, is_padding_dynamic = get_padding_value( padding, kernel_size, stride=stride, dilation=dilation) self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript self.padding = to_2tuple(padding_val) self.dilation = to_2tuple(dilation) self.groups = groups 
self.num_experts = num_experts self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size weight_num_param = 1 for wd in self.weight_shape: weight_num_param *= wd self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) if bias: self.bias_shape = (self.out_channels,) self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): init_weight = get_condconv_initializer( partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) init_weight(self.weight) if self.bias is not None: fan_in = math.prod(self.weight_shape[1:]) bound = 1 / math.sqrt(fan_in) init_bias = get_condconv_initializer( partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) init_bias(self.bias) def forward(self, x, routing_weights): B, C, H, W = x.shape weight = torch.matmul(routing_weights, self.weight) new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size weight = weight.view(new_weight_shape) bias = None if self.bias is not None: bias = torch.matmul(routing_weights, self.bias) bias = bias.view(B * self.out_channels) # move batch elements with channels so each batch element can be efficiently convolved with separate kernel # reshape instead of view to work with channels_last input x = x.reshape(1, B * C, H, W) if self.dynamic_padding: out = conv2d_same( x, weight, bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups * B) else: out = F.conv2d( x, weight, bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups * B) out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) # Literal port (from TF definition) # x = torch.split(x, 1, 0) # weight = torch.split(weight, 1, 0) # if self.bias is not None: # bias = 
torch.matmul(routing_weights, self.bias) # bias = torch.split(bias, 1, 0) # else: # bias = [None] * B # out = [] # for xi, wi, bi in zip(x, weight, bias): # wi = wi.view(*self.weight_shape) # if bi is not None: # bi = bi.view(*self.bias_shape) # out.append(self.conv_fn( # xi, wi, bi, stride=self.stride, padding=self.padding, # dilation=self.dilation, groups=self.groups)) # out = torch.cat(out, 0) return out
pytorch-image-models/timm/layers/cond_conv2d.py/0
{ "file_path": "pytorch-image-models/timm/layers/cond_conv2d.py", "repo_id": "pytorch-image-models", "token_count": 2327 }
260
""" Global Context Attention Block Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond` - https://arxiv.org/abs/1904.11492 Official code consulted as reference: https://github.com/xvjiarui/GCNet Hacked together by / Copyright 2021 Ross Wightman """ from torch import nn as nn import torch.nn.functional as F from .create_act import create_act_layer, get_act_layer from .helpers import make_divisible from .mlp import ConvMlp from .norm import LayerNorm2d class GlobalContext(nn.Module): def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False, rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'): super(GlobalContext, self).__init__() act_layer = get_act_layer(act_layer) self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None if rd_channels is None: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) if fuse_add: self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) else: self.mlp_add = None if fuse_scale: self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) else: self.mlp_scale = None self.gate = create_act_layer(gate_layer) self.init_last_zero = init_last_zero self.reset_parameters() def reset_parameters(self): if self.conv_attn is not None: nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu') if self.mlp_add is not None: nn.init.zeros_(self.mlp_add.fc2.weight) def forward(self, x): B, C, H, W = x.shape if self.conv_attn is not None: attn = self.conv_attn(x).reshape(B, 1, H * W) # (B, 1, H * W) attn = F.softmax(attn, dim=-1).unsqueeze(3) # (B, 1, H * W, 1) context = x.reshape(B, C, H * W).unsqueeze(1) @ attn context = context.view(B, C, 1, 1) else: context = x.mean(dim=(2, 3), keepdim=True) if self.mlp_scale is not None: mlp_x = self.mlp_scale(context) x = x * self.gate(mlp_x) if self.mlp_add is not None: 
mlp_x = self.mlp_add(context) x = x + mlp_x return x
pytorch-image-models/timm/layers/global_context.py/0
{ "file_path": "pytorch-image-models/timm/layers/global_context.py", "repo_id": "pytorch-image-models", "token_count": 1169 }
261
""" Normalization layers and wrappers Norm layer definitions that support fast norm and consistent channel arg order (always first arg). Hacked together by / Copyright 2022 Ross Wightman """ import numbers from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from .fast_norm import ( is_fast_norm, fast_group_norm, fast_layer_norm, fast_rms_norm, rms_norm2d, fast_rms_norm2d, fast_simple_norm, simple_norm, ) try: from torch.nn.functional import rms_norm except ImportError: from .fast_norm import rms_norm class GroupNorm(nn.GroupNorm): _fast_norm: torch.jit.Final[bool] def __init__( self, num_channels: int, num_groups: int = 32, eps: float = 1e-5, affine: bool = True, **kwargs, ): # NOTE num_channels is swapped to first arg for consistency in swapping norm layers with BN super().__init__(num_groups, num_channels, eps=eps, affine=affine, **kwargs) self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x): if self._fast_norm: return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) class GroupNorm1(nn.GroupNorm): """ Group Normalization with 1 group. 
Input: tensor in shape [B, C, *] """ _fast_norm: torch.jit.Final[bool] def __init__(self, num_channels: int, **kwargs): super().__init__(1, num_channels, **kwargs) self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x: torch.Tensor) -> torch.Tensor: if self._fast_norm: return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) class LayerNorm(nn.LayerNorm): """ LayerNorm w/ fast norm option """ _fast_norm: torch.jit.Final[bool] def __init__( self, num_channels: int, eps: float = 1e-6, affine: bool = True, **kwargs, ): super().__init__(num_channels, eps=eps, elementwise_affine=affine, **kwargs) self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x: torch.Tensor) -> torch.Tensor: if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) return x class LayerNormFp32(nn.LayerNorm): """ LayerNorm """ def __init__( self, num_channels: int, eps: float = 1e-6, affine: bool = True, **kwargs, ): super().__init__(num_channels, eps=eps, elementwise_affine=affine, **kwargs) def forward(self, x: torch.Tensor) -> torch.Tensor: weight = self.weight.float() if self.weight is not None else None bias = self.bias.float() if self.bias is not None else None x = F.layer_norm(x.float(), self.normalized_shape, weight, bias, self.eps).to(x.dtype) return x class LayerNorm2d(nn.LayerNorm): """ LayerNorm for channels of '2D' spatial NCHW tensors """ _fast_norm: torch.jit.Final[bool] def __init__( self, num_channels: int, eps: float = 1e-6, affine: bool = True, **kwargs, ): super().__init__(num_channels, eps=eps, elementwise_affine=affine, **kwargs) self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, 
x: torch.Tensor) -> torch.Tensor: x = x.permute(0, 2, 3, 1) if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) x = x.permute(0, 3, 1, 2) return x class LayerNorm2dFp32(nn.LayerNorm): """ LayerNorm for channels of '2D' spatial NCHW tensors """ def __init__( self, num_channels: int, eps: float = 1e-6, affine: bool = True, **kwargs, ): super().__init__(num_channels, eps=eps, elementwise_affine=affine, **kwargs) def forward(self, x: torch.Tensor) -> torch.Tensor: x = x.permute(0, 2, 3, 1) weight = self.weight.float() if self.weight is not None else None bias = self.bias.float() if self.bias is not None else None x = F.layer_norm(x.float(), self.normalized_shape, weight, bias, self.eps).to(x.dtype) x = x.permute(0, 3, 1, 2) return x def _is_contiguous(tensor: torch.Tensor) -> bool: # jit is oh so lovely :/ if torch.jit.is_scripting(): return tensor.is_contiguous() else: return tensor.is_contiguous(memory_format=torch.contiguous_format) def _layer_norm_cf(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float): s, u = torch.var_mean(x, dim=1, unbiased=False, keepdim=True) x = (x - u) * torch.rsqrt(s + eps) x = x * weight[:, None, None] + bias[:, None, None] return x def _layer_norm_cf_sqm(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float): u = x.mean(dim=1, keepdim=True) s = ((x * x).mean(dim=1, keepdim=True) - (u * u)).clamp(0) x = (x - u) * torch.rsqrt(s + eps) x = x * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1) return x class LayerNormExp2d(nn.LayerNorm): """ LayerNorm for channels_first tensors with 2d spatial dimensions (ie N, C, H, W). Experimental implementation w/ manual norm for tensors non-contiguous tensors. This improves throughput in some scenarios (tested on Ampere GPU), esp w/ channels_last layout. However, benefits are not always clear and can perform worse on other GPUs. 
""" def __init__(self, num_channels: int, eps: float = 1e-6): super().__init__(num_channels, eps=eps) def forward(self, x) -> torch.Tensor: if _is_contiguous(x): x = F.layer_norm( x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2) else: x = _layer_norm_cf(x, self.weight, self.bias, self.eps) return x class RmsNorm(nn.Module): """ RmsNorm w/ fast (apex) norm if available """ __constants__ = ['normalized_shape', 'eps', 'elementwise_affine', '_fast_norm'] normalized_shape: Tuple[int, ...] eps: float elementwise_affine: bool _fast_norm: bool def __init__( self, channels: int, eps: float = 1e-6, affine: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() normalized_shape = channels if isinstance(normalized_shape, numbers.Integral): # mypy error: incompatible types in assignment normalized_shape = (normalized_shape,) # type: ignore[assignment] self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] self.eps = eps self.elementwise_affine = affine self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) if self.elementwise_affine: self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: nn.init.ones_(self.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: # NOTE fast norm fallback needs our rms norm impl, so both paths through here. # Since there is no built-in PyTorch impl, always uses APEX RmsNorm if installed. 
if self._fast_norm: x = fast_rms_norm(x, self.normalized_shape, self.weight, self.eps) else: x = rms_norm(x, self.normalized_shape, self.weight, self.eps) return x class RmsNormFp32(nn.Module): """ RmsNorm w/ fast (apex) norm if available """ __constants__ = ['normalized_shape', 'eps', 'elementwise_affine'] normalized_shape: Tuple[int, ...] eps: float elementwise_affine: bool def __init__( self, channels: int, eps: float = 1e-6, affine: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() normalized_shape = channels if isinstance(normalized_shape, numbers.Integral): # mypy error: incompatible types in assignment normalized_shape = (normalized_shape,) # type: ignore[assignment] self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] self.eps = eps self.elementwise_affine = affine if self.elementwise_affine: self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: nn.init.ones_(self.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: weight = self.weight.float() if self.weight is not None else None x = rms_norm(x.float(), self.normalized_shape, weight, self.eps).to(x.dtype) return x class RmsNorm2d(nn.Module): """ RmsNorm2D for NCHW tensors, w/ fast apex or cast norm if available NOTE: It's currently (2025-05-10) faster to use an eager 2d kernel that does reduction on dim=1 than to permute and use internal PyTorch F.rms_norm, this may change if something like https://github.com/pytorch/pytorch/pull/150576 lands. """ __constants__ = ['normalized_shape', 'eps', 'elementwise_affine', '_fast_norm'] normalized_shape: Tuple[int, ...] 
eps: float elementwise_affine: bool _fast_norm: bool def __init__( self, channels: int, eps: float = 1e-6, affine: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() normalized_shape = channels if isinstance(normalized_shape, numbers.Integral): # mypy error: incompatible types in assignment normalized_shape = (normalized_shape,) # type: ignore[assignment] self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] self.eps = eps self.elementwise_affine = affine self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) if self.elementwise_affine: self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: nn.init.ones_(self.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: # NOTE fast norm fallback needs our rms norm impl, so both paths through here. # Since there is no built-in PyTorch impl, always use APEX RmsNorm if is installed. if self._fast_norm: x = fast_rms_norm2d(x, self.normalized_shape, self.weight, self.eps) else: x = rms_norm2d(x, self.normalized_shape, self.weight, self.eps) return x class RmsNorm2dFp32(nn.Module): """ RmsNorm2D for NCHW tensors, w/ fast apex or cast norm if available NOTE: It's currently (2025-05-10) faster to use an eager 2d kernel that does reduction on dim=1 than to permute and use internal PyTorch F.rms_norm, this may change if something like https://github.com/pytorch/pytorch/pull/150576 lands. """ __constants__ = ['normalized_shape', 'eps', 'elementwise_affine'] normalized_shape: Tuple[int, ...] 
eps: float elementwise_affine: bool def __init__( self, channels: int, eps: float = 1e-6, affine: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() normalized_shape = channels if isinstance(normalized_shape, numbers.Integral): # mypy error: incompatible types in assignment normalized_shape = (normalized_shape,) # type: ignore[assignment] self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] self.eps = eps self.elementwise_affine = affine if self.elementwise_affine: self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: nn.init.ones_(self.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: weight = self.weight.float() if self.weight is not None else None x = rms_norm2d(x.float(), self.normalized_shape, weight, self.eps).to(x.dtype) return x class SimpleNorm(nn.Module): """ SimpleNorm (x / std(x)) """ __constants__ = ['normalized_shape', 'eps', 'elementwise_affine', '_fast_norm'] normalized_shape: Tuple[int, ...] 
eps: float elementwise_affine: bool _fast_norm: bool def __init__( self, channels: int, eps: float = 1e-6, affine: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() normalized_shape = channels if isinstance(normalized_shape, numbers.Integral): # mypy error: incompatible types in assignment normalized_shape = (normalized_shape,) # type: ignore[assignment] self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] self.eps = eps self.elementwise_affine = affine self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) if self.elementwise_affine: self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: nn.init.ones_(self.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: if self._fast_norm: x = fast_simple_norm(x, self.normalized_shape, self.weight, self.eps) else: x = simple_norm(x, self.normalized_shape, self.weight, self.eps) return x class SimpleNormFp32(nn.Module): """ SimpleNorm (x / std(x)) """ __constants__ = ['normalized_shape', 'eps', 'elementwise_affine'] normalized_shape: Tuple[int, ...] 
eps: float elementwise_affine: bool def __init__( self, channels: int, eps: float = 1e-6, affine: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() normalized_shape = channels if isinstance(normalized_shape, numbers.Integral): # mypy error: incompatible types in assignment normalized_shape = (normalized_shape,) # type: ignore[assignment] self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] self.eps = eps self.elementwise_affine = affine if self.elementwise_affine: self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: nn.init.ones_(self.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: weight = self.weight.float() if self.weight is not None else None x = simple_norm(x.float(), self.normalized_shape, weight, self.eps).to(x.dtype) return x class SimpleNorm2d(nn.Module): """ SimpleNorm for NCHW tensors """ __constants__ = ['normalized_shape', 'eps', 'elementwise_affine', '_fast_norm'] normalized_shape: Tuple[int, ...] 
eps: float elementwise_affine: bool _fast_norm: bool def __init__( self, channels: int, eps: float = 1e-6, affine: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() normalized_shape = channels if isinstance(normalized_shape, numbers.Integral): # mypy error: incompatible types in assignment normalized_shape = (normalized_shape,) # type: ignore[assignment] self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] self.eps = eps self.elementwise_affine = affine self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) if self.elementwise_affine: self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: nn.init.ones_(self.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: x = x.permute(0, 2, 3, 1) if self._fast_norm: x = fast_simple_norm(x, self.normalized_shape, self.weight, self.eps) else: x = simple_norm(x, self.normalized_shape, self.weight, self.eps) x = x.permute(0, 3, 1, 2) return x class SimpleNorm2dFp32(nn.Module): """ SimpleNorm for NCHW tensors """ __constants__ = ['normalized_shape', 'eps', 'elementwise_affine'] normalized_shape: Tuple[int, ...] 
eps: float elementwise_affine: bool def __init__( self, channels: int, eps: float = 1e-6, affine: bool = True, device=None, dtype=None, ) -> None: factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() normalized_shape = channels if isinstance(normalized_shape, numbers.Integral): # mypy error: incompatible types in assignment normalized_shape = (normalized_shape,) # type: ignore[assignment] self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] self.eps = eps self.elementwise_affine = affine if self.elementwise_affine: self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: nn.init.ones_(self.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: x = x.permute(0, 2, 3, 1) weight = self.weight.float() if self.weight is not None else None x = simple_norm(x.float(), self.normalized_shape, weight, self.eps).to(x.dtype) x = x.permute(0, 3, 1, 2) return x
pytorch-image-models/timm/layers/norm.py/0
{ "file_path": "pytorch-image-models/timm/layers/norm.py", "repo_id": "pytorch-image-models", "token_count": 8998 }
262
""" Convolution with Weight Standardization (StdConv and ScaledStdConv) StdConv: @article{weightstandardization, author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille}, title = {Weight Standardization}, journal = {arXiv preprint arXiv:1903.10520}, year = {2019}, } Code: https://github.com/joe-siyuan-qiao/WeightStandardization ScaledStdConv: Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets Hacked together by / copyright Ross Wightman, 2021. """ import torch import torch.nn as nn import torch.nn.functional as F from ._fx import register_notrace_module from .padding import get_padding, get_padding_value, pad_same class StdConv2d(nn.Conv2d): """Conv2d with Weight Standardization. Used for BiT ResNet-V2 models. Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - https://arxiv.org/abs/1903.10520v2 """ def __init__( self, in_channel, out_channels, kernel_size, stride=1, padding=None, dilation=1, groups=1, bias=False, eps=1e-6): if padding is None: padding = get_padding(kernel_size, stride, dilation) super().__init__( in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.eps = eps def forward(self, x): weight = F.batch_norm( self.weight.reshape(1, self.out_channels, -1), None, None, training=True, momentum=0., eps=self.eps).reshape_as(self.weight) x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) return x @register_notrace_module class StdConv2dSame(nn.Conv2d): """Conv2d with Weight Standardization. TF compatible SAME padding. Used for ViT Hybrid model. 
Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - https://arxiv.org/abs/1903.10520v2 """ def __init__( self, in_channel, out_channels, kernel_size, stride=1, padding='SAME', dilation=1, groups=1, bias=False, eps=1e-6): padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) super().__init__( in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.same_pad = is_dynamic self.eps = eps def forward(self, x): if self.same_pad: x = pad_same(x, self.kernel_size, self.stride, self.dilation) weight = F.batch_norm( self.weight.reshape(1, self.out_channels, -1), None, None, training=True, momentum=0., eps=self.eps).reshape_as(self.weight) x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) return x class ScaledStdConv2d(nn.Conv2d): """Conv2d layer with Scaled Weight Standardization. Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. 
""" def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=None, dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): if padding is None: padding = get_padding(kernel_size, stride, dilation) super().__init__( in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) self.scale = gamma * self.weight[0].numel() ** -0.5 # gamma * 1 / sqrt(fan-in) self.eps = eps def forward(self, x): weight = F.batch_norm( self.weight.reshape(1, self.out_channels, -1), None, None, weight=(self.gain * self.scale).view(-1), training=True, momentum=0., eps=self.eps).reshape_as(self.weight) return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) @register_notrace_module class ScaledStdConv2dSame(nn.Conv2d): """Conv2d layer with Scaled Weight Standardization and Tensorflow-like SAME padding support Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. 
""" def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding='SAME', dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) super().__init__( in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) self.scale = gamma * self.weight[0].numel() ** -0.5 self.same_pad = is_dynamic self.eps = eps def forward(self, x): if self.same_pad: x = pad_same(x, self.kernel_size, self.stride, self.dilation) weight = F.batch_norm( self.weight.reshape(1, self.out_channels, -1), None, None, weight=(self.gain * self.scale).view(-1), training=True, momentum=0., eps=self.eps).reshape_as(self.weight) return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
pytorch-image-models/timm/layers/std_conv.py/0
{ "file_path": "pytorch-image-models/timm/layers/std_conv.py", "repo_id": "pytorch-image-models", "token_count": 2510 }
263
""" PyTorch FX Based Feature Extraction Helpers Using https://pytorch.org/vision/stable/feature_extraction.html """ from typing import Callable, Dict, List, Optional, Union, Tuple, Type import torch from torch import nn from timm.layers import ( create_feature_extractor, get_graph_node_names, register_notrace_module, register_notrace_function, is_notrace_module, is_notrace_function, get_notrace_functions, get_notrace_modules, Format, ) from ._features import _get_feature_info, _get_return_layers __all__ = [ 'register_notrace_module', 'is_notrace_module', 'get_notrace_modules', 'register_notrace_function', 'is_notrace_function', 'get_notrace_functions', 'create_feature_extractor', 'get_graph_node_names', 'FeatureGraphNet', 'GraphExtractNet', ] class FeatureGraphNet(nn.Module): """ A FX Graph based feature extractor that works with the model feature_info metadata """ return_dict: torch.jit.Final[bool] def __init__( self, model: nn.Module, out_indices: Tuple[int, ...], out_map: Optional[Dict] = None, output_fmt: str = 'NCHW', return_dict: bool = False, ): super().__init__() self.feature_info = _get_feature_info(model, out_indices) if out_map is not None: assert len(out_map) == len(out_indices) self.output_fmt = Format(output_fmt) return_nodes = _get_return_layers(self.feature_info, out_map) self.graph_module = create_feature_extractor(model, return_nodes) self.return_dict = return_dict def forward(self, x): out = self.graph_module(x) if self.return_dict: return out return list(out.values()) class GraphExtractNet(nn.Module): """ A standalone feature extraction wrapper that maps dict -> list or single tensor NOTE: * one can use feature_extractor directly if dictionary output is desired * unlike FeatureGraphNet, this is intended to be used standalone and not with model feature_info metadata for builtin feature extraction mode * create_feature_extractor can be used directly if dictionary output is acceptable Args: model: model to extract features from return_nodes: node 
names to return features from (dict or list) squeeze_out: if only one output, and output in list format, flatten to single tensor return_dict: return as dictionary from extractor with node names as keys, ignores squeeze_out arg """ return_dict: torch.jit.Final[bool] def __init__( self, model: nn.Module, return_nodes: Union[Dict[str, str], List[str]], squeeze_out: bool = True, return_dict: bool = False, ): super().__init__() self.squeeze_out = squeeze_out self.graph_module = create_feature_extractor(model, return_nodes) self.return_dict = return_dict def forward(self, x) -> Union[List[torch.Tensor], torch.Tensor]: out = self.graph_module(x) if self.return_dict: return out out = list(out.values()) return out[0] if self.squeeze_out and len(out) == 1 else out
pytorch-image-models/timm/models/_features_fx.py/0
{ "file_path": "pytorch-image-models/timm/models/_features_fx.py", "repo_id": "pytorch-image-models", "token_count": 1325 }
264
""" CoaT architecture. Paper: Co-Scale Conv-Attentional Image Transformers - https://arxiv.org/abs/2104.06399 Official CoaT code at: https://github.com/mlpc-ucsd/CoaT Modified from timm/models/vision_transformer.py """ from typing import List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_, _assert, LayerNorm from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['CoaT'] class ConvRelPosEnc(nn.Module): """ Convolutional relative position encoding. """ def __init__(self, head_chs, num_heads, window): """ Initialization. Ch: Channels per head. h: Number of heads. window: Window size(s) in convolutional relative positional encoding. It can have two forms: 1. An integer of window size, which assigns all attention heads with the same window s size in ConvRelPosEnc. 2. A dict mapping window size to #attention head splits ( e.g. {window size 1: #attention head split 1, window size 2: #attention head split 2}) It will apply different window size to the attention head splits. """ super().__init__() if isinstance(window, int): # Set the same window size for all attention heads. window = {window: num_heads} self.window = window elif isinstance(window, dict): self.window = window else: raise ValueError() self.conv_list = nn.ModuleList() self.head_splits = [] for cur_window, cur_head_split in window.items(): dilation = 1 # Determine padding size. 
# Ref: https://discuss.pytorch.org/t/how-to-keep-the-shape-of-input-and-output-same-when-dilation-conv/14338 padding_size = (cur_window + (cur_window - 1) * (dilation - 1)) // 2 cur_conv = nn.Conv2d( cur_head_split * head_chs, cur_head_split * head_chs, kernel_size=(cur_window, cur_window), padding=(padding_size, padding_size), dilation=(dilation, dilation), groups=cur_head_split * head_chs, ) self.conv_list.append(cur_conv) self.head_splits.append(cur_head_split) self.channel_splits = [x * head_chs for x in self.head_splits] def forward(self, q, v, size: Tuple[int, int]): B, num_heads, N, C = q.shape H, W = size _assert(N == 1 + H * W, '') # Convolutional relative position encoding. q_img = q[:, :, 1:, :] # [B, h, H*W, Ch] v_img = v[:, :, 1:, :] # [B, h, H*W, Ch] v_img = v_img.transpose(-1, -2).reshape(B, num_heads * C, H, W) v_img_list = torch.split(v_img, self.channel_splits, dim=1) # Split according to channels conv_v_img_list = [] for i, conv in enumerate(self.conv_list): conv_v_img_list.append(conv(v_img_list[i])) conv_v_img = torch.cat(conv_v_img_list, dim=1) conv_v_img = conv_v_img.reshape(B, num_heads, C, H * W).transpose(-1, -2) EV_hat = q_img * conv_v_img EV_hat = F.pad(EV_hat, (0, 0, 1, 0, 0, 0)) # [B, h, N, Ch]. return EV_hat class FactorAttnConvRelPosEnc(nn.Module): """ Factorized attention with convolutional relative position encoding class. """ def __init__( self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., shared_crpe=None, ): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) # Note: attn_drop is actually not used. self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) # Shared convolutional relative position encoding. self.crpe = shared_crpe def forward(self, x, size: Tuple[int, int]): B, N, C = x.shape # Generate Q, K, V. 
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) # [B, h, N, Ch] # Factorized attention. k_softmax = k.softmax(dim=2) factor_att = k_softmax.transpose(-1, -2) @ v factor_att = q @ factor_att # Convolutional relative position encoding. crpe = self.crpe(q, v, size=size) # [B, h, N, Ch] # Merge and reshape. x = self.scale * factor_att + crpe x = x.transpose(1, 2).reshape(B, N, C) # [B, h, N, Ch] -> [B, N, h, Ch] -> [B, N, C] # Output projection. x = self.proj(x) x = self.proj_drop(x) return x class ConvPosEnc(nn.Module): """ Convolutional Position Encoding. Note: This module is similar to the conditional position encoding in CPVT. """ def __init__(self, dim, k=3): super(ConvPosEnc, self).__init__() self.proj = nn.Conv2d(dim, dim, k, 1, k//2, groups=dim) def forward(self, x, size: Tuple[int, int]): B, N, C = x.shape H, W = size _assert(N == 1 + H * W, '') # Extract CLS token and image tokens. cls_token, img_tokens = x[:, :1], x[:, 1:] # [B, 1, C], [B, H*W, C] # Depthwise convolution. feat = img_tokens.transpose(1, 2).view(B, C, H, W) x = self.proj(feat) + feat x = x.flatten(2).transpose(1, 2) # Combine with CLS token. x = torch.cat((cls_token, x), dim=1) return x class SerialBlock(nn.Module): """ Serial block class. Note: In this implementation, each serial block only contains a conv-attention and a FFN (MLP) module. """ def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, shared_cpe=None, shared_crpe=None, ): super().__init__() # Conv-Attention. self.cpe = shared_cpe self.norm1 = norm_layer(dim) self.factoratt_crpe = FactorAttnConvRelPosEnc( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, shared_crpe=shared_crpe, ) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() # MLP. 
self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop, ) def forward(self, x, size: Tuple[int, int]): # Conv-Attention. x = self.cpe(x, size) cur = self.norm1(x) cur = self.factoratt_crpe(cur, size) x = x + self.drop_path(cur) # MLP. cur = self.norm2(x) cur = self.mlp(cur) x = x + self.drop_path(cur) return x class ParallelBlock(nn.Module): """ Parallel block class. """ def __init__( self, dims, num_heads, mlp_ratios=[], qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, shared_crpes=None, ): super().__init__() # Conv-Attention. self.norm12 = norm_layer(dims[1]) self.norm13 = norm_layer(dims[2]) self.norm14 = norm_layer(dims[3]) self.factoratt_crpe2 = FactorAttnConvRelPosEnc( dims[1], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, shared_crpe=shared_crpes[1], ) self.factoratt_crpe3 = FactorAttnConvRelPosEnc( dims[2], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, shared_crpe=shared_crpes[2], ) self.factoratt_crpe4 = FactorAttnConvRelPosEnc( dims[3], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, shared_crpe=shared_crpes[3], ) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() # MLP. self.norm22 = norm_layer(dims[1]) self.norm23 = norm_layer(dims[2]) self.norm24 = norm_layer(dims[3]) # In parallel block, we assume dimensions are the same and share the linear transformation. assert dims[1] == dims[2] == dims[3] assert mlp_ratios[1] == mlp_ratios[2] == mlp_ratios[3] mlp_hidden_dim = int(dims[1] * mlp_ratios[1]) self.mlp2 = self.mlp3 = self.mlp4 = Mlp( in_features=dims[1], hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop, ) def upsample(self, x, factor: float, size: Tuple[int, int]): """ Feature map up-sampling. 
""" return self.interpolate(x, scale_factor=factor, size=size) def downsample(self, x, factor: float, size: Tuple[int, int]): """ Feature map down-sampling. """ return self.interpolate(x, scale_factor=1.0/factor, size=size) def interpolate(self, x, scale_factor: float, size: Tuple[int, int]): """ Feature map interpolation. """ B, N, C = x.shape H, W = size _assert(N == 1 + H * W, '') cls_token = x[:, :1, :] img_tokens = x[:, 1:, :] img_tokens = img_tokens.transpose(1, 2).reshape(B, C, H, W) img_tokens = F.interpolate( img_tokens, scale_factor=scale_factor, recompute_scale_factor=False, mode='bilinear', align_corners=False, ) img_tokens = img_tokens.reshape(B, C, -1).transpose(1, 2) out = torch.cat((cls_token, img_tokens), dim=1) return out def forward(self, x1, x2, x3, x4, sizes: List[Tuple[int, int]]): _, S2, S3, S4 = sizes cur2 = self.norm12(x2) cur3 = self.norm13(x3) cur4 = self.norm14(x4) cur2 = self.factoratt_crpe2(cur2, size=S2) cur3 = self.factoratt_crpe3(cur3, size=S3) cur4 = self.factoratt_crpe4(cur4, size=S4) upsample3_2 = self.upsample(cur3, factor=2., size=S3) upsample4_3 = self.upsample(cur4, factor=2., size=S4) upsample4_2 = self.upsample(cur4, factor=4., size=S4) downsample2_3 = self.downsample(cur2, factor=2., size=S2) downsample3_4 = self.downsample(cur3, factor=2., size=S3) downsample2_4 = self.downsample(cur2, factor=4., size=S2) cur2 = cur2 + upsample3_2 + upsample4_2 cur3 = cur3 + upsample4_3 + downsample2_3 cur4 = cur4 + downsample3_4 + downsample2_4 x2 = x2 + self.drop_path(cur2) x3 = x3 + self.drop_path(cur3) x4 = x4 + self.drop_path(cur4) # MLP. cur2 = self.norm22(x2) cur3 = self.norm23(x3) cur4 = self.norm24(x4) cur2 = self.mlp2(cur2) cur3 = self.mlp3(cur3) cur4 = self.mlp4(cur4) x2 = x2 + self.drop_path(cur2) x3 = x3 + self.drop_path(cur3) x4 = x4 + self.drop_path(cur4) return x1, x2, x3, x4 class CoaT(nn.Module): """ CoaT class. 
""" def __init__( self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=(64, 128, 320, 512), serial_depths=(3, 4, 6, 3), parallel_depth=0, num_heads=8, mlp_ratios=(4, 4, 4, 4), qkv_bias=True, drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=LayerNorm, return_interm_layers=False, out_features=None, crpe_window=None, global_pool='token', ): super().__init__() assert global_pool in ('token', 'avg') crpe_window = crpe_window or {3: 2, 5: 3, 7: 3} self.return_interm_layers = return_interm_layers self.out_features = out_features self.embed_dims = embed_dims self.num_features = self.head_hidden_size = embed_dims[-1] self.num_classes = num_classes self.global_pool = global_pool # Patch embeddings. img_size = to_2tuple(img_size) self.patch_embed1 = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dims[0], norm_layer=nn.LayerNorm) self.patch_embed2 = PatchEmbed( img_size=[x // 4 for x in img_size], patch_size=2, in_chans=embed_dims[0], embed_dim=embed_dims[1], norm_layer=nn.LayerNorm) self.patch_embed3 = PatchEmbed( img_size=[x // 8 for x in img_size], patch_size=2, in_chans=embed_dims[1], embed_dim=embed_dims[2], norm_layer=nn.LayerNorm) self.patch_embed4 = PatchEmbed( img_size=[x // 16 for x in img_size], patch_size=2, in_chans=embed_dims[2], embed_dim=embed_dims[3], norm_layer=nn.LayerNorm) # Class tokens. self.cls_token1 = nn.Parameter(torch.zeros(1, 1, embed_dims[0])) self.cls_token2 = nn.Parameter(torch.zeros(1, 1, embed_dims[1])) self.cls_token3 = nn.Parameter(torch.zeros(1, 1, embed_dims[2])) self.cls_token4 = nn.Parameter(torch.zeros(1, 1, embed_dims[3])) # Convolutional position encodings. self.cpe1 = ConvPosEnc(dim=embed_dims[0], k=3) self.cpe2 = ConvPosEnc(dim=embed_dims[1], k=3) self.cpe3 = ConvPosEnc(dim=embed_dims[2], k=3) self.cpe4 = ConvPosEnc(dim=embed_dims[3], k=3) # Convolutional relative position encodings. 
self.crpe1 = ConvRelPosEnc(head_chs=embed_dims[0] // num_heads, num_heads=num_heads, window=crpe_window) self.crpe2 = ConvRelPosEnc(head_chs=embed_dims[1] // num_heads, num_heads=num_heads, window=crpe_window) self.crpe3 = ConvRelPosEnc(head_chs=embed_dims[2] // num_heads, num_heads=num_heads, window=crpe_window) self.crpe4 = ConvRelPosEnc(head_chs=embed_dims[3] // num_heads, num_heads=num_heads, window=crpe_window) # Disable stochastic depth. dpr = drop_path_rate assert dpr == 0.0 skwargs = dict( num_heads=num_heads, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, ) # Serial blocks 1. self.serial_blocks1 = nn.ModuleList([ SerialBlock( dim=embed_dims[0], mlp_ratio=mlp_ratios[0], shared_cpe=self.cpe1, shared_crpe=self.crpe1, **skwargs, ) for _ in range(serial_depths[0])] ) # Serial blocks 2. self.serial_blocks2 = nn.ModuleList([ SerialBlock( dim=embed_dims[1], mlp_ratio=mlp_ratios[1], shared_cpe=self.cpe2, shared_crpe=self.crpe2, **skwargs, ) for _ in range(serial_depths[1])] ) # Serial blocks 3. self.serial_blocks3 = nn.ModuleList([ SerialBlock( dim=embed_dims[2], mlp_ratio=mlp_ratios[2], shared_cpe=self.cpe3, shared_crpe=self.crpe3, **skwargs, ) for _ in range(serial_depths[2])] ) # Serial blocks 4. self.serial_blocks4 = nn.ModuleList([ SerialBlock( dim=embed_dims[3], mlp_ratio=mlp_ratios[3], shared_cpe=self.cpe4, shared_crpe=self.crpe4, **skwargs, ) for _ in range(serial_depths[3])] ) # Parallel blocks. self.parallel_depth = parallel_depth if self.parallel_depth > 0: self.parallel_blocks = nn.ModuleList([ ParallelBlock( dims=embed_dims, mlp_ratios=mlp_ratios, shared_crpes=(self.crpe1, self.crpe2, self.crpe3, self.crpe4), **skwargs, ) for _ in range(parallel_depth)] ) else: self.parallel_blocks = None # Classification head(s). 
if not self.return_interm_layers: if self.parallel_blocks is not None: self.norm2 = norm_layer(embed_dims[1]) self.norm3 = norm_layer(embed_dims[2]) else: self.norm2 = self.norm3 = None self.norm4 = norm_layer(embed_dims[3]) if self.parallel_depth > 0: # CoaT series: Aggregate features of last three scales for classification. assert embed_dims[1] == embed_dims[2] == embed_dims[3] self.aggregate = torch.nn.Conv1d(in_channels=3, out_channels=1, kernel_size=1) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() else: # CoaT-Lite series: Use feature of last scale for classification. self.aggregate = None self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() # Initialize weights. trunc_normal_(self.cls_token1, std=.02) trunc_normal_(self.cls_token2, std=.02) trunc_normal_(self.cls_token3, std=.02) trunc_normal_(self.cls_token4, std=.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): return {'cls_token1', 'cls_token2', 'cls_token3', 'cls_token4'} @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem1=r'^cls_token1|patch_embed1|crpe1|cpe1', serial_blocks1=r'^serial_blocks1\.(\d+)', stem2=r'^cls_token2|patch_embed2|crpe2|cpe2', serial_blocks2=r'^serial_blocks2\.(\d+)', stem3=r'^cls_token3|patch_embed3|crpe3|cpe3', serial_blocks3=r'^serial_blocks3\.(\d+)', stem4=r'^cls_token4|patch_embed4|crpe4|cpe4', serial_blocks4=r'^serial_blocks4\.(\d+)', parallel_blocks=[ # FIXME (partially?) 
overlap parallel w/ serial blocks?? (r'^parallel_blocks\.(\d+)', None), (r'^norm|aggregate', (99999,)), ] ) return matcher @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('token', 'avg') self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x0): B = x0.shape[0] # Serial blocks 1. x1 = self.patch_embed1(x0) H1, W1 = self.patch_embed1.grid_size x1 = insert_cls(x1, self.cls_token1) for blk in self.serial_blocks1: x1 = blk(x1, size=(H1, W1)) x1_nocls = remove_cls(x1).reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous() # Serial blocks 2. x2 = self.patch_embed2(x1_nocls) H2, W2 = self.patch_embed2.grid_size x2 = insert_cls(x2, self.cls_token2) for blk in self.serial_blocks2: x2 = blk(x2, size=(H2, W2)) x2_nocls = remove_cls(x2).reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous() # Serial blocks 3. x3 = self.patch_embed3(x2_nocls) H3, W3 = self.patch_embed3.grid_size x3 = insert_cls(x3, self.cls_token3) for blk in self.serial_blocks3: x3 = blk(x3, size=(H3, W3)) x3_nocls = remove_cls(x3).reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous() # Serial blocks 4. x4 = self.patch_embed4(x3_nocls) H4, W4 = self.patch_embed4.grid_size x4 = insert_cls(x4, self.cls_token4) for blk in self.serial_blocks4: x4 = blk(x4, size=(H4, W4)) x4_nocls = remove_cls(x4).reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous() # Only serial blocks: Early return. if self.parallel_blocks is None: if not torch.jit.is_scripting() and self.return_interm_layers: # Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2). 
feat_out = {} if 'x1_nocls' in self.out_features: feat_out['x1_nocls'] = x1_nocls if 'x2_nocls' in self.out_features: feat_out['x2_nocls'] = x2_nocls if 'x3_nocls' in self.out_features: feat_out['x3_nocls'] = x3_nocls if 'x4_nocls' in self.out_features: feat_out['x4_nocls'] = x4_nocls return feat_out else: # Return features for classification. x4 = self.norm4(x4) return x4 # Parallel blocks. for blk in self.parallel_blocks: x2, x3, x4 = self.cpe2(x2, (H2, W2)), self.cpe3(x3, (H3, W3)), self.cpe4(x4, (H4, W4)) x1, x2, x3, x4 = blk(x1, x2, x3, x4, sizes=[(H1, W1), (H2, W2), (H3, W3), (H4, W4)]) if not torch.jit.is_scripting() and self.return_interm_layers: # Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2). feat_out = {} if 'x1_nocls' in self.out_features: x1_nocls = remove_cls(x1).reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous() feat_out['x1_nocls'] = x1_nocls if 'x2_nocls' in self.out_features: x2_nocls = remove_cls(x2).reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous() feat_out['x2_nocls'] = x2_nocls if 'x3_nocls' in self.out_features: x3_nocls = remove_cls(x3).reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous() feat_out['x3_nocls'] = x3_nocls if 'x4_nocls' in self.out_features: x4_nocls = remove_cls(x4).reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous() feat_out['x4_nocls'] = x4_nocls return feat_out else: x2 = self.norm2(x2) x3 = self.norm3(x3) x4 = self.norm4(x4) return [x2, x3, x4] def forward_head(self, x_feat: Union[torch.Tensor, List[torch.Tensor]], pre_logits: bool = False): if isinstance(x_feat, list): assert self.aggregate is not None if self.global_pool == 'avg': x = torch.cat([xl[:, 1:].mean(dim=1, keepdim=True) for xl in x_feat], dim=1) # [B, 3, C] else: x = torch.stack([xl[:, 0] for xl in x_feat], dim=1) # [B, 3, C] x = self.aggregate(x).squeeze(dim=1) # Shape: [B, C] else: x = x_feat[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x_feat[:, 0] x = self.head_drop(x) return x if 
pre_logits else self.head(x) def forward(self, x) -> torch.Tensor: if not torch.jit.is_scripting() and self.return_interm_layers: # Return intermediate features (for down-stream tasks). return self.forward_features(x) else: # Return features for classification. x_feat = self.forward_features(x) x = self.forward_head(x_feat) return x def insert_cls(x, cls_token): """ Insert CLS token. """ cls_tokens = cls_token.expand(x.shape[0], -1, -1) x = torch.cat((cls_tokens, x), dim=1) return x def remove_cls(x): """ Remove CLS token. """ return x[:, 1:, :] def checkpoint_filter_fn(state_dict, model): out_dict = {} state_dict = state_dict.get('model', state_dict) for k, v in state_dict.items(): # original model had unused norm layers, removing them requires filtering pretrained checkpoints if k.startswith('norm1') or \ (k.startswith('norm2') and getattr(model, 'norm2', None) is None) or \ (k.startswith('norm3') and getattr(model, 'norm3', None) is None) or \ (k.startswith('norm4') and getattr(model, 'norm4', None) is None) or \ (k.startswith('aggregate') and getattr(model, 'aggregate', None) is None) or \ (k.startswith('head') and getattr(model, 'head', None) is None): continue out_dict[k] = v return out_dict def _create_coat(variant, pretrained=False, default_cfg=None, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') model = build_model_with_cfg( CoaT, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, **kwargs, ) return model def _cfg_coat(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed1.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ 'coat_tiny.in1k': _cfg_coat(hf_hub_id='timm/'), 'coat_mini.in1k': _cfg_coat(hf_hub_id='timm/'), 
'coat_small.in1k': _cfg_coat(hf_hub_id='timm/'), 'coat_lite_tiny.in1k': _cfg_coat(hf_hub_id='timm/'), 'coat_lite_mini.in1k': _cfg_coat(hf_hub_id='timm/'), 'coat_lite_small.in1k': _cfg_coat(hf_hub_id='timm/'), 'coat_lite_medium.in1k': _cfg_coat(hf_hub_id='timm/'), 'coat_lite_medium_384.in1k': _cfg_coat( hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0, crop_mode='squash', ), }) @register_model def coat_tiny(pretrained=False, **kwargs) -> CoaT: model_cfg = dict( patch_size=4, embed_dims=[152, 152, 152, 152], serial_depths=[2, 2, 2, 2], parallel_depth=6) model = _create_coat('coat_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def coat_mini(pretrained=False, **kwargs) -> CoaT: model_cfg = dict( patch_size=4, embed_dims=[152, 216, 216, 216], serial_depths=[2, 2, 2, 2], parallel_depth=6) model = _create_coat('coat_mini', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def coat_small(pretrained=False, **kwargs) -> CoaT: model_cfg = dict( patch_size=4, embed_dims=[152, 320, 320, 320], serial_depths=[2, 2, 2, 2], parallel_depth=6, **kwargs) model = _create_coat('coat_small', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def coat_lite_tiny(pretrained=False, **kwargs) -> CoaT: model_cfg = dict( patch_size=4, embed_dims=[64, 128, 256, 320], serial_depths=[2, 2, 2, 2], mlp_ratios=[8, 8, 4, 4]) model = _create_coat('coat_lite_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def coat_lite_mini(pretrained=False, **kwargs) -> CoaT: model_cfg = dict( patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[2, 2, 2, 2], mlp_ratios=[8, 8, 4, 4]) model = _create_coat('coat_lite_mini', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def coat_lite_small(pretrained=False, **kwargs) -> CoaT: model_cfg = dict( patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[3, 4, 6, 3], mlp_ratios=[8, 
8, 4, 4]) model = _create_coat('coat_lite_small', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def coat_lite_medium(pretrained=False, **kwargs) -> CoaT: model_cfg = dict( patch_size=4, embed_dims=[128, 256, 320, 512], serial_depths=[3, 6, 10, 8]) model = _create_coat('coat_lite_medium', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def coat_lite_medium_384(pretrained=False, **kwargs) -> CoaT: model_cfg = dict( img_size=384, patch_size=4, embed_dims=[128, 256, 320, 512], serial_depths=[3, 6, 10, 8]) model = _create_coat('coat_lite_medium_384', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model
pytorch-image-models/timm/models/coat.py/0
{ "file_path": "pytorch-image-models/timm/models/coat.py", "repo_id": "pytorch-image-models", "token_count": 15596 }
265
""" EfficientViT (by MSRA) Paper: `EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention` - https://arxiv.org/abs/2305.07027 Adapted from official impl at https://github.com/microsoft/Cream/tree/main/EfficientViT """ __all__ = ['EfficientVitMsra'] import itertools from collections import OrderedDict from typing import Dict, List, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SqueezeExcite, SelectAdaptivePool2d, trunc_normal_, _assert from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint, checkpoint_seq from ._registry import register_model, generate_default_cfgs class ConvNorm(torch.nn.Sequential): def __init__(self, in_chs, out_chs, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1): super().__init__() self.conv = nn.Conv2d(in_chs, out_chs, ks, stride, pad, dilation, groups, bias=False) self.bn = nn.BatchNorm2d(out_chs) torch.nn.init.constant_(self.bn.weight, bn_weight_init) torch.nn.init.constant_(self.bn.bias, 0) @torch.no_grad() def fuse(self): c, bn = self.conv, self.bn w = bn.weight / (bn.running_var + bn.eps)**0.5 w = c.weight * w[:, None, None, None] b = bn.bias - bn.running_mean * bn.weight / \ (bn.running_var + bn.eps)**0.5 m = torch.nn.Conv2d( w.size(1) * self.conv.groups, w.size(0), w.shape[2:], stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups) m.weight.data.copy_(w) m.bias.data.copy_(b) return m class NormLinear(torch.nn.Sequential): def __init__(self, in_features, out_features, bias=True, std=0.02, drop=0.): super().__init__() self.bn = nn.BatchNorm1d(in_features) self.drop = nn.Dropout(drop) self.linear = nn.Linear(in_features, out_features, bias=bias) trunc_normal_(self.linear.weight, std=std) if self.linear.bias is not None: nn.init.constant_(self.linear.bias, 0) @torch.no_grad() def 
fuse(self): bn, linear = self.bn, self.linear w = bn.weight / (bn.running_var + bn.eps)**0.5 b = bn.bias - self.bn.running_mean * \ self.bn.weight / (bn.running_var + bn.eps)**0.5 w = linear.weight * w[None, :] if linear.bias is None: b = b @ self.linear.weight.T else: b = (linear.weight @ b[:, None]).view(-1) + self.linear.bias m = torch.nn.Linear(w.size(1), w.size(0)) m.weight.data.copy_(w) m.bias.data.copy_(b) return m class PatchMerging(torch.nn.Module): def __init__(self, dim, out_dim): super().__init__() hid_dim = int(dim * 4) self.conv1 = ConvNorm(dim, hid_dim, 1, 1, 0) self.act = torch.nn.ReLU() self.conv2 = ConvNorm(hid_dim, hid_dim, 3, 2, 1, groups=hid_dim) self.se = SqueezeExcite(hid_dim, .25) self.conv3 = ConvNorm(hid_dim, out_dim, 1, 1, 0) def forward(self, x): x = self.conv3(self.se(self.act(self.conv2(self.act(self.conv1(x)))))) return x class ResidualDrop(torch.nn.Module): def __init__(self, m, drop=0.): super().__init__() self.m = m self.drop = drop def forward(self, x): if self.training and self.drop > 0: return x + self.m(x) * torch.rand( x.size(0), 1, 1, 1, device=x.device).ge_(self.drop).div(1 - self.drop).detach() else: return x + self.m(x) class ConvMlp(torch.nn.Module): def __init__(self, ed, h): super().__init__() self.pw1 = ConvNorm(ed, h) self.act = torch.nn.ReLU() self.pw2 = ConvNorm(h, ed, bn_weight_init=0) def forward(self, x): x = self.pw2(self.act(self.pw1(x))) return x class CascadedGroupAttention(torch.nn.Module): attention_bias_cache: Dict[str, torch.Tensor] r""" Cascaded Group Attention. Args: dim (int): Number of input channels. key_dim (int): The dimension for query and key. num_heads (int): Number of attention heads. attn_ratio (int): Multiplier for the query dim for value dimension. resolution (int): Input resolution, correspond to the window size. kernels (List[int]): The kernel size of the dw conv on query. 
""" def __init__( self, dim, key_dim, num_heads=8, attn_ratio=4, resolution=14, kernels=(5, 5, 5, 5), ): super().__init__() self.num_heads = num_heads self.scale = key_dim ** -0.5 self.key_dim = key_dim self.val_dim = int(attn_ratio * key_dim) self.attn_ratio = attn_ratio qkvs = [] dws = [] for i in range(num_heads): qkvs.append(ConvNorm(dim // (num_heads), self.key_dim * 2 + self.val_dim)) dws.append(ConvNorm(self.key_dim, self.key_dim, kernels[i], 1, kernels[i] // 2, groups=self.key_dim)) self.qkvs = torch.nn.ModuleList(qkvs) self.dws = torch.nn.ModuleList(dws) self.proj = torch.nn.Sequential( torch.nn.ReLU(), ConvNorm(self.val_dim * num_heads, dim, bn_weight_init=0) ) points = list(itertools.product(range(resolution), range(resolution))) N = len(points) attention_offsets = {} idxs = [] for p1 in points: for p2 in points: offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) if offset not in attention_offsets: attention_offsets[offset] = len(attention_offsets) idxs.append(attention_offsets[offset]) self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets))) self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N), persistent=False) self.attention_bias_cache = {} @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} # clear ab cache def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): B, C, H, W = x.shape feats_in = x.chunk(len(self.qkvs), dim=1) feats_out = [] feat = feats_in[0] attn_bias = self.get_attention_biases(x.device) for head_idx, (qkv, dws) in enumerate(zip(self.qkvs, 
self.dws)): if head_idx > 0: feat = feat + feats_in[head_idx] feat = qkv(feat) q, k, v = feat.view(B, -1, H, W).split([self.key_dim, self.key_dim, self.val_dim], dim=1) q = dws(q) q, k, v = q.flatten(2), k.flatten(2), v.flatten(2) q = q * self.scale attn = q.transpose(-2, -1) @ k attn = attn + attn_bias[head_idx] attn = attn.softmax(dim=-1) feat = v @ attn.transpose(-2, -1) feat = feat.view(B, self.val_dim, H, W) feats_out.append(feat) x = self.proj(torch.cat(feats_out, 1)) return x class LocalWindowAttention(torch.nn.Module): r""" Local Window Attention. Args: dim (int): Number of input channels. key_dim (int): The dimension for query and key. num_heads (int): Number of attention heads. attn_ratio (int): Multiplier for the query dim for value dimension. resolution (int): Input resolution. window_resolution (int): Local window resolution. kernels (List[int]): The kernel size of the dw conv on query. """ def __init__( self, dim, key_dim, num_heads=8, attn_ratio=4, resolution=14, window_resolution=7, kernels=(5, 5, 5, 5), ): super().__init__() self.dim = dim self.num_heads = num_heads self.resolution = resolution assert window_resolution > 0, 'window_size must be greater than 0' self.window_resolution = window_resolution window_resolution = min(window_resolution, resolution) self.attn = CascadedGroupAttention( dim, key_dim, num_heads, attn_ratio=attn_ratio, resolution=window_resolution, kernels=kernels, ) def forward(self, x): H = W = self.resolution B, C, H_, W_ = x.shape # Only check this for classification models _assert(H == H_, f'input feature has wrong size, expect {(H, W)}, got {(H_, W_)}') _assert(W == W_, f'input feature has wrong size, expect {(H, W)}, got {(H_, W_)}') if H <= self.window_resolution and W <= self.window_resolution: x = self.attn(x) else: x = x.permute(0, 2, 3, 1) pad_b = (self.window_resolution - H % self.window_resolution) % self.window_resolution pad_r = (self.window_resolution - W % self.window_resolution) % self.window_resolution x = 
torch.nn.functional.pad(x, (0, 0, 0, pad_r, 0, pad_b)) pH, pW = H + pad_b, W + pad_r nH = pH // self.window_resolution nW = pW // self.window_resolution # window partition, BHWC -> B(nHh)(nWw)C -> BnHnWhwC -> (BnHnW)hwC -> (BnHnW)Chw x = x.view(B, nH, self.window_resolution, nW, self.window_resolution, C).transpose(2, 3) x = x.reshape(B * nH * nW, self.window_resolution, self.window_resolution, C).permute(0, 3, 1, 2) x = self.attn(x) # window reverse, (BnHnW)Chw -> (BnHnW)hwC -> BnHnWhwC -> B(nHh)(nWw)C -> BHWC x = x.permute(0, 2, 3, 1).view(B, nH, nW, self.window_resolution, self.window_resolution, C) x = x.transpose(2, 3).reshape(B, pH, pW, C) x = x[:, :H, :W].contiguous() x = x.permute(0, 3, 1, 2) return x class EfficientVitBlock(torch.nn.Module): """ A basic EfficientVit building block. Args: dim (int): Number of input channels. key_dim (int): Dimension for query and key in the token mixer. num_heads (int): Number of attention heads. attn_ratio (int): Multiplier for the query dim for value dimension. resolution (int): Input resolution. window_resolution (int): Local window resolution. kernels (List[int]): The kernel size of the dw conv on query. 
""" def __init__( self, dim, key_dim, num_heads=8, attn_ratio=4, resolution=14, window_resolution=7, kernels=[5, 5, 5, 5], ): super().__init__() self.dw0 = ResidualDrop(ConvNorm(dim, dim, 3, 1, 1, groups=dim, bn_weight_init=0.)) self.ffn0 = ResidualDrop(ConvMlp(dim, int(dim * 2))) self.mixer = ResidualDrop( LocalWindowAttention( dim, key_dim, num_heads, attn_ratio=attn_ratio, resolution=resolution, window_resolution=window_resolution, kernels=kernels, ) ) self.dw1 = ResidualDrop(ConvNorm(dim, dim, 3, 1, 1, groups=dim, bn_weight_init=0.)) self.ffn1 = ResidualDrop(ConvMlp(dim, int(dim * 2))) def forward(self, x): return self.ffn1(self.dw1(self.mixer(self.ffn0(self.dw0(x))))) class EfficientVitStage(torch.nn.Module): def __init__( self, in_dim, out_dim, key_dim, downsample=('', 1), num_heads=8, attn_ratio=4, resolution=14, window_resolution=7, kernels=[5, 5, 5, 5], depth=1, ): super().__init__() if downsample[0] == 'subsample': self.resolution = (resolution - 1) // downsample[1] + 1 down_blocks = [] down_blocks.append(( 'res1', torch.nn.Sequential( ResidualDrop(ConvNorm(in_dim, in_dim, 3, 1, 1, groups=in_dim)), ResidualDrop(ConvMlp(in_dim, int(in_dim * 2))), ) )) down_blocks.append(('patchmerge', PatchMerging(in_dim, out_dim))) down_blocks.append(( 'res2', torch.nn.Sequential( ResidualDrop(ConvNorm(out_dim, out_dim, 3, 1, 1, groups=out_dim)), ResidualDrop(ConvMlp(out_dim, int(out_dim * 2))), ) )) self.downsample = nn.Sequential(OrderedDict(down_blocks)) else: assert in_dim == out_dim self.downsample = nn.Identity() self.resolution = resolution blocks = [] for d in range(depth): blocks.append(EfficientVitBlock(out_dim, key_dim, num_heads, attn_ratio, self.resolution, window_resolution, kernels)) self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) x = self.blocks(x) return x class PatchEmbedding(torch.nn.Sequential): def __init__(self, in_chans, dim): super().__init__() self.add_module('conv1', ConvNorm(in_chans, dim // 8, 3, 2, 1)) 
self.add_module('relu1', torch.nn.ReLU()) self.add_module('conv2', ConvNorm(dim // 8, dim // 4, 3, 2, 1)) self.add_module('relu2', torch.nn.ReLU()) self.add_module('conv3', ConvNorm(dim // 4, dim // 2, 3, 2, 1)) self.add_module('relu3', torch.nn.ReLU()) self.add_module('conv4', ConvNorm(dim // 2, dim, 3, 2, 1)) self.patch_size = 16 class EfficientVitMsra(nn.Module): def __init__( self, img_size=224, in_chans=3, num_classes=1000, embed_dim=(64, 128, 192), key_dim=(16, 16, 16), depth=(1, 2, 3), num_heads=(4, 4, 4), window_size=(7, 7, 7), kernels=(5, 5, 5, 5), down_ops=(('', 1), ('subsample', 2), ('subsample', 2)), global_pool='avg', drop_rate=0., ): super(EfficientVitMsra, self).__init__() self.grad_checkpointing = False self.num_classes = num_classes self.drop_rate = drop_rate # Patch embedding self.patch_embed = PatchEmbedding(in_chans, embed_dim[0]) stride = self.patch_embed.patch_size resolution = img_size // self.patch_embed.patch_size attn_ratio = [embed_dim[i] / (key_dim[i] * num_heads[i]) for i in range(len(embed_dim))] # Build EfficientVit blocks self.feature_info = [] stages = [] pre_ed = embed_dim[0] for i, (ed, kd, dpth, nh, ar, wd, do) in enumerate( zip(embed_dim, key_dim, depth, num_heads, attn_ratio, window_size, down_ops)): stage = EfficientVitStage( in_dim=pre_ed, out_dim=ed, key_dim=kd, downsample=do, num_heads=nh, attn_ratio=ar, resolution=resolution, window_resolution=wd, kernels=kernels, depth=dpth, ) pre_ed = ed if do[0] == 'subsample' and i != 0: stride *= do[1] resolution = stage.resolution stages.append(stage) self.feature_info += [dict(num_chs=ed, reduction=stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) if global_pool == 'avg': self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) else: assert num_classes == 0 self.global_pool = nn.Identity() self.num_features = self.head_hidden_size = embed_dim[-1] self.head = NormLinear( self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else 
torch.nn.Identity() @torch.jit.ignore def no_weight_decay(self): return {x for x in self.state_dict().keys() if 'attention_biases' in x} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^patch_embed', blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+).downsample', (0,)), (r'^stages\.(\d+)\.\w+\.(\d+)', None), ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.linear def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes if global_pool is not None: if global_pool == 'avg': self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) else: assert num_classes == 0 self.global_pool = nn.Identity() self.head = NormLinear( self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else torch.nn.Identity() def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' 
intermediates = [] take_indices, max_index = feature_take_indices(len(self.stages), indices) # forward pass x = self.patch_embed(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript stages = self.stages else: stages = self.stages[:max_index + 1] for feat_idx, stage in enumerate(stages): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(stage, x) else: x = stage(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. """ take_indices, max_index = feature_take_indices(len(self.stages), indices) self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0 if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.patch_embed(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x # def checkpoint_filter_fn(state_dict, model): # if 'model' in state_dict.keys(): # state_dict = state_dict['model'] # tmp_dict = {} # out_dict = {} # target_keys = model.state_dict().keys() # target_keys = [k for k in target_keys if k.startswith('stages.')] # # for k, v in state_dict.items(): # if 'attention_bias_idxs' in k: # continue # k = k.split('.') # if k[-2] == 'c': # k[-2] = 'conv' # if k[-2] == 'l': # k[-2] = 'linear' # k = '.'.join(k) # tmp_dict[k] = v # # for k, v in tmp_dict.items(): # if k.startswith('patch_embed'): # k = k.split('.') # k[1] = 'conv' + str(int(k[1]) // 2 + 1) # k = '.'.join(k) # elif k.startswith('blocks'): # kw = 
'.'.join(k.split('.')[2:]) # find_kw = [a for a in list(sorted(tmp_dict.keys())) if kw in a] # idx = find_kw.index(k) # k = [a for a in target_keys if kw in a][idx] # out_dict[k] = v # # return out_dict def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.conv1.conv', 'classifier': 'head.linear', 'fixed_input_size': True, 'pool_size': (4, 4), **kwargs, } default_cfgs = generate_default_cfgs({ 'efficientvit_m0.r224_in1k': _cfg( hf_hub_id='timm/', #url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m0.pth' ), 'efficientvit_m1.r224_in1k': _cfg( hf_hub_id='timm/', #url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m1.pth' ), 'efficientvit_m2.r224_in1k': _cfg( hf_hub_id='timm/', #url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m2.pth' ), 'efficientvit_m3.r224_in1k': _cfg( hf_hub_id='timm/', #url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m3.pth' ), 'efficientvit_m4.r224_in1k': _cfg( hf_hub_id='timm/', #url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m4.pth' ), 'efficientvit_m5.r224_in1k': _cfg( hf_hub_id='timm/', #url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m5.pth' ), }) def _create_efficientvit_msra(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2)) model = build_model_with_cfg( EfficientVitMsra, variant, pretrained, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs ) return model @register_model def efficientvit_m0(pretrained=False, **kwargs): model_args = dict( img_size=224, embed_dim=[64, 128, 192], depth=[1, 2, 3], num_heads=[4, 4, 4], window_size=[7, 7, 7], kernels=[5, 5, 5, 5] ) return 
_create_efficientvit_msra('efficientvit_m0', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_m1(pretrained=False, **kwargs): model_args = dict( img_size=224, embed_dim=[128, 144, 192], depth=[1, 2, 3], num_heads=[2, 3, 3], window_size=[7, 7, 7], kernels=[7, 5, 3, 3] ) return _create_efficientvit_msra('efficientvit_m1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_m2(pretrained=False, **kwargs): model_args = dict( img_size=224, embed_dim=[128, 192, 224], depth=[1, 2, 3], num_heads=[4, 3, 2], window_size=[7, 7, 7], kernels=[7, 5, 3, 3] ) return _create_efficientvit_msra('efficientvit_m2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_m3(pretrained=False, **kwargs): model_args = dict( img_size=224, embed_dim=[128, 240, 320], depth=[1, 2, 3], num_heads=[4, 3, 4], window_size=[7, 7, 7], kernels=[5, 5, 5, 5] ) return _create_efficientvit_msra('efficientvit_m3', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_m4(pretrained=False, **kwargs): model_args = dict( img_size=224, embed_dim=[128, 256, 384], depth=[1, 2, 3], num_heads=[4, 4, 4], window_size=[7, 7, 7], kernels=[7, 5, 3, 3] ) return _create_efficientvit_msra('efficientvit_m4', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientvit_m5(pretrained=False, **kwargs): model_args = dict( img_size=224, embed_dim=[192, 288, 384], depth=[1, 3, 4], num_heads=[3, 3, 4], window_size=[7, 7, 7], kernels=[7, 5, 3, 3] ) return _create_efficientvit_msra('efficientvit_m5', pretrained=pretrained, **dict(model_args, **kwargs))
pytorch-image-models/timm/models/efficientvit_msra.py/0
{ "file_path": "pytorch-image-models/timm/models/efficientvit_msra.py", "repo_id": "pytorch-image-models", "token_count": 12924 }
266
""" NasNet-A (Large) nasnetalarge implementation grabbed from Cadene's pretrained models https://github.com/Cadene/pretrained-models.pytorch """ from functools import partial import torch import torch.nn as nn from timm.layers import ConvNormAct, create_conv2d, create_pool2d, create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['NASNetALarge'] class ActConvBn(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): super(ActConvBn, self).__init__() self.act = nn.ReLU() self.conv = create_conv2d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) def forward(self, x): x = self.act(x) x = self.conv(x) x = self.bn(x) return x class SeparableConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): super(SeparableConv2d, self).__init__() self.depthwise_conv2d = create_conv2d( in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=in_channels) self.pointwise_conv2d = create_conv2d( in_channels, out_channels, kernel_size=1, padding=0) def forward(self, x): x = self.depthwise_conv2d(x) x = self.pointwise_conv2d(x) return x class BranchSeparables(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, pad_type='', stem_cell=False): super(BranchSeparables, self).__init__() middle_channels = out_channels if stem_cell else in_channels self.act_1 = nn.ReLU() self.separable_1 = SeparableConv2d( in_channels, middle_channels, kernel_size, stride=stride, padding=pad_type) self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001, momentum=0.1) self.act_2 = nn.ReLU(inplace=True) self.separable_2 = SeparableConv2d( middle_channels, out_channels, kernel_size, stride=1, padding=pad_type) self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) def forward(self, 
x): x = self.act_1(x) x = self.separable_1(x) x = self.bn_sep_1(x) x = self.act_2(x) x = self.separable_2(x) x = self.bn_sep_2(x) return x class CellStem0(nn.Module): def __init__(self, stem_size, num_channels=42, pad_type=''): super(CellStem0, self).__init__() self.num_channels = num_channels self.stem_size = stem_size self.conv_1x1 = ActConvBn(self.stem_size, self.num_channels, 1, stride=1) self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) self.comb_iter_0_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) self.comb_iter_1_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) self.comb_iter_2_right = BranchSeparables(self.stem_size, self.num_channels, 5, 2, pad_type, stem_cell=True) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) def forward(self, x): x1 = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x1) x_comb_iter_0_right = self.comb_iter_0_right(x) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x1) x_comb_iter_1_right = self.comb_iter_1_right(x) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x1) x_comb_iter_2_right = self.comb_iter_2_right(x) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x1) x_comb_iter_4 = x_comb_iter_4_left + 
x_comb_iter_4_right x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class CellStem1(nn.Module): def __init__(self, stem_size, num_channels, pad_type=''): super(CellStem1, self).__init__() self.num_channels = num_channels self.stem_size = stem_size self.conv_1x1 = ActConvBn(2 * self.num_channels, self.num_channels, 1, stride=1) self.act = nn.ReLU() self.path_1 = nn.Sequential() self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) self.path_1.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) self.path_2 = nn.Sequential() self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) self.path_2.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) self.final_path_bn = nn.BatchNorm2d(self.num_channels, eps=0.001, momentum=0.1) self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) self.comb_iter_0_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) self.comb_iter_1_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) self.comb_iter_2_right = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) def forward(self, x_conv0, x_stem_0): x_left = self.conv_1x1(x_stem_0) x_relu = self.act(x_conv0) # path 1 x_path1 = self.path_1(x_relu) # path 2 x_path2 = self.path_2(x_relu) # final path x_right = 
self.final_path_bn(torch.cat([x_path1, x_path2], 1)) x_comb_iter_0_left = self.comb_iter_0_left(x_left) x_comb_iter_0_right = self.comb_iter_0_right(x_right) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_left) x_comb_iter_1_right = self.comb_iter_1_right(x_right) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_left) x_comb_iter_2_right = self.comb_iter_2_right(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x_left) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class FirstCell(nn.Module): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(FirstCell, self).__init__() self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1) self.act = nn.ReLU() self.path_1 = nn.Sequential() self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) self.path_1.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) self.path_2 = nn.Sequential() self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) self.path_2.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) self.final_path_bn = nn.BatchNorm2d(out_chs_left * 2, eps=0.001, momentum=0.1) self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) self.comb_iter_1_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) self.comb_iter_1_right 
= BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) def forward(self, x, x_prev): x_relu = self.act(x_prev) x_path1 = self.path_1(x_relu) x_path2 = self.path_2(x_relu) x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) x_right = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_left) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_left x_comb_iter_3_left = self.comb_iter_3_left(x_left) x_comb_iter_3_right = self.comb_iter_3_right(x_left) x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right x_comb_iter_4_left = self.comb_iter_4_left(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_right x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class NormalCell(nn.Module): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(NormalCell, self).__init__() self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) self.comb_iter_0_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) self.comb_iter_1_left = BranchSeparables(out_chs_left, 
out_chs_left, 5, 1, pad_type) self.comb_iter_1_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) def forward(self, x, x_prev): x_left = self.conv_prev_1x1(x_prev) x_right = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_left) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_left x_comb_iter_3_left = self.comb_iter_3_left(x_left) x_comb_iter_3_right = self.comb_iter_3_right(x_left) x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right x_comb_iter_4_left = self.comb_iter_4_left(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_right x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class ReductionCell0(nn.Module): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(ReductionCell0, self).__init__() self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) self.comb_iter_1_right = 
BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) def forward(self, x, x_prev): x_left = self.conv_prev_1x1(x_prev) x_right = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_right) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2_right = self.comb_iter_2_right(x_left) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class ReductionCell1(nn.Module): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(ReductionCell1, self).__init__() self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) self.comb_iter_1_left = 
create_pool2d('max', 3, 2, padding=pad_type) self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) def forward(self, x, x_prev): x_left = self.conv_prev_1x1(x_prev) x_right = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_right) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2_right = self.comb_iter_2_right(x_left) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class NASNetALarge(nn.Module): """NASNetALarge (6 @ 4032) """ def __init__( self, num_classes=1000, in_chans=3, stem_size=96, channel_multiplier=2, num_features=4032, output_stride=32, drop_rate=0., global_pool='avg', pad_type='same', ): super(NASNetALarge, self).__init__() self.num_classes = num_classes self.stem_size = stem_size self.num_features = self.head_hidden_size = num_features self.channel_multiplier = channel_multiplier assert output_stride == 32 channels = 
self.num_features // 24 # 24 is default value for the architecture self.conv0 = ConvNormAct( in_channels=in_chans, out_channels=self.stem_size, kernel_size=3, padding=0, stride=2, norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) self.cell_stem_0 = CellStem0( self.stem_size, num_channels=channels // (channel_multiplier ** 2), pad_type=pad_type) self.cell_stem_1 = CellStem1( self.stem_size, num_channels=channels // channel_multiplier, pad_type=pad_type) self.cell_0 = FirstCell( in_chs_left=channels, out_chs_left=channels // 2, in_chs_right=2 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_1 = NormalCell( in_chs_left=2 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_2 = NormalCell( in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_3 = NormalCell( in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_4 = NormalCell( in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_5 = NormalCell( in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.reduction_cell_0 = ReductionCell0( in_chs_left=6 * channels, out_chs_left=2 * channels, in_chs_right=6 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_6 = FirstCell( in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=8 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_7 = NormalCell( in_chs_left=8 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_8 = NormalCell( in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_9 = 
NormalCell( in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_10 = NormalCell( in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_11 = NormalCell( in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.reduction_cell_1 = ReductionCell1( in_chs_left=12 * channels, out_chs_left=4 * channels, in_chs_right=12 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_12 = FirstCell( in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=16 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_13 = NormalCell( in_chs_left=16 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_14 = NormalCell( in_chs_left=24 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_15 = NormalCell( in_chs_left=24 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_16 = NormalCell( in_chs_left=24 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_17 = NormalCell( in_chs_left=24 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.act = nn.ReLU(inplace=True) self.feature_info = [ dict(num_chs=96, reduction=2, module='conv0'), dict(num_chs=168, reduction=4, module='cell_stem_1.conv_1x1.act'), dict(num_chs=1008, reduction=8, module='reduction_cell_0.conv_1x1.act'), dict(num_chs=2016, reduction=16, module='reduction_cell_1.conv_1x1.act'), dict(num_chs=4032, reduction=32, module='act'), ] self.global_pool, self.head_drop, self.last_linear = create_classifier( 
self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^conv0|cell_stem_[01]', blocks=[ (r'^cell_(\d+)', None), (r'^reduction_cell_0', (6,)), (r'^reduction_cell_1', (12,)), ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.last_linear def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): self.num_classes = num_classes self.global_pool, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x_conv0 = self.conv0(x) x_stem_0 = self.cell_stem_0(x_conv0) x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0) x_cell_0 = self.cell_0(x_stem_1, x_stem_0) x_cell_1 = self.cell_1(x_cell_0, x_stem_1) x_cell_2 = self.cell_2(x_cell_1, x_cell_0) x_cell_3 = self.cell_3(x_cell_2, x_cell_1) x_cell_4 = self.cell_4(x_cell_3, x_cell_2) x_cell_5 = self.cell_5(x_cell_4, x_cell_3) x_reduction_cell_0 = self.reduction_cell_0(x_cell_5, x_cell_4) x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_4) x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0) x_cell_8 = self.cell_8(x_cell_7, x_cell_6) x_cell_9 = self.cell_9(x_cell_8, x_cell_7) x_cell_10 = self.cell_10(x_cell_9, x_cell_8) x_cell_11 = self.cell_11(x_cell_10, x_cell_9) x_reduction_cell_1 = self.reduction_cell_1(x_cell_11, x_cell_10) x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_10) x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1) x_cell_14 = self.cell_14(x_cell_13, x_cell_12) x_cell_15 = self.cell_15(x_cell_14, x_cell_13) x_cell_16 = self.cell_16(x_cell_15, x_cell_14) x_cell_17 = self.cell_17(x_cell_16, x_cell_15) x = self.act(x_cell_17) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else 
self.last_linear(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_nasnet(variant, pretrained=False, **kwargs): return build_model_with_cfg( NASNetALarge, variant, pretrained, feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model **kwargs, ) default_cfgs = generate_default_cfgs({ 'nasnetalarge.tf_in1k': { 'hf_hub_id': 'timm/', 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nasnetalarge-dc4a7b8b.pth', 'input_size': (3, 331, 331), 'pool_size': (11, 11), 'crop_pct': 0.911, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'num_classes': 1000, 'first_conv': 'conv0.conv', 'classifier': 'last_linear', }, }) @register_model def nasnetalarge(pretrained=False, **kwargs) -> NASNetALarge: """NASNet-A large model architecture. """ model_kwargs = dict(pad_type='same', **kwargs) return _create_nasnet('nasnetalarge', pretrained, **model_kwargs)
pytorch-image-models/timm/models/nasnet.py/0
{ "file_path": "pytorch-image-models/timm/models/nasnet.py", "repo_id": "pytorch-image-models", "token_count": 13254 }
267
""" ReXNet A PyTorch impl of `ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network` - https://arxiv.org/abs/2007.00992 Adapted from original impl at https://github.com/clovaai/rexnet Copyright (c) 2020-present NAVER Corp. MIT license Changes for timm, feature extraction, and rounded channel variant hacked together by Ross Wightman Copyright 2020 Ross Wightman """ from functools import partial from math import ceil from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ClassifierHead, create_act_layer, ConvNormAct, DropPath, make_divisible, SEModule from ._builder import build_model_with_cfg from ._efficientnet_builder import efficientnet_init_weights from ._features import feature_take_indices from ._manipulate import checkpoint, checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['RexNet'] # model_registry will add each entrypoint fn to this SEWithNorm = partial(SEModule, norm_layer=nn.BatchNorm2d) class LinearBottleneck(nn.Module): """Linear bottleneck block for ReXNet. A mobile inverted residual bottleneck block as used in MobileNetV2 and subsequent models. """ def __init__( self, in_chs: int, out_chs: int, stride: int, dilation: Tuple[int, int] = (1, 1), exp_ratio: float = 1.0, se_ratio: float = 0., ch_div: int = 1, act_layer: str = 'swish', dw_act_layer: str = 'relu6', drop_path: Optional[nn.Module] = None, ): """Initialize LinearBottleneck. Args: in_chs: Number of input channels. out_chs: Number of output channels. stride: Stride for depthwise conv. dilation: Dilation rates. exp_ratio: Expansion ratio. se_ratio: Squeeze-excitation ratio. ch_div: Channel divisor. act_layer: Activation layer for expansion. dw_act_layer: Activation layer for depthwise. drop_path: Drop path module. 
""" super(LinearBottleneck, self).__init__() self.use_shortcut = stride == 1 and dilation[0] == dilation[1] and in_chs <= out_chs self.in_channels = in_chs self.out_channels = out_chs if exp_ratio != 1.: dw_chs = make_divisible(round(in_chs * exp_ratio), divisor=ch_div) self.conv_exp = ConvNormAct(in_chs, dw_chs, act_layer=act_layer) else: dw_chs = in_chs self.conv_exp = None self.conv_dw = ConvNormAct( dw_chs, dw_chs, kernel_size=3, stride=stride, dilation=dilation[0], groups=dw_chs, apply_act=False, ) if se_ratio > 0: self.se = SEWithNorm(dw_chs, rd_channels=make_divisible(int(dw_chs * se_ratio), ch_div)) else: self.se = None self.act_dw = create_act_layer(dw_act_layer) self.conv_pwl = ConvNormAct(dw_chs, out_chs, 1, apply_act=False) self.drop_path = drop_path def feat_channels(self, exp: bool = False) -> int: """Get feature channel count. Args: exp: Return expanded channels if True. Returns: Number of feature channels. """ return self.conv_dw.out_channels if exp else self.out_channels def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass. Args: x: Input tensor. Returns: Output tensor. """ shortcut = x if self.conv_exp is not None: x = self.conv_exp(x) x = self.conv_dw(x) if self.se is not None: x = self.se(x) x = self.act_dw(x) x = self.conv_pwl(x) if self.use_shortcut: if self.drop_path is not None: x = self.drop_path(x) x = torch.cat([x[:, 0:self.in_channels] + shortcut, x[:, self.in_channels:]], dim=1) return x def _block_cfg( width_mult: float = 1.0, depth_mult: float = 1.0, initial_chs: int = 16, final_chs: int = 180, se_ratio: float = 0., ch_div: int = 1, ) -> List[Tuple[int, float, int, float]]: """Generate ReXNet block configuration. Args: width_mult: Width multiplier. depth_mult: Depth multiplier. initial_chs: Initial channel count. final_chs: Final channel count. se_ratio: Squeeze-excitation ratio. ch_div: Channel divisor. Returns: List of tuples (out_channels, exp_ratio, stride, se_ratio). 
""" layers = [1, 2, 2, 3, 3, 5] strides = [1, 2, 2, 2, 1, 2] layers = [ceil(element * depth_mult) for element in layers] strides = sum([[element] + [1] * (layers[idx] - 1) for idx, element in enumerate(strides)], []) exp_ratios = [1] * layers[0] + [6] * sum(layers[1:]) depth = sum(layers[:]) * 3 base_chs = initial_chs / width_mult if width_mult < 1.0 else initial_chs # The following channel configuration is a simple instance to make each layer become an expand layer. out_chs_list = [] for i in range(depth // 3): out_chs_list.append(make_divisible(round(base_chs * width_mult), divisor=ch_div)) base_chs += final_chs / (depth // 3 * 1.0) se_ratios = [0.] * (layers[0] + layers[1]) + [se_ratio] * sum(layers[2:]) return list(zip(out_chs_list, exp_ratios, strides, se_ratios)) def _build_blocks( block_cfg: List[Tuple[int, float, int, float]], prev_chs: int, width_mult: float, ch_div: int = 1, output_stride: int = 32, act_layer: str = 'swish', dw_act_layer: str = 'relu6', drop_path_rate: float = 0., ) -> Tuple[List[nn.Module], List[Dict[str, Any]]]: """Build ReXNet blocks from configuration. Args: block_cfg: Block configuration list. prev_chs: Previous channel count. width_mult: Width multiplier. ch_div: Channel divisor. output_stride: Target output stride. act_layer: Activation layer name. dw_act_layer: Depthwise activation layer name. drop_path_rate: Drop path rate. Returns: Tuple of (features list, feature_info list). 
""" feat_chs = [prev_chs] feature_info = [] curr_stride = 2 dilation = 1 features = [] num_blocks = len(block_cfg) for block_idx, (chs, exp_ratio, stride, se_ratio) in enumerate(block_cfg): next_dilation = dilation if stride > 1: fname = 'stem' if block_idx == 0 else f'features.{block_idx - 1}' feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=fname)] if curr_stride >= output_stride: next_dilation = dilation * stride stride = 1 block_dpr = drop_path_rate * block_idx / (num_blocks - 1) # stochastic depth linear decay rule drop_path = DropPath(block_dpr) if block_dpr > 0. else None features.append(LinearBottleneck( in_chs=prev_chs, out_chs=chs, exp_ratio=exp_ratio, stride=stride, dilation=(dilation, next_dilation), se_ratio=se_ratio, ch_div=ch_div, act_layer=act_layer, dw_act_layer=dw_act_layer, drop_path=drop_path, )) curr_stride *= stride dilation = next_dilation prev_chs = chs feat_chs += [features[-1].feat_channels()] pen_chs = make_divisible(1280 * width_mult, divisor=ch_div) feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=f'features.{len(features) - 1}')] features.append(ConvNormAct(prev_chs, pen_chs, act_layer=act_layer)) return features, feature_info class RexNet(nn.Module): """ReXNet model architecture. Based on `ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network` - https://arxiv.org/abs/2007.00992 """ def __init__( self, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', output_stride: int = 32, initial_chs: int = 16, final_chs: int = 180, width_mult: float = 1.0, depth_mult: float = 1.0, se_ratio: float = 1/12., ch_div: int = 1, act_layer: str = 'swish', dw_act_layer: str = 'relu6', drop_rate: float = 0.2, drop_path_rate: float = 0., ): """Initialize ReXNet. Args: in_chans: Number of input channels. num_classes: Number of classes for classification. global_pool: Global pooling type. output_stride: Output stride. initial_chs: Initial channel count. 
final_chs: Final channel count. width_mult: Width multiplier. depth_mult: Depth multiplier. se_ratio: Squeeze-excitation ratio. ch_div: Channel divisor. act_layer: Activation layer name. dw_act_layer: Depthwise activation layer name. drop_rate: Dropout rate. drop_path_rate: Drop path rate. """ super(RexNet, self).__init__() self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False assert output_stride in (32, 16, 8) stem_base_chs = 32 / width_mult if width_mult < 1.0 else 32 stem_chs = make_divisible(round(stem_base_chs * width_mult), divisor=ch_div) self.stem = ConvNormAct(in_chans, stem_chs, 3, stride=2, act_layer=act_layer) block_cfg = _block_cfg(width_mult, depth_mult, initial_chs, final_chs, se_ratio, ch_div) features, self.feature_info = _build_blocks( block_cfg, stem_chs, width_mult, ch_div, output_stride, act_layer, dw_act_layer, drop_path_rate, ) self.num_features = self.head_hidden_size = features[-1].out_channels self.features = nn.Sequential(*features) self.head = ClassifierHead(self.num_features, num_classes, global_pool, drop_rate) efficientnet_init_weights(self) @torch.jit.ignore def group_matcher(self, coarse: bool = False) -> Dict[str, Any]: """Group matcher for parameter groups. Args: coarse: Whether to use coarse grouping. Returns: Dictionary of grouped parameters. """ matcher = dict( stem=r'^stem', blocks=r'^features\.(\d+)', ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable: bool = True) -> None: """Enable or disable gradient checkpointing. Args: enable: Whether to enable gradient checkpointing. """ self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: """Get the classifier module. Returns: Classifier module. """ return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None: """Reset the classifier. Args: num_classes: Number of classes for new classifier. global_pool: Global pooling type. 
""" self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] stage_ends = [int(info['module'].split('.')[-1]) for info in self.feature_info] take_indices, max_index = feature_take_indices(len(stage_ends), indices) take_indices = [stage_ends[i] for i in take_indices] max_index = stage_ends[max_index] # forward pass x = self.stem(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript stages = self.features else: stages = self.features[:max_index + 1] for feat_idx, stage in enumerate(stages): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(stage, x) else: x = stage(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ) -> List[int]: """Prune layers not required for specified intermediates. Args: indices: Indices of intermediate layers to keep. prune_norm: Whether to prune normalization layer. prune_head: Whether to prune the classifier head. Returns: List of indices that were kept. 
""" stage_ends = [int(info['module'].split('.')[-1]) for info in self.feature_info] take_indices, max_index = feature_take_indices(len(stage_ends), indices) max_index = stage_ends[max_index] self.features = self.features[:max_index + 1] # truncate blocks w/ stem as idx 0 if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x: torch.Tensor) -> torch.Tensor: """Forward pass through feature extraction layers. Args: x: Input tensor. Returns: Feature tensor. """ x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.features, x) else: x = self.features(x) return x def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor: """Forward pass through head. Args: x: Input features. pre_logits: Return features before final linear layer. Returns: Classification logits or features. """ return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass. Args: x: Input tensor. Returns: Output logits. """ x = self.forward_features(x) x = self.forward_head(x) return x def _create_rexnet(variant: str, pretrained: bool, **kwargs) -> RexNet: """Create a ReXNet model. Args: variant: Model variant name. pretrained: Load pretrained weights. **kwargs: Additional model arguments. Returns: ReXNet model instance. """ feature_cfg = dict(flatten_sequential=True) return build_model_with_cfg( RexNet, variant, pretrained, feature_cfg=feature_cfg, **kwargs, ) def _cfg(url: str = '', **kwargs) -> Dict[str, Any]: """Create default configuration dictionary. Args: url: Model weight URL. **kwargs: Additional configuration options. Returns: Configuration dictionary. 
""" return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv', 'classifier': 'head.fc', 'license': 'mit', **kwargs } default_cfgs = generate_default_cfgs({ 'rexnet_100.nav_in1k': _cfg(hf_hub_id='timm/'), 'rexnet_130.nav_in1k': _cfg(hf_hub_id='timm/'), 'rexnet_150.nav_in1k': _cfg(hf_hub_id='timm/'), 'rexnet_200.nav_in1k': _cfg(hf_hub_id='timm/'), 'rexnet_300.nav_in1k': _cfg(hf_hub_id='timm/'), 'rexnetr_100.untrained': _cfg(), 'rexnetr_130.untrained': _cfg(), 'rexnetr_150.untrained': _cfg(), 'rexnetr_200.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'), 'rexnetr_300.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'), 'rexnetr_200.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'), 'rexnetr_300.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'), }) @register_model def rexnet_100(pretrained: bool = False, **kwargs) -> RexNet: """ReXNet V1 1.0x""" return _create_rexnet('rexnet_100', pretrained, **kwargs) @register_model def rexnet_130(pretrained: bool = False, **kwargs) -> RexNet: """ReXNet V1 1.3x""" return _create_rexnet('rexnet_130', pretrained, width_mult=1.3, **kwargs) @register_model def rexnet_150(pretrained: bool = False, **kwargs) -> RexNet: """ReXNet V1 1.5x""" return _create_rexnet('rexnet_150', pretrained, width_mult=1.5, **kwargs) @register_model def rexnet_200(pretrained: bool = False, **kwargs) -> RexNet: """ReXNet V1 2.0x""" return _create_rexnet('rexnet_200', pretrained, width_mult=2.0, **kwargs) @register_model def rexnet_300(pretrained: bool = False, 
**kwargs) -> RexNet: """ReXNet V1 3.0x""" return _create_rexnet('rexnet_300', pretrained, width_mult=3.0, **kwargs) @register_model def rexnetr_100(pretrained: bool = False, **kwargs) -> RexNet: """ReXNet V1 1.0x w/ rounded (mod 8) channels""" return _create_rexnet('rexnetr_100', pretrained, ch_div=8, **kwargs) @register_model def rexnetr_130(pretrained: bool = False, **kwargs) -> RexNet: """ReXNet V1 1.3x w/ rounded (mod 8) channels""" return _create_rexnet('rexnetr_130', pretrained, width_mult=1.3, ch_div=8, **kwargs) @register_model def rexnetr_150(pretrained: bool = False, **kwargs) -> RexNet: """ReXNet V1 1.5x w/ rounded (mod 8) channels""" return _create_rexnet('rexnetr_150', pretrained, width_mult=1.5, ch_div=8, **kwargs) @register_model def rexnetr_200(pretrained: bool = False, **kwargs) -> RexNet: """ReXNet V1 2.0x w/ rounded (mod 8) channels""" return _create_rexnet('rexnetr_200', pretrained, width_mult=2.0, ch_div=8, **kwargs) @register_model def rexnetr_300(pretrained: bool = False, **kwargs) -> RexNet: """ReXNet V1 3.0x w/ rounded (mod 16) channels""" return _create_rexnet('rexnetr_300', pretrained, width_mult=3.0, ch_div=16, **kwargs)
pytorch-image-models/timm/models/rexnet.py/0
{ "file_path": "pytorch-image-models/timm/models/rexnet.py", "repo_id": "pytorch-image-models", "token_count": 9214 }
268
""" Visformer Paper: Visformer: The Vision-friendly Transformer - https://arxiv.org/abs/2104.12533 From original at https://github.com/danczs/Visformer Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman """ import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import to_2tuple, trunc_normal_, DropPath, PatchEmbed, LayerNorm2d, create_classifier, use_fused_attn from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['Visformer'] class SpatialMlp(nn.Module): def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., group=8, spatial_conv=False, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features drop_probs = to_2tuple(drop) self.in_features = in_features self.out_features = out_features self.spatial_conv = spatial_conv if self.spatial_conv: if group < 2: # net setting hidden_features = in_features * 5 // 6 else: hidden_features = in_features * 2 self.hidden_features = hidden_features self.group = group self.conv1 = nn.Conv2d(in_features, hidden_features, 1, stride=1, padding=0, bias=False) self.act1 = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) if self.spatial_conv: self.conv2 = nn.Conv2d( hidden_features, hidden_features, 3, stride=1, padding=1, groups=self.group, bias=False) self.act2 = act_layer() else: self.conv2 = None self.act2 = None self.conv3 = nn.Conv2d(hidden_features, out_features, 1, stride=1, padding=0, bias=False) self.drop3 = nn.Dropout(drop_probs[1]) def forward(self, x): x = self.conv1(x) x = self.act1(x) x = self.drop1(x) if self.conv2 is not None: x = self.conv2(x) x = self.act2(x) x = self.conv3(x) x = self.drop3(x) return x class Attention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, head_dim_ratio=1., 
attn_drop=0., proj_drop=0.): super().__init__() self.dim = dim self.num_heads = num_heads head_dim = round(dim // num_heads * head_dim_ratio) self.head_dim = head_dim self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn(experimental=True) self.qkv = nn.Conv2d(dim, head_dim * num_heads * 3, 1, stride=1, padding=0, bias=False) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Conv2d(self.head_dim * self.num_heads, dim, 1, stride=1, padding=0, bias=False) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, C, H, W = x.shape x = self.qkv(x).reshape(B, 3, self.num_heads, self.head_dim, -1).permute(1, 0, 2, 4, 3) q, k, v = x.unbind(0) if self.fused_attn: x = torch.nn.functional.scaled_dot_product_attention( q.contiguous(), k.contiguous(), v.contiguous(), dropout_p=self.attn_drop.p if self.training else 0., ) else: attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.permute(0, 1, 3, 2).reshape(B, -1, H, W) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__( self, dim, num_heads, head_dim_ratio=1., mlp_ratio=4., proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=LayerNorm2d, group=8, attn_disabled=False, spatial_conv=False, ): super().__init__() self.spatial_conv = spatial_conv self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() if attn_disabled: self.norm1 = None self.attn = None else: self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, head_dim_ratio=head_dim_ratio, attn_drop=attn_drop, proj_drop=proj_drop, ) self.norm2 = norm_layer(dim) self.mlp = SpatialMlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, group=group, spatial_conv=spatial_conv, ) def forward(self, x): if self.attn is not None: x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class Visformer(nn.Module): def __init__( self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., drop_rate=0., pos_drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=LayerNorm2d, attn_stage='111', use_pos_embed=True, spatial_conv='111', vit_stem=False, group=8, global_pool='avg', conv_init=False, embed_norm=None, ): super().__init__() img_size = to_2tuple(img_size) self.num_classes = num_classes self.embed_dim = embed_dim self.init_channels = init_channels self.img_size = img_size self.vit_stem = vit_stem self.conv_init = conv_init if isinstance(depth, (list, tuple)): self.stage_num1, self.stage_num2, self.stage_num3 = depth depth = sum(depth) else: self.stage_num1 = self.stage_num3 = depth // 3 self.stage_num2 = depth - self.stage_num1 - self.stage_num3 self.use_pos_embed = use_pos_embed self.grad_checkpointing = False dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stage 1 if self.vit_stem: self.stem = None self.patch_embed1 = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=embed_norm, flatten=False, ) img_size = [x // patch_size for x in img_size] else: if self.init_channels is None: self.stem = None self.patch_embed1 = PatchEmbed( img_size=img_size, patch_size=patch_size // 2, in_chans=in_chans, embed_dim=embed_dim 
// 2, norm_layer=embed_norm, flatten=False, ) img_size = [x // (patch_size // 2) for x in img_size] else: self.stem = nn.Sequential( nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False), nn.BatchNorm2d(self.init_channels), nn.ReLU(inplace=True) ) img_size = [x // 2 for x in img_size] self.patch_embed1 = PatchEmbed( img_size=img_size, patch_size=patch_size // 4, in_chans=self.init_channels, embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False, ) img_size = [x // (patch_size // 4) for x in img_size] if self.use_pos_embed: if self.vit_stem: self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) else: self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim//2, *img_size)) self.pos_drop = nn.Dropout(p=pos_drop_rate) else: self.pos_embed1 = None self.stage1 = nn.Sequential(*[ Block( dim=embed_dim//2, num_heads=num_heads, head_dim_ratio=0.5, mlp_ratio=mlp_ratio, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, group=group, attn_disabled=(attn_stage[0] == '0'), spatial_conv=(spatial_conv[0] == '1'), ) for i in range(self.stage_num1) ]) # stage2 if not self.vit_stem: self.patch_embed2 = PatchEmbed( img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim // 2, embed_dim=embed_dim, norm_layer=embed_norm, flatten=False, ) img_size = [x // (patch_size // 8) for x in img_size] if self.use_pos_embed: self.pos_embed2 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) else: self.pos_embed2 = None else: self.patch_embed2 = None self.stage2 = nn.Sequential(*[ Block( dim=embed_dim, num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, group=group, attn_disabled=(attn_stage[1] == '0'), spatial_conv=(spatial_conv[1] == '1'), ) for i in range(self.stage_num1, self.stage_num1+self.stage_num2) ]) # stage 3 if not self.vit_stem: self.patch_embed3 = PatchEmbed( img_size=img_size, 
patch_size=patch_size // 8, in_chans=embed_dim, embed_dim=embed_dim * 2, norm_layer=embed_norm, flatten=False, ) img_size = [x // (patch_size // 8) for x in img_size] if self.use_pos_embed: self.pos_embed3 = nn.Parameter(torch.zeros(1, embed_dim*2, *img_size)) else: self.pos_embed3 = None else: self.patch_embed3 = None self.stage3 = nn.Sequential(*[ Block( dim=embed_dim * 2, num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, group=group, attn_disabled=(attn_stage[2] == '0'), spatial_conv=(spatial_conv[2] == '1'), ) for i in range(self.stage_num1+self.stage_num2, depth) ]) self.num_features = self.head_hidden_size = embed_dim if self.vit_stem else embed_dim * 2 self.norm = norm_layer(self.num_features) # head global_pool, head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) self.global_pool = global_pool self.head_drop = nn.Dropout(drop_rate) self.head = head # weights init if self.use_pos_embed: trunc_normal_(self.pos_embed1, std=0.02) if not self.vit_stem: trunc_normal_(self.pos_embed2, std=0.02) trunc_normal_(self.pos_embed3, std=0.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Conv2d): if self.conv_init: nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') else: trunc_normal_(m.weight, std=0.02) if m.bias is not None: nn.init.constant_(m.bias, 0.) 
    @torch.jit.ignore
    def group_matcher(self, coarse=False):
        """Return regex grouping for parameter-group layer decay / freezing.

        NOTE(review): both branches of the ternary below are identical, so the
        `coarse` flag currently has no effect here — confirm intended grouping.
        """
        return dict(
            stem=r'^patch_embed1|pos_embed1|stem',  # stem and embed
            blocks=[
                (r'^stage(\d+)\.(\d+)' if coarse else r'^stage(\d+)\.(\d+)', None),
                (r'^(?:patch_embed|pos_embed)(\d+)', (0,)),
                (r'^norm', (99999,))  # norm layers sorted last
            ]
        )

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        # Toggle gradient checkpointing for the three stage sequences.
        self.grad_checkpointing = enable

    @torch.jit.ignore
    def get_classifier(self) -> nn.Module:
        # Classifier head module (after global pooling).
        return self.head

    def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
        """Replace the classifier head (and pooling) for a new number of classes."""
        self.num_classes = num_classes
        self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)

    def forward_features(self, x):
        """Run stem + three stages, returning unpooled feature map (NCHW)."""
        if self.stem is not None:
            x = self.stem(x)

        # stage 1
        x = self.patch_embed1(x)
        if self.pos_embed1 is not None:
            x = self.pos_drop(x + self.pos_embed1)
        if self.grad_checkpointing and not torch.jit.is_scripting():
            x = checkpoint_seq(self.stage1, x)
        else:
            x = self.stage1(x)

        # stage 2 (patch_embed2/pos_embed2 are None when vit_stem=True)
        if self.patch_embed2 is not None:
            x = self.patch_embed2(x)
            if self.pos_embed2 is not None:
                x = self.pos_drop(x + self.pos_embed2)
        if self.grad_checkpointing and not torch.jit.is_scripting():
            x = checkpoint_seq(self.stage2, x)
        else:
            x = self.stage2(x)

        # stage3 (same None-skip behavior as stage 2 for vit_stem models)
        if self.patch_embed3 is not None:
            x = self.patch_embed3(x)
            if self.pos_embed3 is not None:
                x = self.pos_drop(x + self.pos_embed3)
        if self.grad_checkpointing and not torch.jit.is_scripting():
            x = checkpoint_seq(self.stage3, x)
        else:
            x = self.stage3(x)

        x = self.norm(x)
        return x

    def forward_head(self, x, pre_logits: bool = False):
        """Pool features and apply classifier; `pre_logits` skips the final linear."""
        x = self.global_pool(x)
        x = self.head_drop(x)
        return x if pre_logits else self.head(x)

    def forward(self, x):
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x


def _create_visformer(variant, pretrained=False, default_cfg=None, **kwargs):
    """Build a Visformer variant through timm's config/pretrained machinery.

    Raises:
        RuntimeError: if `features_only=True` is requested (not supported here).
    """
    if kwargs.get('features_only', None):
        raise RuntimeError('features_only not implemented for Vision Transformer models.')
    model = build_model_with_cfg(Visformer, variant, pretrained, **kwargs)
    return model


def _cfg(url='', **kwargs):
    """Default pretrained-config dict for Visformer checkpoints."""
    return {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'stem.0', 'classifier': 'head',
        **kwargs
    }


default_cfgs = generate_default_cfgs({
    'visformer_tiny.in1k': _cfg(hf_hub_id='timm/'),
    'visformer_small.in1k': _cfg(hf_hub_id='timm/'),
})


@register_model
def visformer_tiny(pretrained=False, **kwargs) -> Visformer:
    """Visformer-Tiny: 192-dim, depths (7, 4, 4), 3 heads."""
    model_cfg = dict(
        init_channels=16, embed_dim=192, depth=(7, 4, 4), num_heads=3, mlp_ratio=4., group=8,
        attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True,
        embed_norm=nn.BatchNorm2d)
    model = _create_visformer('visformer_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs))
    return model


@register_model
def visformer_small(pretrained=False, **kwargs) -> Visformer:
    """Visformer-Small: 384-dim, depths (7, 4, 4), 6 heads."""
    model_cfg = dict(
        init_channels=32, embed_dim=384, depth=(7, 4, 4), num_heads=6, mlp_ratio=4., group=8,
        attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True,
        embed_norm=nn.BatchNorm2d)
    model = _create_visformer('visformer_small', pretrained=pretrained, **dict(model_cfg, **kwargs))
    return model


# @register_model
# def visformer_net1(pretrained=False, **kwargs):
#     model = Visformer(
#         init_channels=None, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111',
#         spatial_conv='000', vit_stem=True, conv_init=True, **kwargs)
#     model.default_cfg = _cfg()
#     return model
#
#
# @register_model
# def visformer_net2(pretrained=False, **kwargs):
#     model = Visformer(
#         init_channels=32, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111',
#         spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
#     model.default_cfg = _cfg()
#     return model
#
#
# @register_model
# def visformer_net3(pretrained=False, **kwargs):
#     model = Visformer(
#         init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111',
#         spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
#     model.default_cfg = _cfg()
#     return model
#
#
# @register_model
# def visformer_net4(pretrained=False, **kwargs):
#     model = Visformer(
#         init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111',
#         spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
#     model.default_cfg = _cfg()
#     return model
#
#
# @register_model
# def visformer_net5(pretrained=False, **kwargs):
#     model = Visformer(
#         init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111',
#         spatial_conv='111', vit_stem=False, conv_init=True, **kwargs)
#     model.default_cfg = _cfg()
#     return model
#
#
# @register_model
# def visformer_net6(pretrained=False, **kwargs):
#     model = Visformer(
#         init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111',
#         pos_embed=False, spatial_conv='111', conv_init=True, **kwargs)
#     model.default_cfg = _cfg()
#     return model
#
#
# @register_model
# def visformer_net7(pretrained=False, **kwargs):
#     model = Visformer(
#         init_channels=32, embed_dim=384, depth=(6, 7, 7), num_heads=6, group=1, attn_stage='000',
#         pos_embed=False, spatial_conv='111', conv_init=True, **kwargs)
#     model.default_cfg = _cfg()
#     return model
pytorch-image-models/timm/models/visformer.py/0
{ "file_path": "pytorch-image-models/timm/models/visformer.py", "repo_id": "pytorch-image-models", "token_count": 10151 }
269
""" Adafactor Optimizer Lifted from https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py Modified by Ross Wightman to fix some issues with factorization dims for non nn.Linear layers Original header/copyright below. """ # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Optional, Tuple import torch from ._types import ParamsT class Adafactor(torch.optim.Optimizer): """Implements Adafactor algorithm. This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost` (see https://arxiv.org/abs/1804.04235) Note that this optimizer internally adjusts the learning rate depending on the *scale_parameter*, *relative_step* and *warmup_init* options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and `relative_step=False`. Ags: params: iterable of parameters to optimize or dicts defining parameter groups lr: external learning rate eps: regularization constants for square gradient and parameter scale respectively eps_scale: regularization constants for parameter scale respectively clip_threshold: threshold of root-mean-square of final gradient update decay_rate: coefficient used to compute running averages of square gradient beta1: coefficient used for computing running averages of gradient weight_decay: weight decay scale_parameter: if True, learning rate is scaled by root-mean-square of parameter warmup_init: time-dependent learning rate computation depends on whether warm-up initialization is being used """ def __init__( self, params: ParamsT, lr: Optional[float] = None, eps: float = 1e-30, eps_scale: float = 1e-3, clip_threshold: float = 1.0, decay_rate: float = -0.8, betas: Optional[Tuple[float, float]] = None, weight_decay: float = 0.0, scale_parameter: bool = True, warmup_init: bool = False, min_dim_size_to_factor: int = 
16, caution: bool = False, ): relative_step = not lr if warmup_init and not relative_step: raise ValueError('warmup_init requires relative_step=True') beta1 = None if betas is None else betas[0] # make it compat with standard betas arg defaults = dict( lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate, beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, relative_step=relative_step, warmup_init=warmup_init, min_dim_size_to_factor=min_dim_size_to_factor, caution=caution, ) super(Adafactor, self).__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('caution', False) group.setdefault('min_dim_size_to_factor', 16) @staticmethod def _get_lr(param_group, param_state): if param_group['relative_step']: min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2 lr_t = min(min_step, 1.0 / math.sqrt(param_state['step'])) param_scale = 1.0 if param_group['scale_parameter']: param_scale = max(param_group['eps_scale'], param_state['RMS']) param_group['lr'] = lr_t * param_scale return param_group['lr'] @staticmethod def _get_options(param_group, param_shape, min_size_to_factor=16): use_first_moment = param_group['beta1'] is not None factored = None ndim = len(param_shape) # Use a simple heuristic to pick factorization row & col, note other PyTorch impl tend to # always use -2, -1 BUT this will not pick correct dims for convolutions. This is a simple # approach that should work in most cases, compare to the slightly more involved approach # in AdafactorBigVision that sorts dims by size, please report if wrong dims chosen. 
if ndim > 2 and param_shape[0] > min_size_to_factor and param_shape[1] > min_size_to_factor: # nD convs in torch are ND + 2 dim weights with leading in/out chs factored = 0, 1 elif ndim >= 2 and param_shape[-2] > min_size_to_factor and param_shape[-1] > min_size_to_factor: # if the criteria above didn't match, test trailing dims for eligibility as per original impl factored = ndim - 2, ndim - 1 return factored, use_first_moment @staticmethod def _rms(tensor): return tensor.norm(2) / (tensor.numel() ** 0.5) def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col, dim_col, dim_row): # from our dim heuristic, always dim_col < dim_row, so col reduction dim for factored row = dim_col r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=dim_col, keepdim=True)).rsqrt_().unsqueeze(dim_row) c_factor = exp_avg_sq_col.unsqueeze(dim_col).rsqrt() return torch.mul(r_factor, c_factor) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError('Adafactor does not support sparse gradients.') state = self.state[p] factored_dims, use_first_moment = self._get_options( group, grad.shape, min_size_to_factor=group['min_dim_size_to_factor'], ) # State Initialization if len(state) == 0: state['step'] = 0 if use_first_moment: # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(grad) if factored_dims is not None: dim_col, dim_row = factored_dims def _remove_dim(shape, dim): return shape[:dim] + shape[dim + 1:] state['exp_avg_sq_row'] = torch.zeros(_remove_dim(grad.shape, dim_row)).to(grad) state['exp_avg_sq_col'] = torch.zeros(_remove_dim(grad.shape, dim_col)).to(grad) else: state['exp_avg_sq'] = torch.zeros_like(grad) state['RMS'] = 0 else: if use_first_moment: state['exp_avg'] = state['exp_avg'].to(grad) if factored_dims is not None: state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad) state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad) else: state['exp_avg_sq'] = state['exp_avg_sq'].to(grad) p_fp32 = p if p.dtype in {torch.float16, torch.bfloat16}: p_fp32 = p_fp32.float() state['step'] += 1 state['RMS'] = self._rms(p_fp32) lr_t = self._get_lr(group, state) beta2t = 1.0 - math.pow(state['step'], group['decay_rate']) update = grad ** 2 + group['eps'] if factored_dims is not None: dim_col, dim_row = factored_dims exp_avg_sq_row = state['exp_avg_sq_row'] exp_avg_sq_col = state['exp_avg_sq_col'] exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=dim_row), alpha=1.0 - beta2t) exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=dim_col), alpha=1.0 - beta2t) # Approximation of exponential moving average of square of gradient update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col, dim_col, 
dim_row) update.mul_(grad) else: exp_avg_sq = state['exp_avg_sq'] exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) update = exp_avg_sq.rsqrt().mul_(grad) update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0)) update.mul_(lr_t) if use_first_moment: exp_avg = state['exp_avg'] exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1']) if group['caution']: # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085 mask = (exp_avg * grad > 0).to(grad.dtype) mask.div_(mask.mean().clamp_(min=1e-3)) update = exp_avg * mask else: update = exp_avg if group['weight_decay'] != 0: p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * lr_t) p_fp32.add_(-update) if p.dtype in {torch.float16, torch.bfloat16}: p.copy_(p_fp32) return loss
pytorch-image-models/timm/optim/adafactor.py/0
{ "file_path": "pytorch-image-models/timm/optim/adafactor.py", "repo_id": "pytorch-image-models", "token_count": 4921 }
270
""" NAdamW Optimizer Based on simplified algorithm in https://github.com/mlcommons/algorithmic-efficiency/tree/main/baselines/nadamw Added multi-tensor (foreach) path. References for added functionality: Cautious Optimizers: https://arxiv.org/abs/2411.16085 Why Gradients Rapidly Increase Near the End of Training: https://arxiv.org/abs/2506.02285 """ import math from typing import List, Optional, Tuple import torch from torch import Tensor from ._types import ParamsT # Modified from github.com/pytorch/pytorch/blob/v1.12.1/torch/optim/adamw.py. class NAdamW(torch.optim.Optimizer): """ Implements NAdamW algorithm. See Table 1 in https://arxiv.org/abs/1910.05446 for the implementation of the NAdam algorithm (there is also a comment in the code which highlights the only difference of NAdamW and AdamW). For further details regarding the algorithm we refer to - Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101 - On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ Args: params: iterable of parameters to optimize or dicts defining parameter groups lr: learning rate betas: coefficients used for computing running averages of gradient and its square eps: term added to the denominator to improve numerical stability weight_decay: weight decay coefficient caution: enable caution corrected_weight_decay: apply corrected weight decay (lr**2 / max_lr) """ def __init__( self, params: ParamsT, lr: float = 1e-3, betas: Tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 1e-2, caution: bool = False, corrected_weight_decay: bool = False, maximize: bool = False, foreach: Optional[bool] = None, capturable: bool = False, ): if not 0.0 <= lr: raise ValueError(f'Invalid learning rate: {lr}') if not 0.0 <= eps: raise ValueError(f'Invalid epsilon value: {eps}') if not 0.0 <= betas[0] < 1.0: raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}') if not 0.0 <= betas[1] < 1.0: raise ValueError(f'Invalid beta 
parameter at index 1: {betas[1]}') if not 0.0 <= weight_decay: raise ValueError(f'Invalid weight_decay value: {weight_decay}') defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, caution=caution, corrected_weight_decay=corrected_weight_decay, foreach=foreach, maximize=maximize, capturable=capturable, ) super().__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) state_values = list(self.state.values()) step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step']) if not step_is_tensor: for s in state_values: s['step'] = torch.tensor(float(s['step'])) for group in self.param_groups: group.setdefault('caution', False) group.setdefault('corrected_weight_decay', False) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ self._cuda_graph_capture_health_check() loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] exp_avgs = [] exp_avg_sqs = [] state_steps = [] beta1, beta2 = group['betas'] for p in group['params']: if p.grad is None: continue params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError('NAdamW does not support sparse gradients') grads.append(p.grad) state = self.state[p] # State initialization if len(state) == 0: state['step'] = torch.tensor(0.) 
# Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) exp_avgs.append(state['exp_avg']) exp_avg_sqs.append(state['exp_avg_sq']) state_steps.append(state['step']) nadamw( params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps, beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], eps=group['eps'], caution=group['caution'], maximize=group['maximize'], capturable=group['capturable'], max_lr=self.defaults['lr'] if group['corrected_weight_decay'] else None, ) return loss def nadamw( params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], state_steps: List[Tensor], foreach: Optional[bool] = None, capturable: bool = False, *, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, caution: bool, maximize: bool, max_lr: float, ) -> None: r"""Functional API that performs NAdamW algorithm computation. See NAdamW class for details. 
""" if not all(isinstance(t, torch.Tensor) for t in state_steps): raise RuntimeError( 'API has changed, `state_steps` argument must contain a list of' + ' singleton tensors') if foreach is None: try: # cannot do foreach if this overload doesn't exist when caution enabled foreach = not caution or 'Scalar' in torch.ops.aten._foreach_maximum_.overloads() except: foreach = False if foreach and not torch.jit.is_scripting(): func = _multi_tensor_nadamw else: func = _single_tensor_nadamw func( params, grads, exp_avgs, exp_avg_sqs, state_steps, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, eps=eps, caution=caution, maximize=maximize, capturable=capturable, max_lr=max_lr, ) def _single_tensor_nadamw( params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], state_steps: List[Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, caution: bool, maximize: bool, capturable: bool, max_lr: Optional[float], ): for i, param in enumerate(params): grad = grads[i] if not maximize else -grads[i] exp_avg = exp_avgs[i] exp_avg_sq = exp_avg_sqs[i] step_t = state_steps[i] # Update step. step_t += 1 # Perform stepweight decay. wd_scale = lr if max_lr is None else lr ** 2 / max_lr param.mul_(1. - wd_scale * weight_decay) # Decay the first and second moment running average coefficient. exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) if capturable: step = step_t # 1 - beta1 ** step can't be captured in a CUDA graph, even if step is a CUDA tensor # (incurs "RuntimeError: CUDA error: operation not permitted when stream is capturing") bias_correction1 = 1 - torch.pow(beta1, step) bias_correction2 = 1 - torch.pow(beta2, step) step_size = lr / bias_correction1 step_size_neg = step_size.neg() bias_correction2_sqrt = bias_correction2.sqrt() # Only difference between NAdamW and AdamW in this implementation. 
# The official PyTorch implementation of NAdam uses a different algorithm. exp_avg = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1) denom = (exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg) if caution: # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085 # FIXME not 100% sure if this remains capturable? mask = (exp_avg * grad > 0).to(grad.dtype) mask.div_(mask.mean().clamp_(min=1e-3)) exp_avg.mul_(mask) param.addcdiv_(exp_avg, denom) else: step = step_t.item() bias_correction1 = 1 - beta1 ** step bias_correction2 = 1 - beta2 ** step step_size = lr / bias_correction1 bias_correction2_sqrt = math.sqrt(bias_correction2) # Apply Nesterov. Only difference between NAdamW and AdamW in this implementation. # The official PyTorch implementation of NAdam uses a different algorithm. exp_avg = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1) denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps) if caution: # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085 mask = (exp_avg * grad > 0).to(grad.dtype) mask.div_(mask.mean().clamp_(min=1e-3)) exp_avg.mul_(mask) param.addcdiv_(exp_avg, denom, value=-step_size) def _multi_tensor_nadamw( params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], state_steps: List[Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, caution: bool, maximize: bool, capturable: bool, max_lr: Optional[float], ): if len(params) == 0: return if capturable: assert all( p.is_cuda and step.is_cuda for p, step in zip(params, state_steps) ), "If capturable=True, params and state_steps must be CUDA tensors." 
if maximize: grads = torch._foreach_neg(tuple(grads)) # type: ignore[assignment] grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads] exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avgs] exp_avg_sqs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avg_sqs] params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in params] # update steps torch._foreach_add_(state_steps, 1) # Perform stepweight decay wd_scale = lr if max_lr is None else lr ** 2 / max_lr torch._foreach_mul_(params, 1 - wd_scale * weight_decay) # Decay the first and second moment running average coefficient torch._foreach_mul_(exp_avgs, beta1) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) torch._foreach_mul_(exp_avg_sqs, beta2) torch._foreach_addcmul_(exp_avg_sqs, grads, grads, 1 - beta2) if capturable: # TODO: use foreach_pow if/when foreach_pow is added bias_correction1 = [torch.pow(beta1, step) for step in state_steps] bias_correction2 = [torch.pow(beta2, step) for step in state_steps] # foreach_sub doesn't allow a scalar as the first arg torch._foreach_sub_(bias_correction1, 1) torch._foreach_sub_(bias_correction2, 1) torch._foreach_neg_(bias_correction1) torch._foreach_neg_(bias_correction2) # foreach_div doesn't allow a scalar as the first arg step_size = torch._foreach_div(bias_correction1, lr) torch._foreach_reciprocal_(step_size) torch._foreach_neg_(step_size) bias_correction2_sqrt = torch._foreach_sqrt(bias_correction2) # Only difference between NAdamW and AdamW in this implementation. # The official PyTorch implementation of NAdam uses a different algorithm. 
exp_avgs = torch._foreach_mul(exp_avgs, beta1) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs) torch._foreach_div_( exp_avg_sq_sqrt, torch._foreach_mul(bias_correction2_sqrt, step_size) ) eps_over_step_size = torch._foreach_div(step_size, eps) torch._foreach_reciprocal_(eps_over_step_size) denom = torch._foreach_add(exp_avg_sq_sqrt, eps_over_step_size) if caution: # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085 masks = torch._foreach_mul(exp_avgs, grads) masks = [(m > 0).to(g.dtype) for m, g in zip(masks, grads)] # capturable? mask_scale = [m.mean() for m in masks] torch._foreach_maximum_(mask_scale, 1e-3) #torch._foreach_clamp_min_(mask_scale, 1e-3) torch._foreach_div_(masks, mask_scale) torch._foreach_mul_(exp_avgs, masks) torch._foreach_addcdiv_(params, exp_avgs, denom) else: bias_correction1 = [1 - beta1 ** step.item() for step in state_steps] bias_correction2 = [1 - beta2 ** step.item() for step in state_steps] step_size = [(lr / bc) * -1 for bc in bias_correction1] bias_correction2_sqrt = [math.sqrt(bc) for bc in bias_correction2] # Apply Nesterov. Only difference between NAdamW and AdamW in this implementation. # The official PyTorch implementation of NAdam uses a different algorithm. 
exp_avgs = torch._foreach_mul(exp_avgs, beta1) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs) torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) denom = torch._foreach_add(exp_avg_sq_sqrt, eps) if caution: # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085 masks = torch._foreach_mul(exp_avgs, grads) masks = [(m > 0).to(g.dtype) for m, g in zip(masks, grads)] mask_scale = [m.mean() for m in masks] torch._foreach_maximum_(mask_scale, 1e-3) #torch._foreach_clamp_min_(mask_scale, 1e-3) torch._foreach_div_(masks, mask_scale) torch._foreach_mul_(exp_avgs, masks) torch._foreach_addcdiv_(params, exp_avgs, denom, step_size)
pytorch-image-models/timm/optim/nadamw.py/0
{ "file_path": "pytorch-image-models/timm/optim/nadamw.py", "repo_id": "pytorch-image-models", "token_count": 7305 }
271
""" TanH Scheduler TanH schedule with warmup, cycle/restarts, noise. Hacked together by / Copyright 2021 Ross Wightman """ import logging import math import numpy as np import torch from typing import List from .scheduler import Scheduler _logger = logging.getLogger(__name__) class TanhLRScheduler(Scheduler): """ Hyberbolic-Tangent decay with restarts. This is described in the paper https://arxiv.org/abs/1806.01593 """ def __init__( self, optimizer: torch.optim.Optimizer, t_initial: int, lb: float = -7., ub: float = 3., lr_min: float = 0., cycle_mul: float = 1., cycle_decay: float = 1., cycle_limit: int = 1, warmup_t=0, warmup_lr_init=0, warmup_prefix=False, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, initialize=True, ) -> None: super().__init__( optimizer, param_group_field="lr", t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize, ) assert t_initial > 0 assert lr_min >= 0 assert lb < ub assert cycle_limit >= 0 assert warmup_t >= 0 assert warmup_lr_init >= 0 self.lb = lb self.ub = ub self.t_initial = t_initial self.lr_min = lr_min self.cycle_mul = cycle_mul self.cycle_decay = cycle_decay self.cycle_limit = cycle_limit self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix if self.warmup_t: t_v = self.base_values if self.warmup_prefix else self._get_lr(self.warmup_t) self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in t_v] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t if self.cycle_mul != 1: i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) t_i = self.cycle_mul ** i * self.t_initial t_curr = t - (1 - self.cycle_mul ** 
i) / (1 - self.cycle_mul) * self.t_initial else: i = t // self.t_initial t_i = self.t_initial t_curr = t - (self.t_initial * i) if i < self.cycle_limit: gamma = self.cycle_decay ** i lr_max_values = [v * gamma for v in self.base_values] tr = t_curr / t_i lrs = [ self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 - math.tanh(self.lb * (1. - tr) + self.ub * tr)) for lr_max in lr_max_values ] else: lrs = [self.lr_min for _ in self.base_values] return lrs def get_cycle_length(self, cycles=0): cycles = max(1, cycles or self.cycle_limit) if self.cycle_mul == 1.0: t = self.t_initial * cycles else: t = int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) return t + self.warmup_t if self.warmup_prefix else t
pytorch-image-models/timm/scheduler/tanh_lr.py/0
{ "file_path": "pytorch-image-models/timm/scheduler/tanh_lr.py", "repo_id": "pytorch-image-models", "token_count": 2000 }
272
import random

import numpy as np
import torch


def random_seed(seed=42, rank=0):
    """Seed the Python, NumPy and PyTorch RNGs.

    The effective seed is ``seed + rank`` so each distributed rank gets
    its own deterministic random stream.
    """
    effective_seed = seed + rank
    torch.manual_seed(effective_seed)
    np.random.seed(effective_seed)
    random.seed(effective_seed)
pytorch-image-models/timm/utils/random.py/0
{ "file_path": "pytorch-image-models/timm/utils/random.py", "repo_id": "pytorch-image-models", "token_count": 68 }
273
# Targets are phony: none of them produce a file with the target's name.
.PHONY: quality style test docs

# Directories checked/formatted by the lint targets below.
check_dirs := examples src tests

# Check code quality of the source code (lint + formatting check, no changes made)
quality:
	ruff check $(check_dirs)
	ruff format --check $(check_dirs)

# Format source code automatically (applies lint fixes and reformats in place)
style:
	ruff check $(check_dirs) --fix
	ruff format $(check_dirs)

# Run smolagents tests
test:
	pytest ./tests/
smolagents/Makefile/0
{ "file_path": "smolagents/Makefile", "repo_id": "smolagents", "token_count": 104 }
274
# `smolagents` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/license_to_call.png" style="max-width:700px"/> </div> ## What is smolagents? `smolagents` is an open-source Python library designed to make it extremely easy to build and run agents using just a few lines of code. Key features of `smolagents` include: ✨ **Simplicity**: The logic for agents fits in ~thousand lines of code. We kept abstractions to their minimal shape above raw code! 🧑‍💻 **First-class support for Code Agents**: [`CodeAgent`](reference/agents#smolagents.CodeAgent) writes its actions in code (as opposed to "agents being used to write code") to invoke tools or perform computations, enabling natural composability (function nesting, loops, conditionals). To make it secure, we support [executing in sandboxed environment](tutorials/secure_code_execution) via [E2B](https://e2b.dev/) or via Docker. 📡 **Common Tool-Calling Agent Support**: In addition to CodeAgents, [`ToolCallingAgent`](reference/agents#smolagents.ToolCallingAgent) supports usual JSON/text-based tool-calling for scenarios where that paradigm is preferred. 🤗 **Hub integrations**: Seamlessly share and load agents and tools to/from the Hub as Gradio Spaces. 🌐 **Model-agnostic**: Easily integrate any large language model (LLM), whether it's hosted on the Hub via [Inference providers](https://huggingface.co/docs/inference-providers/index), accessed via APIs such as OpenAI, Anthropic, or many others via LiteLLM integration, or run locally using Transformers or Ollama. Powering an agent with your preferred LLM is straightforward and flexible. 👁️ **Modality-agnostic**: Beyond text, agents can handle vision, video, and audio inputs, broadening the range of possible applications. Check out [this tutorial](examples/web_browser) for vision. 
🛠️ **Tool-agnostic**: You can use tools from any [MCP server](reference/tools#smolagents.ToolCollection.from_mcp), from [LangChain](reference/tools#smolagents.Tool.from_langchain), you can even use a [Hub Space](reference/tools#smolagents.Tool.from_space) as a tool. 💻 **CLI Tools**: Comes with command-line utilities (smolagent, webagent) for quickly running agents without writing boilerplate code. ## Quickstart [[open-in-colab]] Get started with smolagents in just a few minutes! This guide will show you how to create and run your first agent. ### Installation Install smolagents with pip: ```bash pip install smolagents[toolkit] # Includes default tools like web search ``` ### Create Your First Agent Here's a minimal example to create and run an agent: ```python from smolagents import CodeAgent, InferenceClientModel # Initialize a model (using Hugging Face Inference API) model = InferenceClientModel() # Uses a default model # Create an agent with no tools agent = CodeAgent(tools=[], model=model) # Run the agent with a task result = agent.run("Calculate the sum of numbers from 1 to 10") print(result) ``` That's it! Your agent will use Python code to solve the task and return the result. ### Adding Tools Let's make our agent more capable by adding some tools: ```python from smolagents import CodeAgent, InferenceClientModel, DuckDuckGoSearchTool model = InferenceClientModel() agent = CodeAgent( tools=[DuckDuckGoSearchTool()], model=model, ) # Now the agent can search the web! 
result = agent.run("What is the current weather in Paris?") print(result) ``` ### Using Different Models You can use various models with your agent: ```python # Using a specific model from Hugging Face model = InferenceClientModel(model_id="meta-llama/Llama-2-70b-chat-hf") # Using OpenAI/Anthropic (requires smolagents[litellm]) from smolagents import LiteLLMModel model = LiteLLMModel(model_id="gpt-4") # Using local models (requires smolagents[transformers]) from smolagents import TransformersModel model = TransformersModel(model_id="meta-llama/Llama-2-7b-chat-hf") ``` ## Next Steps - Learn how to set up smolagents with various models and tools in the [Installation Guide](installation) - Check out the [Guided Tour](guided_tour) for more advanced features - Learn about [building custom tools](tutorials/tools) - Explore [secure code execution](tutorials/secure_code_execution) - See how to create [multi-agent systems](tutorials/building_good_agents) <div class="mt-10"> <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5"> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./guided_tour" ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Guided tour</div> <p class="text-gray-700">Learn the basics and become familiar with using Agents. 
Start here if you are using Agents for the first time!</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./examples/text_to_sql" ><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div> <p class="text-gray-700">Practical guides to help you achieve a specific goal: create an agent to generate and test SQL queries!</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./conceptual_guides/intro_agents" ><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div> <p class="text-gray-700">High-level explanations for building a better understanding of important topics.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./tutorials/building_good_agents" ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div> <p class="text-gray-700">Horizontal tutorials that cover important aspects of building agents.</p> </a> </div> </div>
smolagents/docs/source/en/index.md/0
{ "file_path": "smolagents/docs/source/en/index.md", "repo_id": "smolagents", "token_count": 1993 }
275
# एजेंटिक RAG [[open-in-colab]] रिट्रीवल-ऑगमेंटेड-जनरेशन (RAG) है "एक यूजर के प्रश्न का उत्तर देने के लिए LLM का उपयोग करना, लेकिन उत्तर को एक नॉलेज बेस से प्राप्त जानकारी पर आधारित करना"। इसमें वैनिला या फाइन-ट्यून्ड LLM का उपयोग करने की तुलना में कई फायदे हैं: कुछ नाम लेने के लिए, यह उत्तर को सत्य तथ्यों पर आधारित करने और काल्पनिक बातों को कम करने की अनुमति देता है, यह LLM को डोमेन-विशिष्ट ज्ञान प्रदान करने की अनुमति देता है, और यह नॉलेज बेस से जानकारी तक पहुंच का सूक्ष्म नियंत्रण प्रदान करता है। लेकिन वैनिला RAG की सीमाएं हैं, सबसे महत्वपूर्ण ये दो: - यह केवल एक रिट्रीवल स्टेप करता है: यदि परिणाम खराब हैं, तो जनरेशन भी बदले में खराब होगा। - सिमेंटिक समानता की गणना यूजर के प्रश्न को संदर्भ के रूप में करके की जाती है, जो अनुकूल नहीं हो सकती: उदाहरण के लिए, यूजर का प्रश्न अक्सर एक सवाल होगा, जबकि सही उत्तर देने वाला डॉक्यूमेंट सकारात्मक स्वर में हो सकता है, और इसका समानता स्कोर अन्य स्रोत दस्तावेज़ों की तुलना में कम हो सकता है, जो प्रश्नवाचक स्वर में हो सकते हैं। इससे संबंधित जानकारी को चूकने का जोखिम होता है। हम एक RAG एजेंट बनाकर इन समस्याओं को कम कर सकते हैं: बहुत सरल तरीके से, एक रिट्रीवर टूल से लैस एजेंट! यह एजेंट करेगा: ✅ स्वयं क्वेरी तैयार करेगा और ✅ आवश्यकता पड़ने पर पुनः-प्राप्ति के लिए समीक्षा करेगा। इसलिए यह सहज रूप से कुछ उन्नत RAG तकनीकों को प्राप्त कर लेना चाहिए! 
- सिमेंटिक खोज में सीधे यूजर क्वेरी का संदर्भ के रूप में उपयोग करने के बजाय, एजेंट स्वयं एक संदर्भ वाक्य तैयार करता है जो लक्षित डॉक्यूमेंट्स के करीब हो सकता है, जैसा कि [HyDE](https://huggingface.co/papers/2212.10496) में किया गया है। एजेंट जनरेट किए गए स्निपेट्स का उपयोग कर सकता है और आवश्यकता पड़ने पर पुनः-प्राप्ति कर सकता है, जैसा कि [Self-Query](https://docs.llamaindex.ai/en/stable/examples/evaluation/RetryQuery/) में किया गया है। चलिए इस सिस्टम को बनाते हैं। 🛠️ आवश्यक डिपेंडेंसी इंस्टॉल करने के लिए नीचे दी गई लाइन चलाएं। ```bash !pip install smolagents pandas langchain langchain-community sentence-transformers rank_bm25 --upgrade -q ``` HF Inference API को कॉल करने के लिए, आपको अपने एनवायरनमेंट वेरिएबल `HF_TOKEN` के रूप में एक वैध टोकन की आवश्यकता होगी। हम इसे लोड करने के लिए python-dotenv का उपयोग करते हैं। ```py from dotenv import load_dotenv load_dotenv() ``` हम पहले एक नॉलेज बेस लोड करते हैं जिस पर हम RAG को लागू करना चाहते हैं: यह डेटा सेट Hugging Face के कई लाइब्रेरी के डॉक्यूमेंट पृष्ठों का संकलन है, जिन्हें Markdown में स्टोर किया गया है। हम केवल `transformers` लाइब्रेरी के दस्तावेज़ों को रखेंगे। फिर डेटासेट को प्रोसेस करके और इसे एक वेक्टर डेटाबेस में स्टोर करके नॉलेज बेस तैयार करें जिसे रिट्रीवर द्वारा उपयोग किया जाएगा। हम [LangChain](https://python.langchain.com/docs/introduction/) का उपयोग करते हैं क्योंकि इसमें उत्कृष्ट वेक्टर डेटाबेस उपयोगिताएं हैं। ```py import datasets from langchain.docstore.document import Document from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.retrievers import BM25Retriever knowledge_base = datasets.load_dataset("m-ric/huggingface_doc", split="train") knowledge_base = knowledge_base.filter(lambda row: row["source"].startswith("huggingface/transformers")) source_docs = [ Document(page_content=doc["text"], metadata={"source": doc["source"].split("/")[1]}) for doc in knowledge_base ] text_splitter = RecursiveCharacterTextSplitter( chunk_size=500, chunk_overlap=50, 
add_start_index=True, strip_whitespace=True, separators=["\n\n", "\n", ".", " ", ""], ) docs_processed = text_splitter.split_documents(source_docs) ``` अब डॉक्यूमेंट्स तैयार हैं। तो चलिए अपना एजेंटिक RAG सिस्टम बनाएं! 👉 हमें केवल एक RetrieverTool की आवश्यकता है जिसका उपयोग हमारा एजेंट नॉलेज बेस से जानकारी प्राप्त करने के लिए कर सकता है। चूंकि हमें टूल के एट्रीब्यूट के रूप में एक vectordb जोड़ने की आवश्यकता है, हम सरल टूल कंस्ट्रक्टर को `@tool` डेकोरेटर के साथ सीधे उपयोग नहीं कर सकते: इसलिए हम [tools tutorial](../tutorials/tools) में हाइलाइट किए गए सेटअप का पालन करेंगे। ```py from smolagents import Tool class RetrieverTool(Tool): name = "retriever" description = "Uses semantic search to retrieve the parts of transformers documentation that could be most relevant to answer your query." inputs = { "query": { "type": "string", "description": "The query to perform. This should be semantically close to your target documents. Use the affirmative form rather than a question.", } } output_type = "string" def __init__(self, docs, **kwargs): super().__init__(**kwargs) self.retriever = BM25Retriever.from_documents( docs, k=10 ) def forward(self, query: str) -> str: assert isinstance(query, str), "Your search query must be a string" docs = self.retriever.invoke( query, ) return "\nRetrieved documents:\n" + "".join( [ f"\n\n===== Document {str(i)} =====\n" + doc.page_content for i, doc in enumerate(docs) ] ) retriever_tool = RetrieverTool(docs_processed) ``` हमने BM25 का उपयोग किया है, जो एक क्लासिक रिट्रीवल विधि है, क्योंकि इसे सेटअप करना बहुत आसान है। रिट्रीवल सटीकता में सुधार करने के लिए, आप BM25 को डॉक्यूमेंट्स के लिए वेक्टर प्रतिनिधित्व का उपयोग करके सिमेंटिक खोज से बदल सकते हैं: इस प्रकार आप एक अच्छा एम्बेडिंग मॉडल चुनने के लिए [MTEB Leaderboard](https://huggingface.co/spaces/mteb/leaderboard) पर जा सकते हैं। अब यह सीधा है कि एक एजेंट बनाया जाए जो इस `retriever_tool` का उपयोग करेगा! 
एजेंट को इनिशियलाइजेशन पर इन आर्गुमेंट्स की आवश्यकता होगी: - `tools`: टूल्स की एक सूची जिन्हें एजेंट कॉल कर सकेगा। - `model`: LLM जो एजेंट को पावर देता है। हमारा `model` एक कॉलेबल होना चाहिए जो इनपुट के रूप में संदेशों की एक सूची लेता है और टेक्स्ट लौटाता है। इसे एक stop_sequences आर्गुमेंट भी स्वीकार करने की आवश्यकता है जो बताता है कि जनरेशन कब रोकनी है। सुविधा के लिए, हम सीधे पैकेज में प्रदान की गई HfEngine क्लास का उपयोग करते हैं ताकि एक LLM इंजन मिल सके जो Hugging Face के Inference API को कॉल करता है। और हम [meta-llama/Llama-3.3-70B-Instruct](meta-llama/Llama-3.3-70B-Instruct) का उपयोग llm इंजन के रूप में करते हैं क्योंकि: - इसमें लंबा 128k कॉन्टेक्स्ट है, जो लंबे स्रोत दस्तावेजों को प्रोसेस करने में मददगार है - यह हर समय HF के Inference API पर मुफ्त में उपलब्ध है! _नोट:_ Inference API विभिन्न मानदंडों के आधार पर मॉडल होस्ट करता है, और डिप्लॉय किए गए मॉडल बिना पूर्व सूचना के अपडेट या बदले जा सकते हैं। इसके बारे में अधिक जानें [यहां](https://huggingface.co/docs/api-inference/supported-models) पढ़ें। ```py from smolagents import InferenceClientModel, CodeAgent agent = CodeAgent( tools=[retriever_tool], model=InferenceClientModel(model_id="meta-llama/Llama-3.3-70B-Instruct"), max_steps=4, verbosity_level=2 ) ``` CodeAgent को इनिशियलाइज करने पर, इसे स्वचालित रूप से एक डिफ़ॉल्ट सिस्टम प्रॉम्प्ट दिया गया है जो LLM इंजन को चरण-दर-चरण प्रोसेस करने और कोड स्निपेट्स के रूप में टूल कॉल जनरेट करने के लिए कहता है, लेकिन आप आवश्यकतानुसार इस प्रॉम्प्ट टेम्पलेट को अपने से बदल सकते हैं। जब CodeAgent का `.run()` मेथड लॉन्च किया जाता है, तो एजेंट LLM इंजन को कॉल करने का कार्य करता है, और टूल कॉल्स को निष्पादित करता है, यह सब एक लूप में होता है, जो तब तक चलता है जब तक टूल final_answer के साथ अंतिम उत्तर के रूप में नहीं बुलाया जाता। ```py agent_output = agent.run("For a transformers model training, which is slower, the forward or the backward pass?") print("Final output:") print(agent_output) ```
smolagents/docs/source/hi/examples/rag.md/0
{ "file_path": "smolagents/docs/source/hi/examples/rag.md", "repo_id": "smolagents", "token_count": 7604 }
276
- title: 起步 sections: - local: index title: 🤗 Agents - local: guided_tour title: 导览 - title: Tutorials sections: - local: tutorials/building_good_agents title: ✨ 构建好用的 agents - local: tutorials/inspect_runs title: 📊 监控 Agent 的运行 - local: tutorials/tools title: 🛠️ 工具 - 深度指南 - local: tutorials/secure_code_execution title: 🛡️ 使用 E2B 保护你的代码执行 - local: tutorials/memory title: 📚 管理 Agent 的记忆 - title: Conceptual guides sections: - local: conceptual_guides/intro_agents title: 🤖 Agent 化系统介绍 - local: conceptual_guides/react title: 🤔 多步骤 Agent 是如何工作的? - title: Examples sections: - local: examples/text_to_sql title: 自我修正 Text-to-SQL - local: examples/rag title: 借助 agentic RAG 掌控知识库 - local: examples/multiagents title: 编排 multi-agent 系统 - local: examples/web_browser title: 基于视觉模型构建能够浏览网页的agent - title: Reference sections: - local: reference/agents title: Agent-related objects - local: reference/models title: Model-related objects - local: reference/tools title: Tool-related objects
smolagents/docs/source/zh/_toctree.yml/0
{ "file_path": "smolagents/docs/source/zh/_toctree.yml", "repo_id": "smolagents", "token_count": 555 }
277
# 工具 [[open-in-colab]] 在这里,我们将学习高级工具的使用。 > [!TIP] > 如果你是构建 agent 的新手,请确保先阅读 [agent 介绍](../conceptual_guides/intro_agents) 和 [smolagents 导览](../guided_tour)。 - [工具](#工具) - [什么是工具,如何构建一个工具?](#什么是工具如何构建一个工具) - [将你的工具分享到 Hub](#将你的工具分享到-hub) - [将 Space 导入为工具](#将-space-导入为工具) - [使用 LangChain 工具](#使用-langchain-工具) - [管理你的 agent 工具箱](#管理你的-agent-工具箱) - [使用工具集合](#使用工具集合) ### 什么是工具,如何构建一个工具? 工具主要是 LLM 可以在 agent 系统中使用的函数。 但要使用它,LLM 需要被提供一个 API:名称、工具描述、输入类型和描述、输出类型。 所以它不能仅仅是一个函数。它应该是一个类。 因此,核心上,工具是一个类,它包装了一个函数,并带有帮助 LLM 理解如何使用它的元数据。 以下是它的结构: ```python from smolagents import Tool class HFModelDownloadsTool(Tool): name = "model_download_counter" description = """ This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. It returns the name of the checkpoint.""" inputs = { "task": { "type": "string", "description": "the task category (such as text-classification, depth-estimation, etc)", } } output_type = "string" def forward(self, task: str): from huggingface_hub import list_models model = next(iter(list_models(filter=task, sort="downloads", direction=-1))) return model.id model_downloads_tool = HFModelDownloadsTool() ``` 自定义工具继承 [`Tool`] 以继承有用的方法。子类还定义了: - 一个属性 `name`,对应于工具本身的名称。名称通常描述工具的功能。由于代码返回任务中下载量最多的模型,我们将其命名为 `model_download_counter`。 - 一个属性 `description`,用于填充 agent 的系统提示。 - 一个 `inputs` 属性,它是一个带有键 `"type"` 和 `"description"` 的字典。它包含帮助 Python 解释器对输入做出明智选择的信息。 - 一个 `output_type` 属性,指定输出类型。`inputs` 和 `output_type` 的类型应为 [Pydantic 格式](https://docs.pydantic.dev/latest/concepts/json_schema/#generating-json-schema),它们可以是以下之一:[`~AUTHORIZED_TYPES`]。 - 一个 `forward` 方法,包含要执行的推理代码。 这就是它在 agent 中使用所需的全部内容! 
还有另一种构建工具的方法。在 [guided_tour](../guided_tour) 中,我们使用 `@tool` 装饰器实现了一个工具。[`tool`] 装饰器是定义简单工具的推荐方式,但有时你需要更多:在类中使用多个方法以获得更清晰的代码,或使用额外的类属性。 在这种情况下,你可以通过如上所述继承 [`Tool`] 来构建你的工具。 ### 将你的工具分享到 Hub 你可以通过调用 [`~Tool.push_to_hub`] 将你的自定义工具分享到 Hub。确保你已经在 Hub 上为其创建了一个仓库,并且使用的是具有读取权限的 token。 ```python model_downloads_tool.push_to_hub("{your_username}/hf-model-downloads", token="<YOUR_HUGGINGFACEHUB_API_TOKEN>") ``` 为了使推送到 Hub 正常工作,你的工具需要遵守一些规则: - 所有方法都是自包含的,例如使用来自其参数中的变量。 - 根据上述要点,**所有导入应直接在工具的函数中定义**,否则在尝试使用 [`~Tool.save`] 或 [`~Tool.push_to_hub`] 调用你的自定义工具时会出现错误。 - 如果你继承了 `__init__` 方法,除了 `self` 之外,你不能给它任何其他参数。这是因为在特定工具实例初始化期间设置的参数很难跟踪,这阻碍了将它们正确分享到 Hub。无论如何,创建特定类的想法是你已经可以为任何需要硬编码的内容设置类属性(只需在 `class YourTool(Tool):` 行下直接设置 `your_variable=(...)`)。当然,你仍然可以通过将内容分配给 `self.your_variable` 在代码中的任何地方创建类属性。 一旦你的工具被推送到 Hub,你就可以查看它。[这里](https://huggingface.co/spaces/m-ric/hf-model-downloads) 是我推送的 `model_downloads_tool`。它有一个漂亮的 gradio 界面。 在深入工具文件时,你可以发现所有工具的逻辑都在 [tool.py](https://huggingface.co/spaces/m-ric/hf-model-downloads/blob/main/tool.py) 下。这是你可以检查其他人分享的工具的地方。 然后你可以使用 [`load_tool`] 加载工具或使用 [`~Tool.from_hub`] 创建它,并将其传递给 agent 中的 `tools` 参数。 由于运行工具意味着运行自定义代码,你需要确保你信任该仓库,因此我们需要传递 `trust_remote_code=True` 来从 Hub 加载工具。 ```python from smolagents import load_tool, CodeAgent model_download_tool = load_tool( "{your_username}/hf-model-downloads", trust_remote_code=True ) ``` ### 将 Space 导入为工具 你可以使用 [`Tool.from_space`] 方法直接从 Hub 导入一个 Space 作为工具! 
你只需要提供 Hub 上 Space 的 id、它的名称和一个帮助你的 agent 理解工具功能的描述。在底层,这将使用 [`gradio-client`](https://pypi.org/project/gradio-client/) 库来调用 Space。 例如,让我们从 Hub 导入 [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) Space 并使用它生成一张图片。 ```python image_generation_tool = Tool.from_space( "black-forest-labs/FLUX.1-schnell", name="image_generator", description="Generate an image from a prompt" ) image_generation_tool("A sunny beach") ``` 瞧,这是你的图片!🏖️ <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/sunny_beach.webp"> 然后你可以像使用任何其他工具一样使用这个工具。例如,让我们改进提示 `A rabbit wearing a space suit` 并生成它的图片。 ```python from smolagents import CodeAgent, InferenceClientModel model = InferenceClientModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct") agent = CodeAgent(tools=[image_generation_tool], model=model) agent.run( "Improve this prompt, then generate an image of it.", additional_args={'user_prompt': 'A rabbit wearing a space suit'} ) ``` ```text === Agent thoughts: improved_prompt could be "A bright blue space suit wearing rabbit, on the surface of the moon, under a bright orange sunset, with the Earth visible in the background" Now that I have improved the prompt, I can use the image generator tool to generate an image based on this prompt. 
>>> Agent is executing the code below: image = image_generator(prompt="A bright blue space suit wearing rabbit, on the surface of the moon, under a bright orange sunset, with the Earth visible in the background") final_answer(image) ``` <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit_spacesuit_flux.webp"> 这得有多酷?🤩 ### 使用 LangChain 工具 我们喜欢 Langchain,并认为它有一套非常吸引人的工具。 要从 LangChain 导入工具,请使用 `from_langchain()` 方法。 以下是如何使用它来重现介绍中的搜索结果,使用 LangChain 的 web 搜索工具。 这个工具需要 `pip install langchain google-search-results -q` 才能正常工作。 ```python from langchain.agents import load_tools search_tool = Tool.from_langchain(load_tools(["serpapi"])[0]) agent = CodeAgent(tools=[search_tool], model=model) agent.run("How many more blocks (also denoted as layers) are in BERT base encoder compared to the encoder from the architecture proposed in Attention is All You Need?") ``` ### 管理你的 agent 工具箱 你可以通过添加或替换工具来管理 agent 的工具箱。 让我们将 `model_download_tool` 添加到一个仅使用默认工具箱初始化的现有 agent 中。 ```python from smolagents import InferenceClientModel model = InferenceClientModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct") agent = CodeAgent(tools=[], model=model, add_base_tools=True) agent.tools[model_download_tool.name] = model_download_tool ``` 现在我们可以利用新工具: ```python agent.run( "Can you give me the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub but reverse the letters?" ) ``` > [!TIP] > 注意不要向 agent 添加太多工具:这可能会让较弱的 LLM 引擎不堪重负。 ### 使用工具集合 你可以通过使用 ToolCollection 对象来利用工具集合,使用你想要使用的集合的 slug。 然后将它们作为列表传递给 agent 初始化,并开始使用它们! 
```py from smolagents import ToolCollection, CodeAgent image_tool_collection = ToolCollection.from_hub( collection_slug="huggingface-tools/diffusion-tools-6630bb19a942c2306a2cdb6f", token="<YOUR_HUGGINGFACEHUB_API_TOKEN>" ) agent = CodeAgent(tools=[*image_tool_collection.tools], model=model, add_base_tools=True) agent.run("Please draw me a picture of rivers and lakes.") ``` 为了加快启动速度,工具仅在 agent 调用时加载。
smolagents/docs/source/zh/tutorials/tools.md/0
{ "file_path": "smolagents/docs/source/zh/tutorials/tools.md", "repo_id": "smolagents", "token_count": 4839 }
278
import argparse import datetime import json import os import threading import time from concurrent.futures import ThreadPoolExecutor, as_completed from pathlib import Path import datasets import pandas as pd from dotenv import load_dotenv from tqdm import tqdm from smolagents import ( AgentError, CodeAgent, GoogleSearchTool, InferenceClientModel, LiteLLMModel, PythonInterpreterTool, ToolCallingAgent, VisitWebpageTool, ) load_dotenv() os.makedirs("output", exist_ok=True) APPEND_ANSWER_LOCK = threading.Lock() def parse_arguments(): parser = argparse.ArgumentParser(description="Runs an agent powered by the given model on smolagent benchmark.") parser.add_argument( "--date", type=str, default=None, help="The date for the evaluation.", ) parser.add_argument( "--eval-dataset", type=str, default="smolagents/benchmark-v1", ) # The eval dataset is gated, so you must first visit its page to request access: https://huggingface.co/datasets/smolagents-benchmark/benchmark-v1 parser.add_argument( "--model-type", type=str, default="InferenceClientModel", choices=["LiteLLMModel", "InferenceClientModel"], help="The model type to use (LiteLLMModel or InferenceClientModel)", ) parser.add_argument( "--model-id", type=str, required=True, help="The model ID to use for the specified model type", ) parser.add_argument( "--provider", type=str, default="hf-inference", help="The provider for InferenceClientModel - will not be used for LiteLLMModel", ) parser.add_argument( "--agent-action-type", type=str, default="code", choices=["code", "tool-calling", "vanilla"], help="The agent action type: 'code', 'tool-calling', or 'vanilla' to use the vanilla llm", ) parser.add_argument( "--parallel-workers", type=int, default=8, help="The number of processes to run in parallel", ) parser.add_argument( "--push-answers-to-hub", action="store_true", help="Push the answers to the hub", ) parser.add_argument( "--answers-dataset", type=str, default="smolagents/answers", ) return parser.parse_args() def 
load_eval_dataset(eval_dataset): # Choose the tasks to evaluate on: # tasks = ["gaia"] # or evaluate on all tasks: ["gaia", "math", "simpleqa"] tasks = datasets.get_dataset_config_names(eval_dataset) print(tasks) eval_ds = {task: datasets.load_dataset(eval_dataset, task, split="test") for task in tasks} print(pd.DataFrame(eval_ds["simpleqa"]).head()) return eval_ds def serialize_agent_error(obj): if isinstance(obj, AgentError): return {"error_type": obj.__class__.__name__, "message": obj.message} else: return str(obj) def append_answer(entry: dict, jsonl_file: str) -> None: jsonl_file = Path(jsonl_file) jsonl_file.parent.mkdir(parents=True, exist_ok=True) def convert_to_serializable(obj): if hasattr(obj, "dict"): return obj.dict() else: raise TypeError(f"Object of type {type(obj)} is not JSON serializable") with APPEND_ANSWER_LOCK, open(jsonl_file, "a", encoding="utf-8") as fp: fp.write(json.dumps(entry, default=convert_to_serializable) + "\n") assert os.path.exists(jsonl_file), "File not found!" def answer_single_question(example, model, answers_file, action_type): if action_type == "vanilla": agent = model elif action_type == "code": agent = CodeAgent( tools=[GoogleSearchTool(provider="serper"), VisitWebpageTool()], model=model, additional_authorized_imports=["numpy", "sympy"], max_steps=10, ) elif action_type == "tool-calling": agent = ToolCallingAgent( tools=[GoogleSearchTool(provider="serper"), VisitWebpageTool(), PythonInterpreterTool()], model=model, additional_authorized_imports=["numpy", "sympy"], max_steps=10, ) augmented_question = example["question"] if example["source"] == "SimpleQA": augmented_question += " Answer with only the final number." if example["source"] == "MATH": augmented_question += " Write code, not latex." 
start_time = time.time() try: if action_type == "vanilla": answer = agent([{"role": "user", "content": augmented_question}]).content token_counts = agent.monitor.get_total_token_counts() intermediate_steps = answer else: # Run agent 🚀 answer = str(agent.run(augmented_question)) token_counts = agent.monitor.get_total_token_counts() intermediate_steps = [dict(message) for message in agent.write_memory_to_messages()] end_time = time.time() except Exception as e: print("Error on ", augmented_question, e) intermediate_steps = [] token_counts = {"input": 0, "output": 0} answer = str(e) end_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") annotated_example = { "model_id": model.model_id, "agent_action_type": action_type, "question": augmented_question, "original_question": example["question"], "answer": answer, "true_answer": example["true_answer"], "source": example["source"], "intermediate_steps": intermediate_steps, "start_time": start_time, "end_time": end_time, "token_counts": token_counts, } append_answer(annotated_example, answers_file) def answer_questions( eval_ds, model, date, action_type: str = "code", output_dir: str = "output", answers_dataset: str = None, push_answers_to_hub: bool = False, parallel_workers: int = 32, ): date = date or datetime.date.today().isoformat() model_id = model.model_id for task in eval_ds: file_name = f"{output_dir}/{model_id.replace('/', '__')}__{action_type}__{task}__{date}.jsonl" print(f"Starting processing and writing output to '{file_name}'") answered_questions = [] if os.path.exists(file_name): with open(file_name, "r") as f: for line in f: answered_questions.append(json.loads(line)["original_question"]) examples_todo = [example for example in eval_ds[task] if example["question"] not in answered_questions] print(f"Launching {parallel_workers} parallel workers.") with ThreadPoolExecutor(max_workers=parallel_workers) as exe: futures = [ exe.submit(answer_single_question, example, model, file_name, action_type) for 
example in examples_todo ] for f in tqdm(as_completed(futures), total=len(examples_todo), desc="Processing tasks"): f.result() print("All tasks processed.") if push_answers_to_hub and answers_dataset: print("Pushing answers to hub...") ds = datasets.Dataset.from_pandas(pd.read_json(file_name, lines=True), split="test", preserve_index=False) config = f"{model_id.replace('/', '__')}__{action_type}__{task}" data_dir = f"{model_id}/{action_type}/{task}/{date}" ds.push_to_hub( answers_dataset, config_name=config, data_dir=data_dir, split="test", commit_message=f"Upload {config}", ) if __name__ == "__main__": args = parse_arguments() eval_ds = load_eval_dataset(args.eval_dataset) if args.model_type == "LiteLLMModel": model = LiteLLMModel( model_id=args.model_id, max_completion_tokens=8192, ) else: model = InferenceClientModel(model_id=args.model_id, provider=args.provider, max_tokens=8192) answer_questions( eval_ds, model, args.date, action_type=args.agent_action_type, answers_dataset=args.answers_dataset, push_answers_to_hub=args.push_answers_to_hub, parallel_workers=args.parallel_workers, )
smolagents/examples/smolagents_benchmark/run.py/0
{ "file_path": "smolagents/examples/smolagents_benchmark/run.py", "repo_id": "smolagents", "token_count": 3660 }
279
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from dataclasses import dataclass, field from enum import IntEnum from rich import box from rich.console import Console, Group from rich.panel import Panel from rich.rule import Rule from rich.syntax import Syntax from rich.table import Table from rich.text import Text from rich.tree import Tree from smolagents.utils import escape_code_brackets __all__ = ["AgentLogger", "LogLevel", "Monitor", "TokenUsage", "Timing"] @dataclass class TokenUsage: """ Contains the token usage information for a given step or run. """ input_tokens: int output_tokens: int total_tokens: int = field(init=False) def __post_init__(self): self.total_tokens = self.input_tokens + self.output_tokens def dict(self): return { "input_tokens": self.input_tokens, "output_tokens": self.output_tokens, "total_tokens": self.total_tokens, } @dataclass class Timing: """ Contains the timing information for a given step or run. 
""" start_time: float end_time: float | None = None @property def duration(self): return None if self.end_time is None else self.end_time - self.start_time def dict(self): return { "start_time": self.start_time, "end_time": self.end_time, "duration": self.duration, } def __repr__(self) -> str: return f"Timing(start_time={self.start_time}, end_time={self.end_time}, duration={self.duration})" class Monitor: def __init__(self, tracked_model, logger): self.step_durations = [] self.tracked_model = tracked_model self.logger = logger self.total_input_token_count = 0 self.total_output_token_count = 0 def get_total_token_counts(self) -> TokenUsage: return TokenUsage( input_tokens=self.total_input_token_count, output_tokens=self.total_output_token_count, ) def reset(self): self.step_durations = [] self.total_input_token_count = 0 self.total_output_token_count = 0 def update_metrics(self, step_log): """Update the metrics of the monitor. Args: step_log ([`MemoryStep`]): Step log to update the monitor with. 
""" step_duration = step_log.timing.duration self.step_durations.append(step_duration) console_outputs = f"[Step {len(self.step_durations)}: Duration {step_duration:.2f} seconds" if step_log.token_usage is not None: self.total_input_token_count += step_log.token_usage.input_tokens self.total_output_token_count += step_log.token_usage.output_tokens console_outputs += ( f"| Input tokens: {self.total_input_token_count:,} | Output tokens: {self.total_output_token_count:,}" ) console_outputs += "]" self.logger.log(Text(console_outputs, style="dim"), level=1) class LogLevel(IntEnum): OFF = -1 # No output ERROR = 0 # Only errors INFO = 1 # Normal output (default) DEBUG = 2 # Detailed output YELLOW_HEX = "#d4b702" class AgentLogger: def __init__(self, level: LogLevel = LogLevel.INFO, console: Console | None = None): self.level = level if console is None: self.console = Console(highlight=False) else: self.console = console def log(self, *args, level: int | str | LogLevel = LogLevel.INFO, **kwargs) -> None: """Logs a message to the console. Args: level (LogLevel, optional): Defaults to LogLevel.INFO. 
""" if isinstance(level, str): level = LogLevel[level.upper()] if level <= self.level: self.console.print(*args, **kwargs) def log_error(self, error_message: str) -> None: self.log(escape_code_brackets(error_message), style="bold red", level=LogLevel.ERROR) def log_markdown(self, content: str, title: str | None = None, level=LogLevel.INFO, style=YELLOW_HEX) -> None: markdown_content = Syntax( content, lexer="markdown", theme="github-dark", word_wrap=True, ) if title: self.log( Group( Rule( "[bold italic]" + title, align="left", style=style, ), markdown_content, ), level=level, ) else: self.log(markdown_content, level=level) def log_code(self, title: str, content: str, level: int = LogLevel.INFO) -> None: self.log( Panel( Syntax( content, lexer="python", theme="monokai", word_wrap=True, ), title="[bold]" + title, title_align="left", box=box.HORIZONTALS, ), level=level, ) def log_rule(self, title: str, level: int = LogLevel.INFO) -> None: self.log( Rule( "[bold]" + title, characters="━", style=YELLOW_HEX, ), level=LogLevel.INFO, ) def log_task(self, content: str, subtitle: str, title: str | None = None, level: LogLevel = LogLevel.INFO) -> None: self.log( Panel( f"\n[bold]{escape_code_brackets(content)}\n", title="[bold]New run" + (f" - {title}" if title else ""), subtitle=subtitle, border_style=YELLOW_HEX, subtitle_align="left", ), level=level, ) def log_messages(self, messages: list[dict], level: LogLevel = LogLevel.DEBUG) -> None: messages_as_string = "\n".join([json.dumps(dict(message), indent=4) for message in messages]) self.log( Syntax( messages_as_string, lexer="markdown", theme="github-dark", word_wrap=True, ), level=level, ) def visualize_agent_tree(self, agent): def create_tools_section(tools_dict): table = Table(show_header=True, header_style="bold") table.add_column("Name", style="#1E90FF") table.add_column("Description") table.add_column("Arguments") for name, tool in tools_dict.items(): args = [ f"{arg_name} (`{info.get('type', 'Any')}`{', optional' if 
info.get('optional') else ''}): {info.get('description', '')}" for arg_name, info in getattr(tool, "inputs", {}).items() ] table.add_row(name, getattr(tool, "description", str(tool)), "\n".join(args)) return Group("🛠️ [italic #1E90FF]Tools:[/italic #1E90FF]", table) def get_agent_headline(agent, name: str | None = None): name_headline = f"{name} | " if name else "" return f"[bold {YELLOW_HEX}]{name_headline}{agent.__class__.__name__} | {agent.model.model_id}" def build_agent_tree(parent_tree, agent_obj): """Recursively builds the agent tree.""" parent_tree.add(create_tools_section(agent_obj.tools)) if agent_obj.managed_agents: agents_branch = parent_tree.add("🤖 [italic #1E90FF]Managed agents:") for name, managed_agent in agent_obj.managed_agents.items(): agent_tree = agents_branch.add(get_agent_headline(managed_agent, name)) if managed_agent.__class__.__name__ == "CodeAgent": agent_tree.add( f"✅ [italic #1E90FF]Authorized imports:[/italic #1E90FF] {managed_agent.additional_authorized_imports}" ) agent_tree.add(f"📝 [italic #1E90FF]Description:[/italic #1E90FF] {managed_agent.description}") build_agent_tree(agent_tree, managed_agent) main_tree = Tree(get_agent_headline(agent)) if agent.__class__.__name__ == "CodeAgent": main_tree.add( f"✅ [italic #1E90FF]Authorized imports:[/italic #1E90FF] {agent.additional_authorized_imports}" ) build_agent_tree(main_tree, agent) self.console.print(main_tree)
smolagents/src/smolagents/monitoring.py/0
{ "file_path": "smolagents/src/smolagents/monitoring.py", "repo_id": "smolagents", "token_count": 4314 }
280
from unittest.mock import patch import pytest from smolagents.cli import load_model from smolagents.local_python_executor import CodeOutput, LocalPythonExecutor from smolagents.models import InferenceClientModel, LiteLLMModel, OpenAIServerModel, TransformersModel @pytest.fixture def set_env_vars(monkeypatch): monkeypatch.setenv("FIREWORKS_API_KEY", "test_fireworks_api_key") monkeypatch.setenv("HF_TOKEN", "test_hf_api_key") def test_load_model_openai_server_model(set_env_vars): with patch("openai.OpenAI") as MockOpenAI: model = load_model("OpenAIServerModel", "test_model_id") assert isinstance(model, OpenAIServerModel) assert model.model_id == "test_model_id" assert MockOpenAI.call_count == 1 assert MockOpenAI.call_args.kwargs["base_url"] == "https://api.fireworks.ai/inference/v1" assert MockOpenAI.call_args.kwargs["api_key"] == "test_fireworks_api_key" def test_load_model_litellm_model(): model = load_model("LiteLLMModel", "test_model_id", api_key="test_api_key", api_base="https://api.test.com") assert isinstance(model, LiteLLMModel) assert model.api_key == "test_api_key" assert model.api_base == "https://api.test.com" assert model.model_id == "test_model_id" def test_load_model_transformers_model(): with ( patch( "transformers.AutoModelForImageTextToText.from_pretrained", side_effect=ValueError("Unrecognized configuration class"), ), patch("transformers.AutoModelForCausalLM.from_pretrained"), patch("transformers.AutoTokenizer.from_pretrained"), ): model = load_model("TransformersModel", "test_model_id") assert isinstance(model, TransformersModel) assert model.model_id == "test_model_id" def test_load_model_hf_api_model(set_env_vars): with patch("huggingface_hub.InferenceClient") as huggingface_hub_InferenceClient: model = load_model("InferenceClientModel", "test_model_id") assert isinstance(model, InferenceClientModel) assert model.model_id == "test_model_id" assert huggingface_hub_InferenceClient.call_count == 1 assert 
huggingface_hub_InferenceClient.call_args.kwargs["token"] == "test_hf_api_key" def test_load_model_invalid_model_type(): with pytest.raises(ValueError, match="Unsupported model type: InvalidModel"): load_model("InvalidModel", "test_model_id") def test_cli_main(capsys): with patch("smolagents.cli.load_model") as mock_load_model: mock_load_model.return_value = "mock_model" with patch("smolagents.cli.CodeAgent") as mock_code_agent: from smolagents.cli import run_smolagent run_smolagent("test_prompt", [], "InferenceClientModel", "test_model_id", provider="hf-inference") # load_model assert len(mock_load_model.call_args_list) == 1 assert mock_load_model.call_args.args == ("InferenceClientModel", "test_model_id") assert mock_load_model.call_args.kwargs == {"api_base": None, "api_key": None, "provider": "hf-inference"} # CodeAgent assert len(mock_code_agent.call_args_list) == 1 assert mock_code_agent.call_args.args == () assert mock_code_agent.call_args.kwargs == { "tools": [], "model": "mock_model", "additional_authorized_imports": None, } # agent.run assert len(mock_code_agent.return_value.run.call_args_list) == 1 assert mock_code_agent.return_value.run.call_args.args == ("test_prompt",) # print captured = capsys.readouterr() assert "Running agent with these tools: []" in captured.out def test_vision_web_browser_main(): with patch("smolagents.vision_web_browser.helium"): with patch("smolagents.vision_web_browser.load_model") as mock_load_model: mock_load_model.return_value = "mock_model" with patch("smolagents.vision_web_browser.CodeAgent") as mock_code_agent: from smolagents.vision_web_browser import helium_instructions, run_webagent run_webagent("test_prompt", "InferenceClientModel", "test_model_id", provider="hf-inference") # load_model assert len(mock_load_model.call_args_list) == 1 assert mock_load_model.call_args.args == ("InferenceClientModel", "test_model_id") # CodeAgent assert len(mock_code_agent.call_args_list) == 1 assert mock_code_agent.call_args.args == () 
assert len(mock_code_agent.call_args.kwargs["tools"]) == 4 assert mock_code_agent.call_args.kwargs["model"] == "mock_model" assert mock_code_agent.call_args.kwargs["additional_authorized_imports"] == ["helium"] # agent.python_executor assert len(mock_code_agent.return_value.python_executor.call_args_list) == 1 assert mock_code_agent.return_value.python_executor.call_args.args == ("from helium import *",) assert LocalPythonExecutor(["helium"])("from helium import *") == CodeOutput( output=None, logs="", is_final_answer=False ) # agent.run assert len(mock_code_agent.return_value.run.call_args_list) == 1 assert mock_code_agent.return_value.run.call_args.args == ("test_prompt" + helium_instructions,)
smolagents/tests/test_cli.py/0
{ "file_path": "smolagents/tests/test_cli.py", "repo_id": "smolagents", "token_count": 1975 }
281
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest import uuid import PIL.Image from smolagents.agent_types import AgentAudio, AgentImage, AgentText from .utils.markers import require_soundfile, require_torch def get_new_path(suffix="") -> str: directory = tempfile.mkdtemp() return os.path.join(directory, str(uuid.uuid4()) + suffix) @require_soundfile @require_torch class AgentAudioTests(unittest.TestCase): def test_from_tensor(self): import soundfile as sf import torch tensor = torch.rand(12, dtype=torch.float64) - 0.5 agent_type = AgentAudio(tensor) path = str(agent_type.to_string()) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4)) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(path)) # Ensure that the file contains the same value as the original tensor new_tensor, _ = sf.read(path) self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4)) def test_from_string(self): import soundfile as sf import torch tensor = torch.rand(12, dtype=torch.float64) - 0.5 path = get_new_path(suffix=".wav") sf.write(path, tensor, 16000) agent_type = AgentAudio(path) self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4)) self.assertEqual(agent_type.to_string(), path) @require_torch class TestAgentImage: def test_from_tensor(self): 
import torch tensor = torch.randint(0, 256, (64, 64, 3)) agent_type = AgentImage(tensor) path = str(agent_type.to_string()) # Ensure that the tensor and the agent_type's tensor are the same assert torch.allclose(tensor, agent_type._tensor, atol=1e-4) assert isinstance(agent_type.to_raw(), PIL.Image.Image) # Ensure the path remains even after the object deletion del agent_type assert os.path.exists(path) def test_from_string(self, shared_datadir): path = shared_datadir / "000000039769.png" image = PIL.Image.open(path) agent_type = AgentImage(path) assert path.samefile(agent_type.to_string()) assert image == agent_type.to_raw() # Ensure the path remains even after the object deletion del agent_type assert os.path.exists(path) def test_from_image(self, shared_datadir): path = shared_datadir / "000000039769.png" image = PIL.Image.open(path) agent_type = AgentImage(image) assert not path.samefile(agent_type.to_string()) assert image == agent_type.to_raw() # Ensure the path remains even after the object deletion del agent_type assert os.path.exists(path) class AgentTextTests(unittest.TestCase): def test_from_string(self): string = "Hey!" agent_type = AgentText(string) self.assertEqual(string, agent_type.to_string()) self.assertEqual(string, agent_type.to_raw())
smolagents/tests/test_types.py/0
{ "file_path": "smolagents/tests/test_types.py", "repo_id": "smolagents", "token_count": 1468 }
282
ARG PLATFORM=xpu FROM lukemathwalker/cargo-chef:latest-rust-1.85.1 AS chef WORKDIR /usr/src ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse FROM chef AS planner COPY Cargo.lock Cargo.lock COPY Cargo.toml Cargo.toml COPY rust-toolchain.toml rust-toolchain.toml COPY proto proto COPY benchmark benchmark COPY router router COPY backends backends COPY launcher launcher RUN cargo chef prepare --recipe-path recipe.json FROM chef AS builder RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ python3.11-dev RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \ curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \ unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \ unzip -o $PROTOC_ZIP -d /usr/local 'include/*' && \ rm -f $PROTOC_ZIP COPY --from=planner /usr/src/recipe.json recipe.json RUN cargo chef cook --profile release-opt --recipe-path recipe.json ARG GIT_SHA ARG DOCKER_LABEL COPY Cargo.lock Cargo.lock COPY Cargo.toml Cargo.toml COPY rust-toolchain.toml rust-toolchain.toml COPY proto proto COPY benchmark benchmark COPY router router COPY backends backends COPY launcher launcher RUN cargo build --profile release-opt --frozen # Text Generation Inference base image for Intel FROM intel/oneapi-basekit:2025.1.3-0-devel-ubuntu22.04 AS xpu USER root ARG MAMBA_VERSION=23.1.0-1 ARG PYTHON_VERSION='3.11.10' # Automatically set by buildx ARG TARGETPLATFORM ENV PATH=/opt/conda/bin:$PATH # TGI seem to require libssl.so.1.1 instead of libssl.so.3 so we can't use ubuntu 22.04. Ubuntu 20.04 has python==3.8, and TGI requires python>=3.9, hence the need for miniconda. 
# Install mamba # translating Docker's TARGETPLATFORM into mamba arches RUN case ${TARGETPLATFORM} in \ "linux/arm64") MAMBA_ARCH=aarch64 ;; \ *) MAMBA_ARCH=x86_64 ;; \ esac && \ curl -fsSL -v -o ~/mambaforge.sh -O "https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-${MAMBA_ARCH}.sh" RUN chmod +x ~/mambaforge.sh && \ bash ~/mambaforge.sh -b -p /opt/conda && \ rm ~/mambaforge.sh RUN case ${TARGETPLATFORM} in \ "linux/arm64") exit 1 ;; \ *) /opt/conda/bin/conda update -y conda && \ /opt/conda/bin/conda install -y "python=${PYTHON_VERSION}" ;; \ esac && \ /opt/conda/bin/conda clean -ya # libssl.so.1.1 is not installed on Ubuntu 22.04 by default, install it RUN wget http://nz2.archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_amd64.deb && \ dpkg -i ./libssl1.1_1.1.1f-1ubuntu2_amd64.deb RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --dearmor | tee /usr/share/keyrings/intel-graphics.gpg > /dev/null RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \ | gpg --dearmor | tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null && echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | tee /etc/apt/sources.list.d/oneAPI.list RUN echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/intel-for-pytorch-gpu-dev all main" > /tmp/intel-for-pytorch-gpu-dev.list RUN mv /tmp/intel-for-pytorch-gpu-dev.list /etc/apt/sources.list.d RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt install -y xpu-smi cmake ninja-build pciutils intel-ocloc libnl-genl-3-200 # Text Generation Inference base env ENV HF_HOME=/data \ HF_HUB_ENABLE_HF_TRANSFER=1 \ PORT=80 WORKDIR /usr/src RUN pip install torch==2.8.0 torchvision==0.23.0 --index-url https://download.pytorch.org/whl/xpu # Install server COPY proto proto COPY server server COPY 
server/Makefile server/Makefile ENV UV_SYSTEM_PYTHON=1 RUN cd server && \ make gen-server && \ pip install -U pip uv && \ uv pip install -e ".[accelerate, compressed-tensors, peft, outlines]" --no-cache-dir ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/conda/lib ENV CCL_ZE_IPC_EXCHANGE=sockets ENV TORCH_LLM_ALLREDUCE=1 ENV CCL_TOPO_FABRIC_VERTEX_CONNECTION_CHECK=0 ENV TORCH_DEVICE_BACKEND_AUTOLOAD=0 RUN pip install https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_stable/xpu/intel_extension_for_pytorch-2.8.10%2Bxpu-cp311-cp311-linux_x86_64.whl # Install benchmarker COPY --from=builder /usr/src/target/release-opt/text-generation-benchmark /usr/local/bin/text-generation-benchmark # Install router COPY --from=builder /usr/src/target/release-opt/text-generation-router /usr/local/bin/text-generation-router # Install launcher COPY --from=builder /usr/src/target/release-opt/text-generation-launcher /usr/local/bin/text-generation-launcher # Text Generation Inference base image for Intel-cpu FROM ubuntu:22.04 AS cpu RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ curl \ ca-certificates \ make \ g++-12 \ gcc-12 \ git \ wget \ cmake \ libnuma-dev RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-12 12 RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12 RUN update-alternatives --install /usr/bin/cc cc /usr/bin/gcc 30 RUN update-alternatives --set cc /usr/bin/gcc RUN update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++ 30 RUN update-alternatives --set c++ /usr/bin/g++ ENV HUGGINGFACE_HUB_CACHE=/data \ HF_HUB_ENABLE_HF_TRANSFER=1 \ PORT=80 ARG MAMBA_VERSION=23.1.0-1 ARG PYTHON_VERSION='3.11.10' # Automatically set by buildx ARG TARGETPLATFORM ENV PATH=/opt/conda/bin:$PATH # TGI seem to require libssl.so.1.1 instead of libssl.so.3 so we can't use ubuntu 22.04. Ubuntu 20.04 has python==3.8, and TGI requires python>=3.9, hence the need for miniconda. 
# Install mamba # translating Docker's TARGETPLATFORM into mamba arches RUN case ${TARGETPLATFORM} in \ "linux/arm64") MAMBA_ARCH=aarch64 ;; \ *) MAMBA_ARCH=x86_64 ;; \ esac && \ curl -fsSL -v -o ~/mambaforge.sh -O "https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-${MAMBA_ARCH}.sh" RUN chmod +x ~/mambaforge.sh && \ bash ~/mambaforge.sh -b -p /opt/conda && \ rm ~/mambaforge.sh RUN case ${TARGETPLATFORM} in \ "linux/arm64") exit 1 ;; \ *) /opt/conda/bin/conda update -y conda && \ /opt/conda/bin/conda install -y "python=${PYTHON_VERSION}" ;; \ esac && \ /opt/conda/bin/conda clean -ya RUN conda install -c conda-forge gperftools mkl RUN pip install torch==2.7.0 torchvision==0.22.0 torchaudio==2.7.0 --index-url https://download.pytorch.org/whl/cpu RUN pip install triton==3.2.0 py-libnuma WORKDIR /usr/src RUN pip install https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_stable/cpu/intel_extension_for_pytorch-2.7.0%2Bcpu-cp311-cp311-linux_x86_64.whl RUN pip install https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_stable/cpu/oneccl_bind_pt-2.7.0%2Bcpu-cp311-cp311-linux_x86_64.whl ENV LD_PRELOAD=/opt/conda/lib/libtcmalloc.so ENV CCL_ROOT=/opt/conda/lib/python3.11/site-packages/oneccl_bindings_for_pytorch ENV I_MPI_ROOT=/opt/conda/lib/python3.11/site-packages/oneccl_bindings_for_pytorch ENV FI_PROVIDER_PATH=/opt/conda/lib/python3.11/site-packages/oneccl_bindings_for_pytorch/opt/mpi/libfabric/lib/prov:/usr/lib64/libfabric ENV LD_LIBRARY_PATH=/opt/conda/lib/python3.11/site-packages/oneccl_bindings_for_pytorch/opt/mpi/libfabric/lib:/opt/conda/lib/python3.11/site-packages/oneccl_bindings_for_pytorch/lib ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/opt/conda/lib/" # Install server COPY proto proto COPY server server COPY server/Makefile server/Makefile ENV UV_SYSTEM_PYTHON=1 RUN cd server && \ make gen-server && \ pip install -U pip uv && \ uv pip install -e ".[accelerate, compressed-tensors, peft, outlines]" 
--no-cache-dir # Install benchmarker COPY --from=builder /usr/src/target/release-opt/text-generation-benchmark /usr/local/bin/text-generation-benchmark # Install router COPY --from=builder /usr/src/target/release-opt/text-generation-router /usr/local/bin/text-generation-router # Install launcher COPY --from=builder /usr/src/target/release-opt/text-generation-launcher /usr/local/bin/text-generation-launcher FROM ${PLATFORM} AS final ENV ATTENTION=flashdecoding-ipex ENV PREFIX_CACHING=1 ENV PREFILL_CHUNKING=1 ENV CUDA_GRAPHS=0 ENTRYPOINT ["text-generation-launcher"] CMD ["--json-output"]
text-generation-inference/Dockerfile_intel/0
{ "file_path": "text-generation-inference/Dockerfile_intel", "repo_id": "text-generation-inference", "token_count": 3628 }
283
#!/bin/bash git clone -b dill-0.3.7 https://github.com/uqfoundation/dill.git pushd dill cat <<EOF > dill-0.3.7.patch diff --git a/dill/_dill.py b/dill/_dill.py index d0cf543..f6eb662 100644 --- a/dill/_dill.py +++ b/dill/_dill.py @@ -69,7 +69,15 @@ TypeType = type # 'new-style' classes #XXX: unregistered XRangeType = range from types import MappingProxyType as DictProxyType, new_class from pickle import DEFAULT_PROTOCOL, HIGHEST_PROTOCOL, PickleError, PicklingError, UnpicklingError -import __main__ as _main_module +class _LazyMainModule(object): + _module = None + @property + def module(self): + if self._module is None: + import __main__ as _m_module + self._module = _m_module + return self._module +_main_module = _LazyMainModule() import marshal import gc # import zlib @@ -353,7 +361,7 @@ class Pickler(StockPickler): _fmode = kwds.pop('fmode', None) _recurse = kwds.pop('recurse', None) StockPickler.__init__(self, file, *args, **kwds) - self._main = _main_module + self._main = _main_module.module self._diff_cache = {} self._byref = settings['byref'] if _byref is None else _byref self._strictio = False #_strictio @@ -435,12 +443,12 @@ class Unpickler(StockUnpickler): settings = Pickler.settings _ignore = kwds.pop('ignore', None) StockUnpickler.__init__(self, *args, **kwds) - self._main = _main_module + self._main = _main_module.module self._ignore = settings['ignore'] if _ignore is None else _ignore def load(self): #NOTE: if settings change, need to update attributes obj = StockUnpickler.load(self) - if type(obj).__module__ == getattr(_main_module, '__name__', '__main__'): + if type(obj).__module__ == getattr(self._main, '__name__', '__main__'): if not self._ignore: # point obj class to main try: obj.__class__ = getattr(self._main, type(obj).__name__) @@ -1194,11 +1202,11 @@ def save_module_dict(pickler, obj): logger.trace(pickler, "D1: %s", _repr_dict(obj)) # obj pickler.write(bytes('c__builtin__\n__main__\n', 'UTF-8')) logger.trace(pickler, "# D1") - elif (not 
is_dill(pickler, child=False)) and (obj == _main_module.__dict__): + elif (not is_dill(pickler, child=False)) and (obj == _main_module.module.__dict__): logger.trace(pickler, "D3: %s", _repr_dict(obj)) # obj pickler.write(bytes('c__main__\n__dict__\n', 'UTF-8')) #XXX: works in general? logger.trace(pickler, "# D3") - elif '__name__' in obj and obj != _main_module.__dict__ \\ + elif '__name__' in obj and obj != _main_module.module.__dict__ \\ and type(obj['__name__']) is str \\ and obj is getattr(_import_module(obj['__name__'],True), '__dict__', None): logger.trace(pickler, "D4: %s", _repr_dict(obj)) # obj diff --git a/dill/session.py b/dill/session.py index 74234ab..1be8d89 100644 --- a/dill/session.py +++ b/dill/session.py @@ -233,7 +233,7 @@ def dump_module( protocol = settings['protocol'] main = module if main is None: - main = _main_module + main = _main_module.module elif isinstance(main, str): main = _import_module(main) if not isinstance(main, ModuleType): @@ -501,7 +501,7 @@ def load_module( pass assert loaded is main _restore_modules(unpickler, main) - if main is _main_module or main is module: + if main is _main_module.module or main is module: return None else: return main EOF git apply dill-0.3.7.patch python -m pip install . popd rm -fr dill
text-generation-inference/backends/gaudi/server/dill-0.3.7-patch.sh/0
{ "file_path": "text-generation-inference/backends/gaudi/server/dill-0.3.7-patch.sh", "repo_id": "text-generation-inference", "token_count": 1641 }
284
import torch from text_generation_server.layers.attention import Seqlen, HPUPagedAttentionMetadata from typing import Optional from text_generation_server.layers.attention.kv_cache import KVCache, KVScales from vllm_hpu_extension import ops from vllm_hpu_extension.utils import Matmul from habana_frameworks.torch.hpex.kernels import FusedSDPA from vllm_hpu_extension.utils import ModuleFusedSDPA import os from text_generation_server.models.globals import BLOCK_SIZE import math SUPPORTS_WINDOWING = False class FP8Matmul(torch.nn.Module): def __init__(self, scale_other): super().__init__() self.scale_input = torch.tensor(1.0, dtype=torch.bfloat16, device="hpu") self.scale_other = scale_other def quant_input(self, x, scale): return torch.ops.hpu.cast_to_fp8_v2( x, scale, False, False, torch.float8_e4m3fn )[0] def matmul_fp8( self, x, other, out_dtype, scale_input_inv=None, scale_other_inv=None ): return torch.ops.hpu.fp8_gemm_v2( A=x, trans_A=False, B=other, trans_B=False, D=None, out_dtype=out_dtype, A_scale_inv=scale_input_inv, B_scale_inv=scale_other_inv, bias=None, accumulate=False, ) def forward(self, input, other): qinput = self.quant_input(input, self.scale_input) qother = self.quant_input(other, self.scale_other) output = self.matmul_fp8( qinput, qother, out_dtype=torch.bfloat16, scale_input_inv=1.0 / self.scale_input, scale_other_inv=1.0 / self.scale_other, ) return output class FetchFromCache(torch.nn.Module): def __init__(self, scale_inv): super().__init__() self.scale_inv = scale_inv def forward(self, cache, blocks): if os.environ.get("VLLM_CONTIGUOUS_PA", "true").lower() == "true": out = cache[: blocks.size(0)] else: out = cache.index_select(0, blocks) if out.dtype == torch.float8_e4m3fn: out = torch.ops.hpu.cast_from_fp8(out, self.scale_inv, torch.bfloat16) return out def attention( *, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, kv_cache: KVCache, kv_scales: KVScales, seqlen: Seqlen, softmax_scale: float, window_size_left: int = -1, 
causal: bool = True, softcap: Optional[float] = None, ): fsdpa_op = ModuleFusedSDPA(FusedSDPA) bs = seqlen.input_lengths.shape[0] _, head_num, head_size = query.shape _, kv_head_num, head_size = key.shape query = query.view(bs, -1, head_num, head_size).transpose(1, 2) key = key.view(bs, -1, kv_head_num, head_size).transpose(1, 2) value = value.view(bs, -1, kv_head_num, head_size).transpose(1, 2) attn_output = fsdpa_op( query, key, value, attn_mask=seqlen.attn_mask if window_size_left != -1 else None, dropout_p=0.0, is_causal=causal if window_size_left == -1 else False, scale=softmax_scale, softmax_mode="None", recompute_mode=None, valid_sequence_lengths=seqlen.input_lengths if window_size_left == -1 else None, padding_side="left", ) attn_output = attn_output.transpose(1, 2).squeeze(0).contiguous() return attn_output def set_block_mapping(hpu_attention_meta: HPUPagedAttentionMetadata, batch_size): block_mapping = torch.nn.functional.one_hot( hpu_attention_meta.block_groups, num_classes=batch_size ) dtype = hpu_attention_meta.block_usage.dtype device = hpu_attention_meta.block_usage.device mask = torch.arange(0, BLOCK_SIZE, device=device, dtype=torch.int32).unsqueeze(0) mask = mask >= hpu_attention_meta.block_usage.unsqueeze(-1) attn_bias = torch.zeros_like(mask, dtype=dtype).masked_fill_(mask, -math.inf) hpu_attention_meta = hpu_attention_meta._replace( attn_bias=attn_bias, block_mapping=block_mapping.to(dtype) ) if hpu_attention_meta.block_groups_in_window is not None: block_mapping = torch.nn.functional.one_hot( hpu_attention_meta.block_groups_in_window, num_classes=batch_size ) attn_bias = torch.log(hpu_attention_meta.slots_in_window_mask.float()) hpu_attention_meta = hpu_attention_meta._replace( attn_bias_in_window=attn_bias, block_mapping_in_window=block_mapping.to(dtype), ) return hpu_attention_meta def paged_attention( query: torch.Tensor, kv_cache: KVCache, kv_head_mapping: torch.Tensor, softmax_scale: float, seqlen: Seqlen, *, kv_scales: KVScales, softcap: 
Optional[float] = None, hpu_attention_meta: HPUPagedAttentionMetadata, window_size_left: int = -1, ): batch_size, head_num, head_size = query.shape fp8_kv = kv_cache.dtype == torch.float8_e4m3fn output = ops.flat_pa( query=query.view(batch_size, 1, head_num * head_size), key_cache=kv_cache.key, value_cache=kv_cache.value, block_list=( hpu_attention_meta.block_list if window_size_left == -1 else hpu_attention_meta.block_list_in_window ), block_mapping=( hpu_attention_meta.block_mapping if window_size_left == -1 else hpu_attention_meta.block_mapping_in_window ), block_bias=( hpu_attention_meta.attn_bias if window_size_left == -1 else hpu_attention_meta.attn_bias_in_window ), block_groups=( hpu_attention_meta.block_groups if window_size_left == -1 else hpu_attention_meta.block_groups_in_window ), block_size=BLOCK_SIZE, scale=softmax_scale, matmul_qk_op=FP8Matmul(kv_scales.key_scale) if fp8_kv else Matmul(), matmul_av_op=FP8Matmul(kv_scales.value_scale) if fp8_kv else Matmul(), batch2block_matmul_op=Matmul(), block2batch_matmul_op=Matmul(), keys_fetch_func=FetchFromCache(1.0 / kv_scales.key_scale_cpu), values_fetch_func=FetchFromCache(1.0 / kv_scales.value_scale_cpu), ) # Reshape the output tensor. 
return output.view(batch_size, head_num, head_size) def paged_attention_mla( query: torch.Tensor, kv_cache: KVCache, kv_head_mapping: torch.Tensor, softmax_scale: float, seqlen: Seqlen, *, kv_scales: KVScales, softcap: Optional[float] = None, hpu_attention_meta: HPUPagedAttentionMetadata, kv_lora_rank: int = 0, ): batch_size, head_num, head_size = query.shape fp8_kv = kv_cache.dtype == torch.float8_e4m3fn output = ops.flat_pa_mla( query=query, key_cache=kv_cache.key, value_cache=None, block_list=hpu_attention_meta.block_list, block_mapping=hpu_attention_meta.block_mapping, block_bias=hpu_attention_meta.attn_bias, block_groups=hpu_attention_meta.block_groups, block_size=BLOCK_SIZE, scale=softmax_scale, matmul_qk_op=FP8Matmul(kv_scales.key_scale) if fp8_kv else Matmul(), matmul_av_op=FP8Matmul(kv_scales.value_scale) if fp8_kv else Matmul(), batch2block_matmul_op=Matmul(), block2batch_matmul_op=Matmul(), keys_fetch_func=FetchFromCache(1.0 / kv_scales.key_scale_cpu), values_fetch_func=None, kv_lora_rank=kv_lora_rank, ) # Reshape the output tensor. return output.view(batch_size, head_num, -1) __all__ = [ "SUPPORTS_WINDOWING", "attention", "paged_attention", "paged_attention_mla", "set_block_mapping", ]
text-generation-inference/backends/gaudi/server/text_generation_server/layers/attention/hpu.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/layers/attention/hpu.py", "repo_id": "text-generation-inference", "token_count": 3794 }
285
import torch
from torch import nn
from accelerate import init_empty_weights


# Monkey patching: attach checkpoint-loading alternate constructors onto
# torch.nn.LayerNorm so model code can call LayerNorm.load(...) directly.
@classmethod
def load_layer_norm(cls, prefix, weights, eps):
    """Build a LayerNorm whose weight and bias are read from a checkpoint."""
    scale = weights.get_tensor(f"{prefix}.weight")
    shift = weights.get_tensor(f"{prefix}.bias")
    # Instantiate on the meta device so no real allocation happens before the
    # checkpoint tensors are attached.
    with init_empty_weights():
        ln = cls(scale.shape, eps=eps)

    ln.weight = torch.nn.Parameter(scale)
    ln.bias = torch.nn.Parameter(shift)
    return ln


@classmethod
def load_layer_norm_no_bias(cls, prefix, weights, eps):
    """Build a bias-free LayerNorm whose weight is read from a checkpoint."""
    scale = weights.get_tensor(f"{prefix}.weight")
    with init_empty_weights():
        ln = cls(scale.shape, eps=eps)

    ln.weight = torch.nn.Parameter(scale)
    ln.bias = None
    return ln


torch.nn.LayerNorm.load = load_layer_norm
torch.nn.LayerNorm.load_no_bias = load_layer_norm_no_bias


class FastLayerNorm(nn.LayerNorm):
    """LayerNorm that fuses the residual add and also returns the new residual.

    Returns a (normalized, residual) pair so the caller can thread the
    residual stream through the next layer without an extra copy.
    """

    def forward(self, hidden_states, residual=None):
        # NOTE: the add is in place, so the caller's tensor is mutated.
        if residual is not None:
            hidden_states += residual
        residual = hidden_states

        return super().forward(hidden_states), residual


class FastRMSNorm(nn.Module):
    """RMSNorm with a fused residual add, computed in float32 for stability."""

    def __init__(self, weight: torch.Tensor, eps: float):
        super().__init__()
        self.weight = nn.Parameter(weight)
        self.variance_epsilon = eps

    @classmethod
    def load(cls, prefix, weights, eps=1e-6):
        """Build an RMSNorm whose weight is read from a checkpoint."""
        return cls(weights.get_tensor(f"{prefix}.weight"), eps)

    def forward(self, hidden_states, residual=None):
        # NOTE: the add is in place, so the caller's tensor is mutated.
        if residual is not None:
            hidden_states += residual
        residual = hidden_states

        # Compute the statistics in float32 regardless of the input dtype,
        # then cast back to the weight's dtype for the final scale.
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(self.weight.dtype), residual
text-generation-inference/backends/gaudi/server/text_generation_server/layers/layernorm.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/layers/layernorm.py", "repo_id": "text-generation-inference", "token_count": 746 }
286
# coding=utf-8
# Copyright 2024 Cohere team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import torch.distributed

from torch import nn
from transformers.activations import ACT2FN
from typing import Optional, List, Tuple

from text_generation_server.layers.attention import (
    paged_attention,
    attention,
    set_block_mapping,
    Seqlen,
    HPUPagedAttentionMetadata,
)
from text_generation_server.layers.attention.kv_cache import get_kv_scales
from text_generation_server.layers import (
    TensorParallelRowLinear,
    TensorParallelColumnLinear,
    TensorParallelEmbedding,
    SpeculativeHead,
    get_linear,
)
from text_generation_server.layers.layernorm import (
    FastLayerNorm,
)
from text_generation_server.layers.rotary import (
    PositionRotaryEmbedding,
)
from text_generation_server.utils.weights import UnquantizedWeight
from habana_frameworks.torch.hpex.kernels import (
    RotaryPosEmbeddingMode,
    apply_rotary_pos_emb,
)
import habana_frameworks.torch as htorch


class CohereRotary(PositionRotaryEmbedding):
    """Rotary embedding variant that applies pairwise rotation in place on HPU."""

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        cos: torch.Tensor,
        sin: torch.Tensor,
    ):
        # Such controlflows may add some overhead.
        num_tokens = query.shape[0]
        head_size = query.shape[-1]
        rope_mode = RotaryPosEmbeddingMode.PAIRWISE
        # PAIRWISE mode rotates (x0, x1), (x2, x3), ... so each cos/sin value
        # must be duplicated along the last dimension.
        sin = torch.repeat_interleave(sin, 2, dim=-1)
        cos = torch.repeat_interleave(cos, 2, dim=-1)
        rotary_dim = cos.shape[-1]

        # Rotate only the first `rotary_dim` features of each head; the rest
        # pass through unchanged. The result is written back in place.
        query_shape = query.shape
        query = query.view(num_tokens, -1, head_size)
        query_rot = query[..., :rotary_dim]
        query_pass = query[..., rotary_dim:]
        query_rot = apply_rotary_pos_emb(query_rot, cos, sin, None, 0, rope_mode)
        query.copy_(torch.cat((query_rot, query_pass), dim=-1).reshape(query_shape))

        key_shape = key.shape
        key = key.view(num_tokens, -1, head_size)
        key_rot = key[..., :rotary_dim]
        key_pass = key[..., rotary_dim:]
        key_rot = apply_rotary_pos_emb(key_rot, cos, sin, None, 0, rope_mode)
        key.copy_(torch.cat((key_rot, key_pass), dim=-1).reshape(key_shape))


class CohereLayerNorm(nn.Module):
    """Per-head LayerNorm without bias, used for Cohere's QK-norm.

    The weight is sharded on dim 0 (one row per local head); inputs are
    reshaped to (tokens, heads, head_size) before normalizing over head_size.
    """

    def __init__(self, prefix, weights, eps):
        super().__init__()
        weight = weights.get_sharded(f"{prefix}.weight", dim=0)
        self.weight = nn.Parameter(weight)
        # Fake weights
        self.ones = weight.new_ones(weight.shape[1])
        self.eps = eps

    def forward(self, hidden_states):
        hidden_states = hidden_states.reshape(
            -1, self.weight.shape[0], self.weight.shape[1]
        )
        input_dtype = hidden_states.dtype
        # Normalize in float32 for numerical stability, cast back at the end.
        hidden_states = hidden_states.to(torch.float32)
        mean = hidden_states.mean(-1, keepdim=True)
        hidden_states_minus_mean = hidden_states - mean
        variance = hidden_states_minus_mean.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states_minus_mean * torch.rsqrt(variance + self.eps)
        hidden_states = self.weight.to(torch.float32) * hidden_states
        hidden_states = hidden_states.view(-1, self.weight.shape[1])
        return hidden_states.to(input_dtype)


def load_attention(config, prefix, weights):
    """Load the fused QKV projection, handling GQA (fewer KV heads) specially."""
    if config.num_attention_heads != config.num_key_value_heads:
        return _load_gqa(config, prefix, weights)
    else:
        return TensorParallelColumnLinear.load_multi(
            config,
            prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
            dim=0,
            weights=weights,
            bias=config.attention_bias,
        )


def _load_gqa(config, prefix: str, weights):
    """Load a grouped-query-attention QKV projection sharded across ranks."""
    assert config.hidden_size % config.num_attention_heads == 0
    assert config.num_attention_heads % weights.process_group.size() == 0

    weight = weights.get_multi_weights_col(
        prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
        dim=0,
    )

    if isinstance(weight, UnquantizedWeight):
        weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device)

        head_size = config.hidden_size // config.num_attention_heads
        num_heads = config.num_attention_heads // weights.process_group.size()
        num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
        # BUGFIX: the error message previously used the *unsharded*
        # config.num_key_value_heads, so under tensor parallelism it reported
        # a different expected shape than the one actually asserted.
        assert list(weight.weight.shape) == [
            (num_heads + 2 * num_key_value_heads) * head_size,
            config.hidden_size,
        ], f"{list(weight.weight.shape)} != {[(num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size]}"

    if config.attention_bias:
        w = [
            weights.get_sharded(f"{p}.bias", dim=0)
            for p in [f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"]
        ]
        bias = torch.cat(w, dim=0).to(dtype=weights.dtype).to(device=weights.device)
    else:
        bias = None

    return TensorParallelColumnLinear(get_linear(weight, bias=bias))


class FlashCohereAttention(torch.nn.Module):
    """Multi-head attention with paged KV cache and optional QK-norm."""

    def __init__(
        self,
        prefix: str,
        config,
        weights,
        rotary_emb,
    ):
        super().__init__()
        self.num_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        self.head_size = self.hidden_size // self.num_heads

        self.rotary_emb = rotary_emb

        self.softmax_scale = self.head_size**-0.5

        if self.num_heads % weights.process_group.size() != 0:
            raise ValueError(
                f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
                f"and `num_shards`: {weights.process_group.size()}"
            )
        self.num_heads = self.num_heads // weights.process_group.size()
        self.num_key_value_heads = (
            config.num_key_value_heads // weights.process_group.size()
        )

        self.query_key_value = load_attention(config, prefix, weights)
        self.kv_scales = get_kv_scales(weights, f"{prefix}")

        self.use_qk_norm = config.use_qk_norm
        if self.use_qk_norm:
            self.q_norm = CohereLayerNorm(
                prefix=f"{prefix}.q_norm",
                weights=weights,
                eps=config.layer_norm_eps,
            )
            self.k_norm = CohereLayerNorm(
                prefix=f"{prefix}.k_norm",
                weights=weights,
                eps=config.layer_norm_eps,
            )
        else:
            self.q_norm = None
            self.k_norm = None

        self.o_proj = TensorParallelRowLinear.load(
            config,
            prefix=f"{prefix}.o_proj",
            weights=weights,
            bias=config.attention_bias,
        )
        self.num_groups = self.num_heads // self.num_key_value_heads
        self.kv_head_mapping = torch.arange(
            0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
        ).repeat_interleave(self.num_groups)

    def forward(
        self,
        hidden_states,
        cos,
        sin,
        cu_seqlen_prefill,
        kv_cache,
        slots,
        seqlen,
        hpu_attention_meta,
    ):
        qkv = self.query_key_value(hidden_states)
        query, key, value = qkv.split(
            [
                self.head_size * self.num_heads,
                self.head_size * self.num_key_value_heads,
                self.head_size * self.num_key_value_heads,
            ],
            dim=1,
        )

        if self.use_qk_norm:
            query = query.reshape(-1, self.head_size)
            key = key.reshape(-1, self.head_size)
            query = self.q_norm(query.contiguous())
            key = self.k_norm(key.contiguous())

        query = query.view(-1, self.num_heads, self.head_size)
        key = key.view(-1, self.num_key_value_heads, self.head_size)
        value = value.view(-1, self.num_key_value_heads, self.head_size)

        self.rotary_emb(query, key, cos, sin)

        kv_cache.store(
            key=key,
            value=value,
            slots=slots,
            kv_scales=self.kv_scales,
        )

        # Prefill
        if cu_seqlen_prefill is not None:
            # sdpa
            attn_output = attention(
                query=query,
                key=key,
                value=value,
                kv_cache=kv_cache,
                kv_scales=self.kv_scales,
                seqlen=seqlen,
                softmax_scale=self.softmax_scale,
            )
        # Decode
        else:
            attn_output = paged_attention(
                query,
                kv_cache,
                self.kv_head_mapping,
                self.softmax_scale,
                seqlen,
                kv_scales=self.kv_scales,
                hpu_attention_meta=hpu_attention_meta,
            )

        # reduce=False: the all-reduce is deferred to the layer, which sums
        # attention and MLP outputs first (Cohere's parallel residual).
        return self.o_proj(
            attn_output.view(-1, self.num_heads * self.head_size), reduce=False
        )


class CohereMLP(nn.Module):
    """Gated MLP with fused gate/up projection."""

    def __init__(self, prefix, config, weights):
        super().__init__()
        act = config.hidden_act
        self.act = (
            ACT2FN[act]
            if "gelu" not in act
            else lambda x: torch.nn.functional.gelu(
                x,
                approximate=(
                    "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
                ),
            )
        )
        # Fuse gate and up proj
        self.gate_up_proj = TensorParallelColumnLinear.load_multi(
            config,
            prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"],
            weights=weights,
            dim=0,
            bias=False,
        )
        self.down_proj = TensorParallelRowLinear.load(
            config,
            prefix=f"{prefix}.down_proj",
            weights=weights,
            bias=False,
        )
        self.intermediate_size = (
            config.intermediate_size // weights.process_group.size()
        )

    def forward(self, hidden_states):
        gate_up_states = self.gate_up_proj(hidden_states)
        gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size)
        # reduce=False: all-reduce is deferred to the layer (see attention).
        return self.down_proj(
            self.act(gate_up_states[:, 0]) * gate_up_states[:, 1], reduce=False
        )


class FlashCohereLayer(nn.Module):
    """One Cohere decoder layer with parallel attention + MLP residual."""

    def __init__(self, prefix: str, layer_id, config, weights, rotary_emb):
        super().__init__()
        prefix = f"{prefix}.layers.{layer_id}"
        self.self_attn = FlashCohereAttention(
            prefix=f"{prefix}.self_attn",
            config=config,
            weights=weights,
            rotary_emb=rotary_emb,
        )
        self.mlp = CohereMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)

        self.input_layernorm = FastLayerNorm.load_no_bias(
            prefix=f"{prefix}.input_layernorm",
            weights=weights,
            eps=config.layer_norm_eps,
        )
        self.process_group = weights.process_group

    def forward(
        self,
        hidden_states,
        residual,
        cos,
        sin,
        cu_seqlen_prefill,
        kv_cache,
        slots,
        seqlen,
        hpu_attention_meta,
    ):
        normed_hidden_states, res = self.input_layernorm(hidden_states, residual)

        # Self Attention
        attn_output = self.self_attn(
            normed_hidden_states,
            cos,
            sin,
            cu_seqlen_prefill,
            kv_cache,
            slots,
            seqlen,
            hpu_attention_meta,
        )

        # Cohere uses a parallel residual: attention and MLP both consume the
        # same normalized input and their outputs are summed, then all-reduced
        # once across tensor-parallel ranks.
        mlp_output = self.mlp(normed_hidden_states)
        output = attn_output + mlp_output

        if self.process_group.size() > 1:
            torch.distributed.all_reduce(output, group=self.process_group)

        return output, res


class FlashCohereModel(torch.nn.Module):
    """Stack of Cohere decoder layers with shared rotary embedding."""

    def __init__(self, prefix: str, config, weights):
        super().__init__()

        process_group = weights.process_group
        self.tp_rank = process_group.rank()
        self.tp_world_size = process_group.size()
        self.embed_tokens = TensorParallelEmbedding(
            prefix=f"{prefix}.embed_tokens", weights=weights
        )
        rotary_emb = CohereRotary.static(
            config=config,
            dim=config.hidden_size // config.num_attention_heads,
            base=config.rope_theta,
            device=weights.device,
        )
        self.layers = nn.ModuleList(
            [
                FlashCohereLayer(
                    prefix,
                    layer_id,
                    config,
                    weights,
                    rotary_emb,
                )
                for layer_id in range(config.num_hidden_layers)
            ]
        )
        self.norm = FastLayerNorm.load_no_bias(
            prefix=f"{prefix}.norm", weights=weights, eps=config.layer_norm_eps
        )

        self.gradient_checkpointing = False

        self.head_size = self.layers[0].self_attn.head_size
        self.num_heads = self.layers[0].self_attn.num_heads
        self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        cu_seqlen_prefill: Optional[torch.Tensor],
        kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
        slots: torch.Tensor,
        seqlen: torch.Tensor,
        hpu_attention_meta: Optional[HPUPagedAttentionMetadata],
    ) -> torch.Tensor:
        if hpu_attention_meta is not None:
            hpu_attention_meta = set_block_mapping(
                hpu_attention_meta, input_ids.shape[0]
            )

        hidden_states = self.embed_tokens(input_ids)

        # Get rotary cos and sin for this forward
        # Avoid to index in each layer
        cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(position_ids)

        residual = None
        lazy_mode = htorch.utils.internal.is_lazy()
        # mark_step delimits HPU lazy-mode graph segments per layer.
        if lazy_mode:
            htorch.core.mark_step()
        for i, layer in enumerate(self.layers):
            hidden_states, residual = layer(
                hidden_states,
                residual,
                cos,
                sin,
                cu_seqlen_prefill,
                kv_cache[i],
                slots,
                seqlen,
                hpu_attention_meta,
            )
            if lazy_mode:
                htorch.core.mark_step()

        hidden_states, _ = self.norm(hidden_states, residual)

        return hidden_states


class FlashCohereForCausalLM(torch.nn.Module):
    """Cohere causal LM head on top of FlashCohereModel, with logit scaling."""

    def __init__(self, prefix: str, config, weights):
        super().__init__()

        if not prefix:
            prefix = "model"
        else:
            prefix = f"{prefix}.model"

        self.model = FlashCohereModel(prefix, config, weights)
        try:
            self.lm_head = SpeculativeHead.load(
                config,
                prefix="lm_head",
                weights=weights,
            )
        except RuntimeError:
            # Cohere checkpoints may tie the LM head to the input embeddings.
            self.lm_head = SpeculativeHead.load(
                config,
                prefix=f"{prefix}.embed_tokens",
                weights=weights,
            )
        self.logit_scale = config.logit_scale

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        cu_seqlen_prefill: Optional[torch.Tensor],
        kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
        slots: torch.Tensor,
        seqlen: Seqlen,
        hpu_attention_meta: Optional[HPUPagedAttentionMetadata],
        lm_head_indices: Optional[torch.Tensor] = None,
        adapter_data: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        hidden_states = self.model(
            input_ids,
            position_ids,
            cu_seqlen_prefill,
            kv_cache,
            slots,
            seqlen,
            hpu_attention_meta,
        )
        if lm_head_indices is not None:
            hidden_states = hidden_states[lm_head_indices]
        logits, speculative_logits = self.lm_head(hidden_states)
        # Cohere scales logits by a config-defined constant before sampling.
        logits *= self.logit_scale
        if speculative_logits is not None:
            speculative_logits *= self.logit_scale
        return logits, speculative_logits
text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py", "repo_id": "text-generation-inference", "token_count": 8402 }
287
# coding=utf-8
# Copyright 2024 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import torch.distributed
from torch import nn
from typing import Optional, List, Tuple

from text_generation_server.layers.tensor_parallel import TensorParallelColumnLinear
from text_generation_server.layers.attention import Seqlen, HPUPagedAttentionMetadata
from text_generation_server.models.custom_modeling.vlm import (
    load_text_model,
    load_vision_model,
)


class PaliGemmaForConditionalGeneration(nn.Module):
    """PaliGemma: a vision tower whose projected features are spliced into the
    text model's input embeddings at image-token positions."""

    def __init__(self, prefix, config, weights):
        super().__init__()
        config.vision_config.quantize = config.quantize

        self.vision_tower = load_vision_model(
            prefix="vision_tower" if not prefix else f"{prefix}.vision_tower",
            config=config.vision_config,
            weights=weights,
        )
        self.post_vision_tower_layernorm = nn.LayerNorm.load(
            prefix="vision_tower.vision_model.post_layernorm",
            weights=weights,
            eps=config.vision_config.layer_norm_eps,
        )
        self.multi_modal_projector = TensorParallelColumnLinear.load(
            config,
            prefix="multi_modal_projector.linear",
            weights=weights,
            bias=True,
        )

        self.vocab_size = config.text_config.vocab_size
        self.config = config

        text_config = config.text_config
        text_config.speculator = config.speculator
        text_config.quantize = config.quantize
        self.text_model = load_text_model(
            prefix="language_model" if not prefix else f"{prefix}.language_model",
            config=config.text_config,
            weights=weights,
        )

        pad = config.pad_token_id
        self.pad_token_id = -1 if pad is None else pad
        self.dtype = weights.dtype

    def get_vision_embeds(
        self,
        pixel_values: torch.FloatTensor,
        pixel_attention_mask: Optional[torch.FloatTensor] = None,
        image_sizes: Optional[torch.Tensor] = None,
        image_grid_thw: Optional[torch.LongTensor] = None,
    ):
        """Run the vision tower and project its output into the text embedding
        space, flattened to (num_image_tokens, hidden)."""
        pixel_values = pixel_values.to(dtype=self.dtype)
        vision_out = self.vision_tower(pixel_values)
        normed = self.post_vision_tower_layernorm(vision_out.last_hidden_state)
        projected = self.multi_modal_projector(normed)
        return projected.view(-1, projected.shape[-1])

    def get_inputs_embeds(
        self,
        input_ids: torch.Tensor,
        vision_embeds: torch.Tensor = None,
    ):
        """Embed the token ids, overwriting image-token positions with the
        precomputed vision embeddings when provided."""
        inputs_embeds = self.text_model.embed_tokens(input_ids)

        if vision_embeds is not None:
            image_positions = input_ids == self.config.image_token_index
            inputs_embeds[image_positions] = vision_embeds

        return inputs_embeds

    def forward(
        self,
        inputs_embeds: torch.Tensor,
        position_ids: torch.Tensor,
        cu_seqlen_prefill: Optional[torch.Tensor],
        kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
        slots: torch.Tensor,
        seqlen: Seqlen,
        hpu_attention_meta: Optional[HPUPagedAttentionMetadata],
        lm_head_indices: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        adapter_data: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        # TODO This is odd but apparently pali gemma position ids start at 1.
        if cu_seqlen_prefill is not None:
            position_ids += 1

        hidden_states = self.text_model.model(
            inputs_embeds=inputs_embeds,
            position_ids=position_ids,
            cu_seqlen_prefill=cu_seqlen_prefill,
            kv_cache=kv_cache,
            slots=slots,
            seqlen=seqlen,
            hpu_attention_meta=hpu_attention_meta,
            adapter_data=adapter_data,
        )

        if lm_head_indices is not None:
            hidden_states = hidden_states[lm_head_indices]
        return self.text_model.lm_head(hidden_states)
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py", "repo_id": "text-generation-inference", "token_count": 2030 }
288
import math import os import time import torch import torch.distributed import numpy as np from loguru import logger from dataclasses import dataclass from opentelemetry import trace from transformers import ( PreTrainedTokenizerBase, AutoConfig, AutoTokenizer, GenerationConfig, ) from typing import ( Any, Iterable, Optional, Tuple, List, Type, Dict, Union, ) import torch.nn.functional as F from text_generation_server.adapters import AdapterBatchData, AdapterBatchMetadata from text_generation_server.utils.chunks import concat_text_chunks from text_generation_server.models import Model from text_generation_server.utils.log import log_master from text_generation_server.utils.tokens import batch_top_tokens from text_generation_server.utils.speculate import get_speculate from text_generation_server.utils import ( initialize_torch_distributed, weight_files, Weights, pad_next_token_chooser_parameters, ) from text_generation_server.models.types import ( Batch, Tokens, Generation, GeneratedText, ) from text_generation_server.pb import generate_pb2 from text_generation_server.models.globals import ( BLOCK_SIZE, REQUEST_LOGPROBS, TGI_WIGGLE_ROOM, get_adapter_to_index, ) from text_generation_server.layers.attention import ( KVCache, KVCompressCache, Seqlen, HPUPagedAttentionMetadata, trim_attn_metadata, trim_seqlen_metadata, _async_h2d_tensor_copy, ) from text_generation_server.utils import StoppingCriteria, HeterogeneousNextTokenChooser from text_generation_server.utils.dist import MEMORY_FRACTION from text_generation_server.utils.quantization import get_loader from text_generation_server.utils.segments import SegmentConcatBuilder, find_segments from text_generation_server.utils.import_utils import ( empty_cache, synchronize, get_free_memory, ) from text_generation_server.utils.prefill_chunking import ( get_max_prefill_tokens, ) import vllm_hpu_extension.environment as environment import habana_frameworks.torch as htorch import itertools from 
vllm_hpu_extension.bucketing.common import get_bucketing_context from vllm_hpu_extension.profiler import HabanaMemoryProfiler, format_bytes tracer = trace.get_tracer(__name__) def generate_block_metadata( dtype, use_contiguous_pa, slots, block_tables, bucketing_ctx, slots_in_window=None, block_bucket_size=None, ): # Prepare values if we need to continue decoding # need for HPUPagedAttentionMetadata preparation def flatten(in_list): return list(itertools.chain(*in_list)) def gather_list(input, indices, v): return [input[i] if i is not None else v for i in indices] def pad_list(input, k, v): input_len = len(input) target_len = (input_len + k - 1) // k * k padding = target_len - input_len return input + [v] * padding last_block_usage = [slot % BLOCK_SIZE + 1 for slot in slots] block_groups = [[i] * len(bt) for i, bt in enumerate(block_tables)] block_usage = [ [BLOCK_SIZE] * (len(bt) - 1) + [lbu] for bt, lbu in zip(block_tables, last_block_usage) if bt ] block_list = flatten(block_tables) block_groups = flatten(block_groups) block_usage = flatten(block_usage) assert len(block_list) == len(block_groups) assert len(block_list) == len(block_usage) if use_contiguous_pa: if block_bucket_size is None: block_bucket_size = max(max(block_list) + 1, len(block_list)) if bucketing_ctx is not None: block_bucket_size = bucketing_ctx.get_padded_decode_num_blocks( block_bucket_size ) indices: List[Any] indices = [None] * block_bucket_size for i, bid in enumerate(block_list): indices[bid] = i block_list = gather_list(block_list, indices, 0) block_groups = gather_list(block_groups, indices, -1) block_usage = gather_list(block_usage, indices, 1) else: if block_bucket_size is None: block_bucket_size = len(block_list) if bucketing_ctx is not None: block_bucket_size = bucketing_ctx.get_padded_decode_num_blocks( block_bucket_size ) block_list = pad_list(block_list, block_bucket_size, 0) block_groups = pad_list(block_groups, block_bucket_size, -1) block_usage = pad_list(block_usage, 
block_bucket_size, 1) slots_in_window_mask = None if slots_in_window is not None: slot_list = [ block_id * BLOCK_SIZE + slot_idx for block_id in block_list for slot_idx in range(BLOCK_SIZE) ] slot_list = torch.tensor(slot_list, dtype=torch.int64) slot_list = slot_list.view(-1, BLOCK_SIZE) slots_in_window_mask = torch.isin(slot_list, slots_in_window) for i in range(slots_in_window_mask.shape[0]): if not slots_in_window_mask[i].any(): slots_in_window_mask[i, 0] = True block_list = torch.tensor(block_list, dtype=torch.int, device="cpu") block_groups = torch.tensor(block_groups, dtype=torch.int, device="cpu") block_usage = torch.tensor(block_usage, dtype=dtype, device="cpu") return ( block_list, block_groups, block_usage, slots_in_window_mask, block_bucket_size, ) @dataclass class FlashCausalLMBatch(Batch): batch_id: int requests: List[generate_pb2.Request] # request id -> idx in list mapping requests_idx_mapping: Dict[int, int] # Decoder values # Can be a list for easy filtering # If `input_ids` is a list, it needs to be materialized to a tensor first input_ids: Union[torch.Tensor, List[List[int]]] # Will be set by `generate_token` and reset after each prefill forward before staying set in decode position_ids: Optional[torch.Tensor] speculative_ids: Optional[torch.Tensor] # Set when creating the batch # tensor of indices of the currently used slots, length = \sum_{i=0}^{b} s_i in prefill, length = b in decode # Will be set by `generate_token` and reset after each prefill forward before staying set in decode slot_indices: Optional[torch.Tensor] # list of length b of list of length s_i // block_size block_tables: List[List[int]] # tensor of size [b, max_total_seqlen // block_size] holding the paged attention block tables for all sequences block_tables_tensor: torch.Tensor # tensor of length \sum_{i=0}^{b} max_s_i holding the paged attention slots for all sequences slots: torch.Tensor # list of length b + 1 containing the cumulative sequence slot lengths of the sequences 
in the batch # used for filtering cu_slots: torch.Tensor max_input_length: int max_current_length: int # Whether this batch contains at least one request that is prefilling prefilling: bool # Whether each request is prefilling prefilling_mask: List[bool] # Prefill metadata tensors to efficiently compute logprobs # tensor of length b + 1 containing the cumulative sequence lengths of the sequences in the batch, only used in prefill cu_seqlen_prefill: Optional[torch.Tensor] # Prefill cache indices is used to slice into the kv tensor before caching it into the paged attention buffers # as we only keep SLIDING_WINDOW values instead of the whole tensor prefill_cache_indices: Optional[torch.Tensor] # Will be set by `generate_token` and reset after each prefill forward prefill_head_indices: Optional[torch.Tensor] # Will be set by `generate_token` and reset after each prefill forward prefill_next_token_indices: Optional[torch.tensor] # Will be set by `generate_token` and reset after each prefill forward prefill_cu_outlens: Optional[List[int]] # Will be set by `generate_token` and reset after each prefill forward prefill_logprob_tokens: List[Optional[Tokens]] # All tokens all_input_ids: List[List[int]] all_input_ids_tensor: torch.Tensor # Lengths of all generations present in the batch input_lengths: List[int] # size [b], containing the number of blocks that can be retrieved from the cache cache_lengths: List[int] prompt_lengths: List[int] # Will be set by `generate_token` and reset after each prefill forward before staying set in decode input_lengths_tensor: Optional[torch.Tensor] cache_lengths_tensor: Optional[torch.Tensor] prompt_lengths_tensor: torch.Tensor prefix_offsets: List[Optional[int]] read_offsets: List[Optional[int]] # Generation helpers next_token_chooser: HeterogeneousNextTokenChooser stopping_criterias: List[StoppingCriteria] top_n_tokens: List[int] top_n_tokens_tensor: torch.Tensor # Adapter metadata for each request # Will be set by `generate_token` and 
reset after each prefill forward before staying set in decode adapter_meta: Optional[AdapterBatchMetadata] # Number of blocks in this batch num_blocks: int # Maximum number of blocks max_blocks: int hpu_attn_meta: Optional[HPUPagedAttentionMetadata] next_token_logits: Optional[torch.Tensor] speculative_logits: Optional[torch.Tensor] valid_indices: Optional[List[int]] def to_pb(self) -> generate_pb2.CachedBatch: return generate_pb2.CachedBatch( id=self.batch_id, request_ids=[r.id for r in self.requests], size=len(self), max_tokens=self.num_blocks * BLOCK_SIZE, current_tokens=( sum([len(i) for i in self.input_ids]) if isinstance(self.input_ids, list) else len(self.input_ids) ), ) @classmethod def batch_tokenized_inputs( cls, requests: Iterable[generate_pb2.Request], tokenizer ): max_length = 0 all_input_ids = [] batch_size = 0 for r in requests: batch_size += 1 inputs = concat_text_chunks(r.input_chunks.chunks) input_ids = tokenizer( inputs, truncation=True, max_length=r.truncate, add_special_tokens=r.add_special_tokens, )["input_ids"] max_length = max(max_length, len(input_ids)) all_input_ids.append(input_ids) return all_input_ids @classmethod def from_tokenized( cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, batch_tokenized_inputs, dtype: torch.dtype, device: torch.device, ) -> "FlashCausalLMBatch": cache_lengths = [] input_lengths = [] prompt_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] all_postfix_ids = [] requests_idx_mapping = {} slots = [] cu_slots = [0] next_token_chooser_parameters = [] stopping_criterias = [] top_n_tokens = [] num_blocks = 0 max_input_length = 0 max_current_length = 0 max_length = 0 max_blocks = 0 cu_blocks = [0] block_tables = [] block_tables_ragged = [] # Parse batch for i, (r, tokenized_input) in enumerate( zip(pb.requests, batch_tokenized_inputs) ): ### XXX: This consumes so much memory on long requests ### Deactivating it by default seems like the best course. 
if not REQUEST_LOGPROBS: r.prefill_logprobs = False else: assert False, "prefill_logprobs not supported yet" # request id -> idx in list mapping requests_idx_mapping[r.id] = i prompt_length = len(tokenized_input) prompt_lengths.append(prompt_length) cache_length = r.cache_len assert ( cache_length <= prompt_length ), f"Prefix {cache_length} vs input {prompt_length}" if cache_length == prompt_length: assert False, "unreachable" # `chunk_len` is an optional field in the protobuf # It is only set if the model support chunking # Use all the remaining ids postfix_ids = tokenized_input[cache_length:] input_length = len(postfix_ids) input_lengths.append(input_length) prefix_offsets.append(prompt_length - 5) read_offsets.append(prompt_length) all_postfix_ids.append(postfix_ids) all_input_ids.append(tokenized_input) next_token_chooser_parameters.append(r.parameters) stopping_criteria = StoppingCriteria.from_pb( r.stopping_parameters, tokenizer ) max_new_tokens = stopping_criteria.max_new_tokens stopping_criterias.append(stopping_criteria) top_n_tokens.append(r.top_n_tokens) # Paged attention # Remove one as the first token des not have a past speculative_length = get_speculate() speculative_length = 0 if speculative_length is None else speculative_length # Tokens that need to be mapped to blocks. 
block_tokens = prompt_length + max_new_tokens - 1 + speculative_length # blocks and slots can be empty (for example in warmup) if not r.blocks: needed_blocks = math.ceil(block_tokens / BLOCK_SIZE) request_blocks = [ b for b in range(num_blocks, num_blocks + needed_blocks) ] request_slots = [ s for b in request_blocks for s in range(b * BLOCK_SIZE, (b + 1) * BLOCK_SIZE) ] else: request_blocks = r.blocks request_slots = r.slots block_tables.append(request_blocks) block_tables_ragged.extend(request_blocks) cu_blocks.append(len(block_tables_ragged)) slots.extend(request_slots) cu_slots.append(len(slots)) cache_lengths.append(cache_length) num_blocks += len(request_blocks) # Update max_blocks = max(max_blocks, len(request_blocks)) max_input_length = max(max_input_length, input_length) max_current_length = max(max_current_length, cache_length + input_length) max_length = max( max_length, prompt_length + max_new_tokens + speculative_length, ) next_token_chooser = HeterogeneousNextTokenChooser.from_pb( next_token_chooser_parameters, dtype, device, tokenizer ) # Padded all_input_ids_tensor all_input_ids_tensor = np.zeros( (len(all_input_ids), max_length), dtype=np.int64 ) for i, input_ids in enumerate(all_input_ids): all_input_ids_tensor[i, : len(input_ids)] = input_ids # put on cpu temporarily, move to hpu in prepare_for_prefill all_input_ids_tensor = torch.tensor(all_input_ids_tensor, dtype=torch.int64) top_n_tokens_tensor = torch.tensor(top_n_tokens, dtype=torch.int64) block_tables_ragged = torch.tensor(block_tables_ragged, dtype=torch.int32) cu_blocks = torch.tensor(cu_blocks, dtype=torch.int64) block_tables_tensor = torch.empty( (len(block_tables), max_blocks), dtype=torch.int32, ) for i, request_blocks in enumerate(block_tables): block_tables_tensor[i, : len(request_blocks)] = torch.tensor(request_blocks) prompt_lengths_tensor = torch.tensor(prompt_lengths, dtype=torch.int32) slots = torch.tensor(slots, dtype=torch.int64) cu_slots = torch.tensor(cu_slots, 
dtype=torch.int64)

        # Assemble the batch in its "prefilling" state. Every field that is
        # only known after `prepare_for_prefill` runs (position ids, slot
        # indices, prefill head/next-token indices, attention metadata, ...)
        # is deliberately left as None here.
        return cls(
            batch_id=pb.id,
            requests=pb.requests,
            requests_idx_mapping=requests_idx_mapping,
            input_ids=all_postfix_ids,
            block_tables=block_tables,
            block_tables_tensor=block_tables_tensor,
            cache_lengths=cache_lengths,
            max_input_length=max_input_length,
            max_current_length=max_current_length,
            prefilling=True,
            prefilling_mask=[True] * len(pb.requests),
            prefill_logprob_tokens=[None] * len(pb.requests),
            input_lengths=input_lengths,
            prompt_lengths=prompt_lengths,
            prefix_offsets=prefix_offsets,
            read_offsets=read_offsets,
            all_input_ids=all_input_ids,
            all_input_ids_tensor=all_input_ids_tensor,
            next_token_chooser=next_token_chooser,
            stopping_criterias=stopping_criterias,
            top_n_tokens=top_n_tokens,
            top_n_tokens_tensor=top_n_tokens_tensor,
            num_blocks=num_blocks,
            max_blocks=max_blocks,
            speculative_ids=None,
            prompt_lengths_tensor=prompt_lengths_tensor,
            # These values will be set by `FlashCausalLMBatch.prepare_for_prefill`
            position_ids=None,
            cu_seqlen_prefill=None,
            prefill_cache_indices=None,
            slot_indices=None,
            slots=slots,
            cu_slots=cu_slots,
            prefill_head_indices=None,
            prefill_next_token_indices=None,
            prefill_cu_outlens=None,
            cache_lengths_tensor=None,
            input_lengths_tensor=None,
            adapter_meta=None,
            hpu_attn_meta=None,
            next_token_logits=None,
            speculative_logits=None,
            valid_indices=None,
        )

    @classmethod
    def from_pb(
        cls,
        pb: generate_pb2.Batch,
        tokenizer: PreTrainedTokenizerBase,
        dtype: torch.dtype,
        device: torch.device,
    ) -> "FlashCausalLMBatch":
        """Build a batch from its protobuf representation.

        Tokenizes every request's input text, then delegates the actual
        batch construction to `from_tokenized`.
        """
        assert len(pb.requests) > 0
        batch_tokenized_inputs = cls.batch_tokenized_inputs(pb.requests, tokenizer)
        return cls.from_tokenized(pb, tokenizer, batch_tokenized_inputs, dtype, device)

    @tracer.start_as_current_span("filter")
    def filter(self, request_ids: List[int]) -> "FlashCausalLMBatch":
        """Keep only the requests whose ids are in `request_ids`.

        Returns `self` unchanged when every request is kept; otherwise
        builds a new batch whose per-request lists and tensors are
        re-indexed down to the surviving requests.

        Raises:
            ValueError: if `request_ids` is empty.
        """
        if len(request_ids) == 0:
            raise ValueError("Batch must have at least one request")
        # We assume that if len(requests) == len(self) then the requests are the same
        if len(request_ids) == len(self):
            return self

        device =
self.block_tables_tensor.device

        # New values after filtering
        requests_idx_mapping = {}

        # Used to index into tensors
        indices = []

        # slots to keep after filtering
        slot_filtering_indices = torch.zeros(self.slots.shape[0], dtype=torch.bool)

        # Create on CPU to only move to GPU once instead of at every copy
        slot_indices = torch.empty(len(request_ids), dtype=torch.int64)
        max_input_length = 0
        max_current_length = 0

        requests = []
        block_tables = []
        all_input_ids = []
        input_ids = []

        prompt_lengths = []
        input_lengths = []
        cache_lengths = []
        prefix_offsets = []
        read_offsets = []
        cu_slots = [0]

        prefilling_mask = []
        prefill_logprob_tokens = []

        stopping_criterias = []
        adapter_set = set()

        num_blocks = 0
        max_blocks = 0
        max_slots = 0
        cumulative_slot_tokens = 0

        # Walk the surviving request ids in their new order; `i` becomes the
        # request's new position, `idx` its position in the old batch.
        for i, request_id in enumerate(request_ids):
            idx = self.requests_idx_mapping[request_id]
            indices.append(idx)
            requests_idx_mapping[request_id] = i

            requests.append(self.requests[idx])

            # Prefilling
            request_prefilling = self.prefilling_mask[idx]
            prefilling_mask.append(request_prefilling)

            # Get length
            request_input_length = self.input_lengths[idx]
            request_cache_length = self.cache_lengths[idx]
            max_input_length = max(max_input_length, request_input_length)
            max_current_length = max(
                max_current_length, request_cache_length + request_input_length
            )

            all_input_ids.append(self.all_input_ids[idx])

            prompt_lengths.append(self.prompt_lengths[idx])
            input_lengths.append(request_input_length)
            cache_lengths.append(request_cache_length)
            prefix_offsets.append(self.prefix_offsets[idx])
            read_offsets.append(self.read_offsets[idx])

            stopping_criteria = self.stopping_criterias[idx]
            stopping_criterias.append(stopping_criteria)

            prefill_logprob_tokens.append(self.prefill_logprob_tokens[idx])
            ADAPTER_TO_INDEX = get_adapter_to_index()
            adapter_index = ADAPTER_TO_INDEX.get(self.requests[idx].adapter_id, 0)
            adapter_set.add(adapter_index)

            request_block_table = self.block_tables[idx]
            num_blocks += len(request_block_table)
            block_tables.append(request_block_table)

            # Slot range owned by this request in the old flat `slots` tensor.
            start_slot = self.cu_slots[idx]
            end_slot = self.cu_slots[idx + 1]
            slot_length = end_slot - start_slot

            # Set slice
            slot_filtering_indices[start_slot:end_slot] = True

            cu_slots.append(cumulative_slot_tokens + slot_length)

            # Input ids if the request was part of a prefilling batch
            # If the batch was decoding we can index into the tensor directly later
            if self.prefilling:
                input_ids.append(self.input_ids[idx])
            else:
                # Copy to tensor (CPU)
                slot_indices[i] = cumulative_slot_tokens + request_cache_length

            cumulative_slot_tokens += slot_length
            max_blocks = max(max_blocks, len(request_block_table))
            max_slots = max(max_slots, slot_length)

        # Gather per-request tensor rows for the surviving requests.
        block_tables_tensor = self.block_tables_tensor[indices]
        prompt_lengths_tensor = self.prompt_lengths_tensor[indices]

        cu_slots = torch.tensor(cu_slots, dtype=torch.int64)

        # Keep only the slots belonging to surviving requests (boolean mask).
        slots = self.slots[slot_filtering_indices]

        if self.prefilling:
            # These values will be set by `FlashCausalLMBatch.prepare_for_prefill`
            position_ids = None
            slot_indices = None
            cache_lengths_tensor = None
            input_lengths_tensor = None
            adapter_meta = None
        else:
            # Index into tensors
            input_ids = self.input_ids[indices]
            position_ids = self.position_ids[indices]
            input_lengths_tensor = self.input_lengths_tensor[indices]
            cache_lengths_tensor = self.cache_lengths_tensor[indices]

            # Move to GPU now that we have the whole tensor
            slot_indices = slot_indices.to(device)
            if self.adapter_meta is not None:
                adapter_indices = self.adapter_meta.adapter_indices[indices]
                adapter_segments, adapter_segment_indices = find_segments(
                    adapter_indices
                )
                adapter_segments = torch.tensor(adapter_segments, dtype=torch.int32)
                adapter_meta = AdapterBatchMetadata(
                    adapter_indices=adapter_indices,
                    adapter_set=adapter_set,
                    adapter_segments=adapter_segments,
                    segment_indices=adapter_segment_indices,
                )
            else:
                adapter_meta = None
        # HPU lazy mode: mark a step boundary before building the new batch.
        htorch.core.mark_step()
        return type(self)(
            batch_id=self.batch_id,
            requests=requests,
            requests_idx_mapping=requests_idx_mapping,
            input_ids=input_ids,
            position_ids=position_ids,
            cu_seqlen_prefill=None,
            prefill_cache_indices=None,
            slot_indices=slot_indices,
            block_tables=block_tables,
            block_tables_tensor=block_tables_tensor,
            slots=slots,
            cu_slots=cu_slots,
            max_input_length=max_input_length,
            max_current_length=max_current_length,
            prefilling=self.prefilling,
            prefilling_mask=prefilling_mask,
            prefill_head_indices=None,
            prefill_next_token_indices=None,
            prefill_cu_outlens=None,
            prefill_logprob_tokens=prefill_logprob_tokens,
            prompt_lengths=prompt_lengths,
            prompt_lengths_tensor=prompt_lengths_tensor,
            input_lengths=input_lengths,
            input_lengths_tensor=input_lengths_tensor,
            cache_lengths=cache_lengths,
            cache_lengths_tensor=cache_lengths_tensor,
            prefix_offsets=prefix_offsets,
            read_offsets=read_offsets,
            all_input_ids=all_input_ids,
            # NOTE: the padded all_input_ids tensor is NOT filtered here;
            # `valid_indices` records which rows of it are still live.
            all_input_ids_tensor=self.all_input_ids_tensor,
            next_token_chooser=self.next_token_chooser,
            stopping_criterias=stopping_criterias,
            top_n_tokens=self.top_n_tokens,
            top_n_tokens_tensor=self.top_n_tokens_tensor,
            num_blocks=num_blocks,
            max_blocks=max_blocks,
            speculative_ids=self.speculative_ids,
            adapter_meta=adapter_meta,
            hpu_attn_meta=None,
            valid_indices=indices,
            next_token_logits=self.next_token_logits,
            speculative_logits=self.speculative_logits,
        )

    @classmethod
    @tracer.start_as_current_span("concatenate")
    def concatenate(
        cls, batches: List["FlashCausalLMBatch"], padded_total_bs: int = 0
    ) -> "FlashCausalLMBatch":
        """Merge several batches into one.

        `padded_total_bs`, when non-zero, is the bucketed (padded) batch size
        the first batch's tensors were already allocated with; it lets the
        merge reuse `batches[0].input_ids` in place instead of reallocating.
        """
        # Batch attributes
        requests = []
        requests_idx_mapping = {}

        # First pass: accumulate global sizes/maxima over all batches.
        prefilling = False
        num_blocks = 0
        total_batch_size = 0
        total_slots = 0
        max_blocks = 0
        max_length = 0
        max_input_length = 0
        max_current_length = 0
        ADAPTER_TO_INDEX = get_adapter_to_index()
        for b in batches:
            total_batch_size += len(b)
            max_blocks = max(max_blocks, b.max_blocks)
            total_slots += len(b.slots)
            num_blocks += b.num_blocks
            speculative_length = (
                b.speculative_ids.shape[1] if b.speculative_ids is not None else 0
            )
            max_input_length = max(max_input_length, b.max_input_length)
            max_current_length = max(max_current_length, b.max_current_length)
            max_length = max(
max_length,
                max(
                    prompt_length
                    + stopping_criteria.max_new_tokens
                    + speculative_length
                    for prompt_length, stopping_criteria in zip(
                        b.prompt_lengths, b.stopping_criterias
                    )
                ),
            )
            prefilling = prefilling or b.prefilling

        # Pre-allocate the merged flat slots tensor and its cumulative offsets.
        slots = batches[0].slots.new_empty(total_slots)
        cu_slots = torch.zeros(total_batch_size + 1, dtype=torch.int64)

        if prefilling:
            input_ids = []
            # These values will be set by `FlashCausalLMBatch.prepare_for_prefill`
            position_ids = None
            slot_indices = None
            cache_lengths_tensor = None
            input_lengths_tensor = None
            adapter_meta = None
            adapter_segment_builder = None
        else:
            if padded_total_bs == batches[0].input_ids.shape[0]:
                # First batch is already padded to the target size: fill in place.
                input_ids = batches[0].input_ids
            else:
                input_ids = batches[0].input_ids.new_empty(total_batch_size)
            if (
                batches[0].position_ids is not None
                and batches[0].position_ids.dim() == 2
            ):
                # Qwen2_vl case:
                position_ids = batches[0].position_ids.new_empty(
                    (total_batch_size, batches[0].position_ids.shape[-1])
                )
            else:
                position_ids = batches[0].position_ids.new_empty(total_batch_size)
            slot_indices = batches[0].slot_indices.new_empty(total_batch_size)
            input_lengths_tensor = batches[0].input_lengths_tensor.new_empty(
                total_batch_size
            )
            cache_lengths_tensor = batches[0].cache_lengths_tensor.new_empty(
                total_batch_size
            )
            if ADAPTER_TO_INDEX:
                total_indices_size = sum(
                    b.adapter_meta.adapter_indices.shape[0] for b in batches
                )
                adapter_indices = batches[0].adapter_meta.adapter_indices.new_empty(
                    total_indices_size
                )
                adapter_segment_builder = SegmentConcatBuilder()
                adapter_set = set()

        prompt_lengths_tensor = batches[0].prompt_lengths_tensor.new_empty(
            total_batch_size
        )
        block_tables_tensor = batches[0].block_tables_tensor.new_zeros(
            (total_batch_size, max_blocks)
        )
        # Rows of batches[0]'s padded token tensor are reused; other batches'
        # rows are copied into it below.
        all_input_ids_tensor = batches[0].all_input_ids_tensor
        top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros(
            total_batch_size,
        )

        block_tables = []
        cache_lengths = []
        all_input_ids = []

        prompt_lengths = []
        input_lengths = []
        prefix_offsets = []
        read_offsets = []

        prefill_logprob_tokens = []

        next_token_chooser_parameters = []
        fsm_grammar_states = []
        stopping_criterias = []
        top_n_tokens = []
        prefilling_mask = []

        # Cumulative length
        cumulative_batch_size = 0
        cumulative_slots = 0
        cumulative_adapter_indices_size = 0

        # Second pass: copy every batch's data into the merged buffers.
        for i, batch in enumerate(batches):
            requests.extend(batch.requests)
            valid_bsize = len(batch)

            if i == 0:
                requests_idx_mapping = batch.requests_idx_mapping
            else:
                # We need to offset the mapping for each batch by the cumulative batch size
                for k, v in batch.requests_idx_mapping.items():
                    requests_idx_mapping[k] = v + cumulative_batch_size

            start_index = cumulative_batch_size
            end_index = cumulative_batch_size + valid_bsize

            index = torch.tensor(list(range(start_index, end_index)), device="cpu")
            top_n_tokens_tensor.index_copy_(0, index, batch.top_n_tokens_tensor)
            if i > 0:
                # i == 0 rows are already in place (tensor reused above).
                all_input_ids_tensor.index_copy_(
                    0,
                    index.to(batch.all_input_ids_tensor.device),
                    batch.all_input_ids_tensor[:valid_bsize, :],
                )

            block_tables_tensor[
                start_index:end_index, : batch.block_tables_tensor.shape[1]
            ] = batch.block_tables_tensor[:, :max_blocks]

            prompt_lengths_tensor.index_copy_(0, index, batch.prompt_lengths_tensor)

            slots_start_index = cumulative_slots
            slots_end_index = cumulative_slots + len(batch.slots)
            slot_index = torch.tensor(
                list(range(slots_start_index, slots_end_index)),
                device=batch.slots.device,
            )
            slots.index_copy_(0, slot_index, batch.slots)
            # Shift this batch's cumulative slot offsets by what came before.
            cu_slots[start_index + 1 : end_index + 1] = (
                batch.cu_slots[1:] + cumulative_slots
            )

            if not prefilling:
                if padded_total_bs != batches[0].input_ids.shape[0] or i > 0:
                    input_ids.index_copy_(
                        0, index.to(input_ids.device), batch.input_ids[:valid_bsize]
                    )
                position_ids.index_copy_(0, index, batch.position_ids[:valid_bsize])
                slot_indices.index_copy_(
                    0, index, batch.slot_indices + cumulative_slots
                )
                input_lengths_tensor.index_copy_(
                    0, index, batch.input_lengths_tensor[:valid_bsize]
                )
                cache_lengths_tensor.index_copy_(
                    0, index, batch.cache_lengths_tensor[:valid_bsize]
                )
                if ADAPTER_TO_INDEX:
                    adapter_start_index = cumulative_adapter_indices_size
                    adapter_end_index = (
                        cumulative_adapter_indices_size
                        + batch.adapter_meta.adapter_indices.shape[0]
                    )
                    adapter_indices[adapter_start_index:adapter_end_index] = (
                        batch.adapter_meta.adapter_indices
                    )
                    cumulative_adapter_indices_size = adapter_end_index
                    adapter_set.update(batch.adapter_meta.adapter_set)
                    adapter_segment_builder.concat(
                        batch.adapter_meta.adapter_segments,
                        batch.adapter_meta.segment_indices,
                    )
            else:
                # While prefilling, input_ids is a list of per-request id lists.
                if isinstance(batch.input_ids, torch.Tensor):
                    batch.input_ids = batch.input_ids.view(-1, 1).tolist()
                input_ids.extend(batch.input_ids)

            prefilling_mask.extend(batch.prefilling_mask)
            block_tables.extend(batch.block_tables)
            cache_lengths.extend(batch.cache_lengths)
            all_input_ids.extend(batch.all_input_ids)

            prompt_lengths.extend(batch.prompt_lengths)
            input_lengths.extend(batch.input_lengths)
            prefix_offsets.extend(batch.prefix_offsets)
            read_offsets.extend(batch.read_offsets)

            prefill_logprob_tokens.extend(batch.prefill_logprob_tokens)

            next_token_chooser_parameters.extend([r.parameters for r in batch.requests])
            fsm_grammar_states.extend(batch.next_token_chooser.fsm_grammar_states)
            stopping_criterias.extend(batch.stopping_criterias)

            top_n_tokens.extend(batch.top_n_tokens)

            # Update
            cumulative_slots += len(batch.slots)
            cumulative_batch_size += len(batch)

        # Rebuild a single token chooser for the merged request list, carrying
        # over each request's grammar FSM state.
        next_token_chooser = HeterogeneousNextTokenChooser.from_pb(
            next_token_chooser_parameters,
            dtype=batches[0].next_token_chooser.dtype,
            device=batches[0].next_token_chooser.device,
            tokenizer=batches[0].next_token_chooser.tokenizer,
            fsm_grammar_states=fsm_grammar_states,
        )

        # We skip computing the speculative_ids when the batch size is too large, so
        # we must check that all batches have them, otherwise they must be discarded
        if get_speculate() > 0 and all(b.speculative_ids is not None for b in batches):
            speculative_ids = torch.cat([b.speculative_ids for b in batches], dim=0)
        else:
            speculative_ids = None

        if ADAPTER_TO_INDEX and adapter_segment_builder is not None:
            adapter_segments, adapter_segment_indices = adapter_segment_builder.build()
            adapter_meta = AdapterBatchMetadata(
                adapter_indices=adapter_indices,
                adapter_set=adapter_set,
                adapter_segments=adapter_segments,
                segment_indices=adapter_segment_indices,
            )

        return cls(
            batch_id=batches[0].batch_id,
            requests=requests,
            requests_idx_mapping=requests_idx_mapping,
            input_ids=input_ids,
            position_ids=position_ids,
            cu_seqlen_prefill=None,
            prefill_cache_indices=None,
            slot_indices=slot_indices,
            block_tables=block_tables,
            block_tables_tensor=block_tables_tensor,
            cache_lengths=cache_lengths,
            cache_lengths_tensor=cache_lengths_tensor,
            slots=slots,
            cu_slots=cu_slots,
            max_input_length=max_input_length,
            max_current_length=max_current_length,
            prefilling=prefilling,
            prefilling_mask=prefilling_mask,
            prefill_head_indices=None,
            prefill_next_token_indices=None,
            prefill_cu_outlens=None,
            prefill_logprob_tokens=prefill_logprob_tokens,
            prompt_lengths=prompt_lengths,
            prompt_lengths_tensor=prompt_lengths_tensor,
            input_lengths=input_lengths,
            input_lengths_tensor=input_lengths_tensor,
            prefix_offsets=prefix_offsets,
            read_offsets=read_offsets,
            all_input_ids=all_input_ids,
            all_input_ids_tensor=all_input_ids_tensor,
            next_token_chooser=next_token_chooser,
            stopping_criterias=stopping_criterias,
            top_n_tokens=top_n_tokens,
            top_n_tokens_tensor=top_n_tokens_tensor,
            num_blocks=num_blocks,
            max_blocks=max_blocks,
            speculative_ids=speculative_ids,
            adapter_meta=adapter_meta if ADAPTER_TO_INDEX else None,
            hpu_attn_meta=None,
            next_token_logits=None,
            speculative_logits=None,
            valid_indices=None,
        )

    def prepare_for_decode(
        self, dtype, use_contiguous_pa, bucketing_ctx, pad_token_id, sliding_window
    ):
        """Build HPU paged-attention metadata and pad tensors for a decode step.

        Pads the batch tensors up to the bucketed decode batch size (when a
        `bucketing_ctx` is given) and fills `self.hpu_attn_meta`. When
        `sliding_window` is not None, additional window-restricted block
        metadata is produced for sliding-window attention.
        """
        # Number of cache blocks currently used per request (one extra for the
        # block the next token will be written into).
        block_num = [length // BLOCK_SIZE + 1 for length in self.cache_lengths]
        block_tables = []
        for i, bt in enumerate(self.block_tables):
            block_tables.append(bt[0 : block_num[i]])

        if bucketing_ctx is not None:
            padded_bs = bucketing_ctx.get_padded_decode_batch_size(
                self.input_ids.shape[0]
            )
        else:
            padded_bs = self.input_ids.shape[0]
        slots =
self.slots[self.slot_indices]
        block_list, block_groups, block_usage, _, block_bucket_size = (
            generate_block_metadata(
                dtype,
                use_contiguous_pa,
                slots,
                block_tables,
                bucketing_ctx,
            )
        )
        meta = HPUPagedAttentionMetadata(
            block_list=_async_h2d_tensor_copy(block_list),
            block_groups=_async_h2d_tensor_copy(block_groups),
            block_usage=_async_h2d_tensor_copy(block_usage),
            block_mapping=None,
            attn_bias=None,
        )
        if sliding_window is not None:
            # Restrict each request's block table to the blocks that can still
            # fall inside the attention window.
            block_tables_in_window = []
            for i, bt in enumerate(self.block_tables):
                block_num_in_window = (
                    sliding_window + 2 * BLOCK_SIZE - 2 - slots[i] % BLOCK_SIZE
                ) // BLOCK_SIZE
                block_tables_in_window.append(
                    bt[max(0, block_num[i] - block_num_in_window) : block_num[i]]
                )
            # Collect, per request, the slots whose distance from the current
            # position is within the sliding window.
            slots_in_window = []
            for i, indice in enumerate(self.slot_indices):
                start_idx = indice - self.cache_lengths[i]
                mask = (
                    indice
                    - torch.arange(
                        start_idx,
                        indice + 1,
                        device=self.slots.device,
                    )
                ) < sliding_window
                slots_in_window.append(self.slots[start_idx : indice + 1][mask])
            slots_in_window = torch.cat(slots_in_window, dim=0)
            (
                block_list_in_window,
                block_groups_in_window,
                block_usage_in_window,
                slots_in_window_mask,
                _,
            ) = generate_block_metadata(
                dtype,
                use_contiguous_pa,
                slots,
                block_tables_in_window,
                bucketing_ctx,
                slots_in_window,
                block_bucket_size,
            )
            meta.block_list_in_window = _async_h2d_tensor_copy(block_list_in_window)
            meta.block_groups_in_window = _async_h2d_tensor_copy(block_groups_in_window)
            meta.block_usage_in_window = _async_h2d_tensor_copy(block_usage_in_window)
            meta.slots_in_window_mask = _async_h2d_tensor_copy(slots_in_window_mask)

        self.hpu_attn_meta = trim_attn_metadata(meta)
        # Pad batch-dimension tensors up to the bucketed batch size so shapes
        # match a pre-compiled HPU graph.
        self.input_ids = F.pad(
            self.input_ids, (0, padded_bs - self.input_ids.shape[0]), value=pad_token_id
        )
        if self.position_ids.dim() == 2:
            # Qwen VL case
            self.position_ids = F.pad(
                self.position_ids,
                (0, 0, 0, padded_bs - self.position_ids.shape[0]),
                value=1,
            )
        else:
            self.position_ids = F.pad(
                self.position_ids, (0, padded_bs - self.position_ids.shape[0]), value=1
            )
        self.input_lengths_tensor = F.pad(
            self.input_lengths_tensor,
            (0, padded_bs - self.input_lengths_tensor.shape[0]),
            value=0,
        )
        self.cache_lengths_tensor = F.pad(
            self.cache_lengths_tensor,
            (0, padded_bs - self.cache_lengths_tensor.shape[0]),
            value=0,
        )
        # The token chooser must cover the padded batch size too; rebuild it
        # with padded parameters while preserving live grammar FSM states.
        if len(self.next_token_chooser.do_sample) != padded_bs:
            next_token_chooser_parameters = []
            next_token_chooser_parameters.extend([r.parameters for r in self.requests])
            pad_next_token_chooser_parameters(next_token_chooser_parameters, padded_bs)
            # update past grammar states
            fsm_grammar_states = [0] * padded_bs
            for i, req in enumerate(self.requests):
                fsm_grammar_states[i] = self.next_token_chooser.fsm_grammar_states[i]
            self.next_token_chooser = HeterogeneousNextTokenChooser.from_pb(
                next_token_chooser_parameters,
                self.next_token_chooser.dtype,
                self.next_token_chooser.device,
                self.next_token_chooser.tokenizer,
                fsm_grammar_states,
            )

    def prepare_for_prefill(
        self, max_padded_input_len, max_padded_bs, max_total_tokens, pad_token_id
    ):
        """Compute all prefill-time tensors (ids, positions, slots, indices).

        Left-pads every request to `max_padded_input_len` and the batch to
        `max_padded_bs` so shapes match the warmed-up HPU graphs; fills the
        fields that `from_tokenized`/`filter`/`concatenate` left as None.
        """
        # Prepare values if we need to continue prefilling
        # Speculation must be ignored while we prefill even with chunking
        # it simplifies everything
        assert self.speculative_ids is None

        # device = self.block_tables_tensor.device

        # hpu does not support varlen for prefill, use sdpa instead.
so need to pad input_tensor, position
        # padding to left to work with sliding window
        # use prefill_cache_indices to indicate the valid kv slot, update prefill_next_token_indices to indicate
        # the right logit position
        input_ids_padded_length = []
        # need extra pad to match warmup seq
        extra_pad = max_padded_input_len - self.max_input_length
        extra_pad_bs = max_padded_bs - len(self)
        device = "hpu"
        if isinstance(self.input_ids, list) and len(self) > 1:
            # Multiple requests, each a python list of ids: left-pad each one
            # to the common padded length, then flatten.
            input_ids_padded_length = []
            input_ids = []
            for input_id in self.input_ids:
                padded = self.max_input_length - len(input_id) + extra_pad
                if padded > 0:
                    input_id = [pad_token_id] * padded + input_id
                input_ids.append(input_id)
                input_ids_padded_length.append(padded)
            input_ids = np.concatenate(input_ids, dtype=np.int64)
            self.input_ids = torch.tensor(input_ids, dtype=torch.int64, device=device)
        elif isinstance(self.input_ids, list):
            # Single request as a python list of ids.
            input_ids = self.input_ids[0]
            input_ids_padded_length.append(extra_pad)
            input_ids = [pad_token_id] * extra_pad + input_ids
            self.input_ids = torch.tensor(input_ids, dtype=torch.int64, device=device)
        else:
            # Already a flat tensor: scatter each request's ids to the right
            # end of its padded window.
            input_ids = torch.full(
                (max_padded_input_len * len(self),),
                pad_token_id,
                dtype=torch.int64,
                device=self.input_ids.device,
            )
            src_pos = 0
            for i in range(len(self)):
                end_pos = (i + 1) * max_padded_input_len
                start_pos = end_pos - self.input_lengths[i]
                input_ids[start_pos:end_pos] = self.input_ids[
                    src_pos : src_pos + self.input_lengths[i]
                ]
                input_ids_padded_length.append(
                    max_padded_input_len - self.input_lengths[i]
                )
                src_pos += self.input_lengths[i]
            self.input_ids = input_ids

        # Pad the batch dimension up to the bucketed batch size.
        self.input_ids = F.pad(
            self.input_ids, (0, extra_pad_bs * max_padded_input_len), value=pad_token_id
        )

        self.input_lengths_tensor = torch.tensor(self.input_lengths, dtype=torch.int32)
        self.input_lengths_tensor = F.pad(
            self.input_lengths_tensor, (0, extra_pad_bs), value=0
        )

        cu_seqlen_prefill = self.input_lengths_tensor.new_zeros(max_padded_bs + 1)
        torch.cumsum(self.input_lengths_tensor, out=cu_seqlen_prefill[1:], dim=0)
        self.cu_seqlen_prefill = cu_seqlen_prefill.to(torch.int32)
        self.cache_lengths_tensor = torch.tensor(self.cache_lengths, dtype=torch.int32)
        self.cache_lengths_tensor = F.pad(
            self.cache_lengths_tensor, (0, extra_pad_bs), value=0
        )

        position_ids = []
        slot_indices = []
        prefill_cache_indices = []
        all_prefill_logprobs = True
        no_prefill_logprobs = True
        prefill_cu_outlens = [0]

        # Cumulative length
        cumulative_length = 0
        cumulative_slot_tokens = 0
        prefill_out_cumulative_length = 0

        adapter_indices_list = []
        adapter_set = set()

        # Per-request pass: positions, slot indices, kv-cache write indices
        # and logprob bookkeeping.
        for i, (
            r,
            cache_length,
            input_length,
            prompt_length,
            request_prefilling,
            blocks,
        ) in enumerate(
            zip(
                self.requests,
                self.cache_lengths,
                self.input_lengths,
                self.prompt_lengths,
                self.prefilling_mask,
                self.block_tables,
            )
        ):
            next_chunk_length = input_length
            # Position ids
            request_position_ids = torch.arange(
                cache_length, cache_length + input_length, dtype=torch.int32
            )
            # Left-pad positions with 1 to line up with the left-padded ids.
            request_position_ids = F.pad(
                request_position_ids, (input_ids_padded_length[i], 0), value=1
            )
            position_ids.append(request_position_ids)

            if not r.slots:
                request_slots = [
                    s
                    for b in blocks
                    for s in range(b * BLOCK_SIZE, (b + 1) * BLOCK_SIZE)
                ]
            else:
                request_slots = r.slots

            request_slot_indices = torch.arange(
                cache_length + cumulative_slot_tokens,
                cache_length + cumulative_slot_tokens + input_length,
                dtype=torch.int64,
            )

            slot_indices.append(request_slot_indices)

            # Update
            cumulative_slot_tokens += len(request_slots)

            # Create tensor to slice into the kv tensor in prefill
            # hpu need request_prefill_cache_indices to skip padding in kv cache
            sliding_window = input_length
            cumulative_length += input_ids_padded_length[i]
            if sliding_window is not None:
                request_prefill_cache_indices = torch.arange(
                    cumulative_length + max(0, input_length - sliding_window),
                    cumulative_length + input_length,
                    dtype=torch.int64,
                )

            # Prefill logprobs is ignored if the request is done prefilling
            prefill_logprobs = r.prefill_logprobs and request_prefilling

            all_prefill_logprobs = all_prefill_logprobs and prefill_logprobs
            no_prefill_logprobs = no_prefill_logprobs and not prefill_logprobs

            if prefill_logprobs:
                prefill_cu_outlens.append(prefill_out_cumulative_length + input_length)
                prefill_out_cumulative_length += input_length
            else:
                prefill_cu_outlens.append(prefill_out_cumulative_length + 1)
                prefill_out_cumulative_length += 1

            prefill_cache_indices.append(request_prefill_cache_indices)

            ADAPTER_TO_INDEX = get_adapter_to_index()
            if ADAPTER_TO_INDEX:
                adapter_index = ADAPTER_TO_INDEX.get(r.adapter_id, 0)
                adapter_indices_list.append(
                    torch.full((next_chunk_length,), adapter_index)
                )
                adapter_set.add(adapter_index)

            # Update
            cumulative_length += next_chunk_length

        # Mixed case: some requests want prefill logprobs, some don't, so
        # build explicit head/next-token index tensors.
        if not all_prefill_logprobs and not no_prefill_logprobs:
            prefill_head_indices = []
            prefill_next_token_indices = []

            # Cumulative length
            cumulative_length = 0
            prefill_out_cumulative_length = 0

            for i, (
                r,
                input_length,
                request_prefilling,
            ) in enumerate(
                zip(
                    self.requests,
                    self.input_lengths,
                    self.prefilling_mask,
                )
            ):
                # Prefill logprobs is ignored if the request is done prefilling
                prefill_logprobs = r.prefill_logprobs and request_prefilling

                if prefill_logprobs:
                    prefill_head_indices.append(
                        torch.arange(
                            cumulative_length,
                            cumulative_length + input_length,
                            dtype=torch.int32,
                        )
                    )
                    prefill_next_token_indices.append(
                        prefill_out_cumulative_length + input_length - 1
                    )
                    prefill_out_cumulative_length += input_length
                else:
                    prefill_head_indices.append(
                        torch.tensor(
                            [cumulative_length + input_length - 1],
                            dtype=torch.int32,
                        )
                    )
                    prefill_next_token_indices.append(prefill_out_cumulative_length)
                    prefill_out_cumulative_length += 1

                # Update
                cumulative_length += input_length

        if len(self) > 1:
            if position_ids:
                position_ids = torch.cat(position_ids)
            if slot_indices:
                slot_indices = torch.cat(slot_indices)
            prefill_cache_indices = torch.cat(prefill_cache_indices)
        else:
            if position_ids:
                position_ids = position_ids[0]
            if slot_indices:
                slot_indices = slot_indices[0]
            prefill_cache_indices = prefill_cache_indices[0]

        self.position_ids = position_ids
        self.position_ids = F.pad(
            self.position_ids, (0, extra_pad_bs * max_padded_input_len), value=1
        )
        self.slot_indices = slot_indices

        self.prefill_cu_outlens = prefill_cu_outlens
        # Boolean mask over the padded flat input: True where the token is
        # real (its kv entry must be written), False on padding.
        self.prefill_cache_indices = torch.zeros_like(
            self.input_ids, dtype=torch.bool, device="cpu"
        )
        self.prefill_cache_indices[prefill_cache_indices] = True

        if all_prefill_logprobs:
            prefill_head_indices = None
            prefill_next_token_indices = self.cu_seqlen_prefill[1:] - 1
        elif no_prefill_logprobs:
            prefill_head_indices = self.cu_seqlen_prefill[1:] - 1
            prefill_next_token_indices = None
        else:
            prefill_head_indices = torch.cat(prefill_head_indices)
            prefill_next_token_indices = torch.tensor(
                prefill_next_token_indices, dtype=torch.int64
            )

        self.prefill_head_indices = prefill_head_indices
        self.prefill_next_token_indices = prefill_next_token_indices
        # Shift the logit-gather indices by the cumulative left-padding that
        # precedes each request in the flattened padded input.
        input_ids_padded_length_tensor = torch.cumsum(
            torch.tensor(input_ids_padded_length, dtype=torch.int32),
            dim=-1,
        ).to(torch.int32)
        input_ids_padded_length_tensor = F.pad(
            input_ids_padded_length_tensor, (0, extra_pad_bs), value=0
        )
        if self.prefill_head_indices is not None:
            self.prefill_head_indices = (
                self.prefill_head_indices + input_ids_padded_length_tensor
            )

        if self.prefill_next_token_indices is not None:
            self.prefill_next_token_indices = (
                self.prefill_next_token_indices + input_ids_padded_length_tensor
            )
        all_input_ids_tensor = torch.full(
            (max_padded_bs, max(max_total_tokens, self.all_input_ids_tensor.shape[-1])),
            pad_token_id,
            dtype=torch.int64,
            device="hpu",
        )
        for i in range(len(self)):
            all_input_ids_tensor[i, : self.all_input_ids_tensor.shape[-1]] = (
                self.all_input_ids_tensor[i]
            )
        self.all_input_ids_tensor = all_input_ids_tensor

        # Token chooser must match the padded batch size; rebuild it while
        # preserving live grammar FSM states.
        if len(self.next_token_chooser.do_sample) != max_padded_bs:
            next_token_chooser_parameters = []
            next_token_chooser_parameters.extend([r.parameters for r in self.requests])
            pad_next_token_chooser_parameters(
                next_token_chooser_parameters, max_padded_bs
            )
            # update past grammar states
            fsm_grammar_states = [0] * max_padded_bs
            for i, req in enumerate(self.requests):
                fsm_grammar_states[i] = self.next_token_chooser.fsm_grammar_states[i]
            self.next_token_chooser = HeterogeneousNextTokenChooser.from_pb(
                next_token_chooser_parameters,
                self.next_token_chooser.dtype,
                self.next_token_chooser.device,
                self.next_token_chooser.tokenizer,
                fsm_grammar_states,
            )

        if ADAPTER_TO_INDEX:
            if adapter_set:
                adapter_indices = torch.cat(adapter_indices_list).to(dtype=torch.int64)
                adapter_segments, adapter_segment_indices = find_segments(
                    adapter_indices
                )
            else:
                # No adapters in use: a single all-zero segment.
                adapter_indices = torch.zeros_like(self.input_ids)
                adapter_segments = [0, len(adapter_indices)]
                adapter_segment_indices = [len(adapter_indices) - 1]

            adapter_segments = torch.tensor(adapter_segments, dtype=torch.int32)
            self.adapter_meta = AdapterBatchMetadata(
                adapter_indices=adapter_indices,
                adapter_set=adapter_set,
                adapter_segments=adapter_segments,
                segment_indices=adapter_segment_indices,
            )

    def __len__(self):
        """Number of live requests in the batch."""
        return len(self.requests)


# Target modules eligible for LoRA adapters.
ADAPTER_LAYERS = [
    "q_proj",
    "k_proj",
    "v_proj",
    "o_proj",
    "gate_proj",
    "up_proj",
    "down_proj",
]
# Layers whose weights are sharded along the row (input) dimension.
ROW_PARALLEL = {"o_proj", "down_proj", "lm_head"}


class FlashCausalLM(Model):
    """Causal LM running with paged (flash-style) attention on HPU."""

    def __init__(
        self,
        model_id: str,
        model_class,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        speculator: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
        # NOTE(review): mutable default argument - shared across calls;
        # consider `None` + in-body default.
        lora_adapter_ids: Optional[list] = [],
        tokenizer_class: PreTrainedTokenizerBase = AutoTokenizer,
        config_class: PreTrainedTokenizerBase = AutoConfig,
        default_dtype=torch.float16,
        aliases=None,
        # Used for Santacoder override of config
        num_kv_heads: Optional[int] = None,
        # Deepseek V2 uses different QK and V dims.
head_size: Optional[int] = None,
        skip_special_tokens: bool = True,
        kv_cache_dtype: Optional[torch.dtype] = None,
        support_chunking: bool = True,
    ):
        """Load tokenizer, config and sharded weights, then build the model on HPU."""
        self.quantize = quantize
        self.process_group, rank, world_size = initialize_torch_distributed()
        if world_size > 1:
            # Separate CPU (gloo) group for collectives that cannot run on HPU.
            self.process_group_cpu = torch.distributed.new_group(backend="gloo")

        device = torch.device("hpu")
        dtype = torch.bfloat16 if dtype is None else dtype

        tokenizer = tokenizer_class.from_pretrained(
            model_id,
            revision=revision,
            padding_side="left",
            truncation_side="left",
            trust_remote_code=trust_remote_code,
        )
        # Best effort: pick up extra EOS ids from the generation config.
        try:
            generation_config = GenerationConfig.from_pretrained(
                model_id, revision=revision, trust_remote_code=trust_remote_code
            )
            if isinstance(generation_config.eos_token_id, (list, set)):
                # TODO Huge hack
                tokenizer._eos_token_ids = set(generation_config.eos_token_id)
        except Exception:
            pass

        config = config_class.from_pretrained(
            model_id, revision=revision, trust_remote_code=trust_remote_code
        )
        config.quantize = quantize
        config.speculator = speculator

        torch.distributed.barrier(group=self.process_group)

        weights_loader = get_loader(quantize, model_id, revision)
        filenames = weight_files(model_id, revision=revision, extension=".safetensors")
        weights = Weights(
            filenames,
            device,
            dtype,
            process_group=self.process_group,
            aliases=aliases,
            weights_loader=weights_loader,
        )

        prefix = None
        model = model_class(prefix, config, weights)
        torch.distributed.barrier(group=self.process_group)

        # VLM models define the config we care about in their text_config
        text_config = getattr(config, "text_config", None)
        if text_config is not None:
            config = text_config

        if getattr(config, "sliding_window", None) is None:
            config.sliding_window = None
        if getattr(config, "use_sliding_window", True) is False:
            config.sliding_window = None

        self.num_layers = config.num_hidden_layers
        # Attention heads are sharded across the tensor-parallel group.
        self.num_heads = config.num_attention_heads // self.process_group.size()
        self.config = config
        # Validation is done in the model itself
        if num_kv_heads is None:
            num_kv_heads = getattr(config, "num_key_value_heads", None)
            # GPT-2 workaround
            if num_kv_heads is None:
                num_kv_heads = getattr(config, "n_head", None)
        if num_kv_heads is None:
            raise ValueError("Cannot get the number of key/value heads")
        # If there are fewer KV heads than shards, replicate them instead of
        # sharding (e.g. multi-query attention).
        self.num_kv_heads = (
            num_kv_heads // self.process_group.size()
            if num_kv_heads // self.process_group.size() > 0
            else num_kv_heads
        )
        assert self.num_kv_heads > 0

        if head_size is None:
            # Some models use GQA and different sizes for o_proj
            # and q_proj, that allows for that.
            if getattr(config, "head_dim", None) is not None:
                self.head_size = config.head_dim
            else:
                self.head_size = config.hidden_size // config.num_attention_heads
        else:
            self.head_size = head_size

        self.cuda_graphs = {}
        self.kv_cache = []
        self.kv_cache_dtype = dtype if kv_cache_dtype is None else kv_cache_dtype
        self.bucketing_ctx = None
        self.max_total_tokens = None
        self.max_input_tokens = None
        htorch.core.hpu_set_env()
        # In lazy mode, wrap the model so forward passes are captured as HPU graphs.
        if htorch.utils.internal.is_lazy():
            htorch.hpu.wrap_in_hpu_graph(model, disable_tensor_cache=True)
        environment.set_model_config(self.config)
        self.use_contiguous_pa = (
            os.environ.get("VLLM_CONTIGUOUS_PA", "true").lower() == "true"
        )
        self.limit_hpu_graph = (
            os.environ.get("LIMIT_HPU_GRAPH", "false").lower() == "true"
        )
        self.skip_warmup = os.getenv("VLLM_SKIP_WARMUP", "false").lower() == "true"
        self.max_seq_len_to_capture = 8192
        # Fall back through config / tokenizer EOS to pick a pad token id.
        if tokenizer.pad_token_id is None:
            if config.pad_token_id is not None:
                tokenizer.pad_token_id = config.pad_token_id
            elif config.eos_token_id is not None:
                tokenizer.pad_token_id = (
                    config.eos_token_id[0]
                    if isinstance(config.eos_token_id, list)
                    else config.eos_token_id
                )
            elif tokenizer.eos_token_id is not None:
                tokenizer.pad_token_id = tokenizer.eos_token_id
            else:
                tokenizer.pad_token_id = 0

        super().__init__(
            model_id=model_id,
            model=model,
            tokenizer=tokenizer,
            requires_padding=False,
            dtype=dtype,
            device=device,
            rank=rank,
            world_size=world_size,
            sliding_window=config.sliding_window,
            support_chunking=support_chunking,
        )

    @property
    def batch_type(self)
-> Type[FlashCausalLMBatch]: return FlashCausalLMBatch def max_past(self) -> int: return getattr(self.model, "max_past", None) def init_kv_cache( self, num_blocks: int, num_layers: int, num_heads: int, head_size: int, dtype: torch.dtype, device: torch.device, ): self.kv_cache = [] empty_cache() if self.config.model_type in ["deepseek_v3", "deepseek_v2"]: self.kv_cache = [ KVCompressCache( num_blocks=num_blocks, head_size=self.config.kv_lora_rank + self.config.qk_rope_head_dim, dtype=dtype, device=device, ) for _ in range(num_layers) ] else: self.kv_cache = [ KVCache( num_blocks=num_blocks, num_heads=num_heads, head_size=head_size, dtype=dtype, device=device, ) for _ in range(num_layers) ] def warmup( self, batch: FlashCausalLMBatch, max_input_tokens: Optional[int], max_total_tokens: Optional[int], ): if os.environ.get("MAX_BATCH_SIZE") is None: raise RuntimeError( "MAX_BATCH_SIZE is not set, it should be set in the launcher " "using `--max-batch-size xxx`" ) # The warmup batch is the biggest batch we could ever receive self.kv_cache = [] empty_cache() self.graphed_buckets = set() # Inspired by the original implementation in [vllm](https://github.com/vllm-project/vllm) # Calculate the number of blocks that can be allocated with the free memory dtype_size = torch.tensor([], dtype=self.kv_cache_dtype).element_size() if self.config.model_type in ["deepseek_v3", "deepseek_v2"]: cache_block_size = BLOCK_SIZE * ( self.config.kv_lora_rank + self.config.qk_rope_head_dim ) else: cache_block_size = BLOCK_SIZE * self.num_kv_heads * self.head_size cache_block_size = cache_block_size * 2 total_cache_size = self.num_layers * cache_block_size * dtype_size free_memory = get_free_memory(self.device, TGI_WIGGLE_ROOM) self.mem_reserved = int(free_memory * (1 - MEMORY_FRACTION)) graph_reserved_mem = ( float(os.environ.get("TGI_GRAPH_RESERVED_MEM", "0.1")) if htorch.utils.internal.is_lazy() else 0 ) mem_used_from_graph = int( (free_memory - self.mem_reserved) * graph_reserved_mem ) 
log_master( logger.info, f"Free memory on device {self.device}: {format_bytes(free_memory)} used_for_graph: {format_bytes(mem_used_from_graph)} ratio {graph_reserved_mem} reserved_for_runtime: {format_bytes(self.mem_reserved)}", ) if max_total_tokens is None: max_total_tokens = sum(batch.input_lengths) if max_input_tokens is None: max_input_tokens = max_total_tokens - 1 self.max_total_tokens = max_total_tokens self.max_input_tokens = max_input_tokens try: self.init_kv_cache( batch.num_blocks, self.num_layers, self.num_kv_heads, self.head_size, self.kv_cache_dtype, self.device, ) batch_num_blocks = batch.num_blocks num_tokens = batch.to_pb().current_tokens synchronize(self.device) _, _batch, _ = self.generate_token([batch]) except Exception: raise RuntimeError( f"Not enough memory to handle {num_tokens} prefill tokens. " f"You need to decrease `--max-batch-prefill-tokens`" ) synchronize(self.device) free_memory = get_free_memory(self.device, TGI_WIGGLE_ROOM) kv_memory = free_memory - self.mem_reserved - mem_used_from_graph num_blocks = ( # Leave 5% for some wiggle room int(kv_memory // total_cache_size) # Add batch.num_blocks as we allocated it above, so it is included in the peak memory. 
+ batch_num_blocks ) log_master(logger.info, f"KV-cache blocks: {num_blocks}, size: {BLOCK_SIZE}") self.kv_cache = [] empty_cache() self.init_kv_cache( num_blocks, self.num_layers, self.num_kv_heads, self.head_size, self.kv_cache_dtype, self.device, ) self.max_batch_prefill_tokens = get_max_prefill_tokens() max_num_seqs = int(os.getenv("MAX_BATCH_SIZE")) HPUBucketingContext = get_bucketing_context() # need to warmup one more step since block is allocated from 1 block_step = os.getenv("VLLM_DECODE_BLOCK_BUCKET_STEP", BLOCK_SIZE) max_total_tokens_aligned = math.ceil( max_total_tokens / BLOCK_SIZE ) * BLOCK_SIZE + math.ceil(block_step * BLOCK_SIZE / max_num_seqs) model_max_length = self.tokenizer.model_max_length max_position_embeddings = getattr( self.config, "max_position_embeddings", model_max_length ) self.bucketing_ctx = HPUBucketingContext( max_num_seqs, max_num_seqs, # self.max_num_prefill_seqs, #TODO BLOCK_SIZE, max_num_seqs * max_total_tokens_aligned, False, min(model_max_length, max_position_embeddings), max_input_tokens, max_total_tokens_aligned, ) max_blocks = max( BLOCK_SIZE, max_num_seqs * max_total_tokens_aligned // BLOCK_SIZE ) self.bucketing_ctx.num_hpu_blocks = min(max_blocks, num_blocks) synchronize(self.device) if self.skip_warmup: self.bucketing_ctx.generate_prompt_buckets() self.bucketing_ctx.generate_decode_buckets( self.bucketing_ctx.num_hpu_blocks ) log_master( logger.info, "skip warmup hpu graph, not recommmended, may cause OOM" ) del _batch, batch return int(num_blocks * BLOCK_SIZE), max_input_tokens, max_total_tokens self.warmup_hpu_graph(batch) del _batch, batch return int(num_blocks * BLOCK_SIZE), max_input_tokens, max_total_tokens def log_warmup(self, prefilling, i, max_i, batch_size, seq_len): free_mem = format_bytes(HabanaMemoryProfiler.current_free_device_memory()) phase = "Prompt" if prefilling else "Decode" dim = "seq_len" if prefilling else "num_blocks" graphed_bucket = (batch_size, seq_len, prefilling) bypass = graphed_bucket not 
in self.graphed_buckets msg = ( f"[Warmup][{phase}][{i+1}/{max_i}] " f"batch_size:{batch_size} " f"{dim}:{seq_len} " f"bypass:{bypass} " f"free_mem:{free_mem}" ", this may take a while..." ) log_master(logger.info, msg) def use_graphs(self, prefill, seq_len, batch_size): if self.limit_hpu_graph and prefill: return False if self.skip_warmup: return True return (batch_size, seq_len, prefill) in self.graphed_buckets def align_workers(self, value, op): if self.world_size <= 1: return value value_t = torch.tensor(value, device="cpu") torch.distributed.all_reduce(value_t, op=op, group=self.process_group_cpu) return value_t.item() def warmup_hpu_graph(self, batch): prompt_graph_mem_ratio = float(os.environ.get("VLLM_GRAPH_PROMPT_RATIO", "0.3")) free_mem = HabanaMemoryProfiler.current_free_device_memory() graph_free_mem = free_mem - self.mem_reserved graph_free_mem = self.align_workers( graph_free_mem, torch.distributed.ReduceOp.MIN ) prompt_available_memory = prompt_graph_mem_ratio * graph_free_mem decode_available_memory = graph_free_mem - prompt_available_memory msg = ( f"Using {format_bytes(graph_free_mem)}" f"/{format_bytes(free_mem)} " "of free device memory for HPUGraphs, " f"{format_bytes(prompt_available_memory)} for prompt and " f"{format_bytes(decode_available_memory)} for decode " f"(VLLM_GRAPH_PROMPT_RATIO={prompt_graph_mem_ratio})" ) log_master(logger.info, msg) start_time = time.time() warmup_shape_count = 0 warmup_times = 3 self.bucketing_ctx.generate_prompt_buckets() def ordering_function_min_tokens(b): return (b[0] * b[1], b[1], b[0]) buckets = list( sorted(self.bucketing_ctx.prompt_buckets, key=ordering_function_min_tokens) ) total_batch_seq = 0.001 total_mem = 0 available_mem = prompt_available_memory msg = ( f"Prefill batch size list:{[bsz[0] for bsz in buckets]}\n" f"Prefill sequence length list:{[seq[1] for seq in buckets]}\n" ) log_master(logger.info, msg) for i, (batch_size, seq_len) in enumerate(buckets): if batch_size * seq_len > 
self.max_batch_prefill_tokens: continue # Graph memory usage is proportional to seq dimension in a batch batch_seq = batch_size * seq_len mem_estimate = batch_seq / total_batch_seq * total_mem graphed_bucket = (batch_size, seq_len, True) if not ( mem_estimate >= available_mem or batch_seq > self.max_seq_len_to_capture ): if graphed_bucket not in self.graphed_buckets: self.graphed_buckets.add(graphed_bucket) warmup_shape_count += 1 self.log_warmup(True, i, len(buckets), batch_size, seq_len) with HabanaMemoryProfiler() as mem_prof: for index in range(warmup_times): self.warmup_prefill(seq_len, batch_size, batch) synchronize(self.device) used_mem = self.align_workers( mem_prof.consumed_device_memory, torch.distributed.ReduceOp.MAX ) if graphed_bucket in self.graphed_buckets: available_mem -= used_mem total_mem += used_mem total_batch_seq += batch_seq log_master(logger.info, "Prefill warmup successful.\n") def ordering_function_max_bs(b): return (-b[0], b[1]) self.bucketing_ctx.generate_decode_buckets(self.bucketing_ctx.num_hpu_blocks) buckets = list( sorted(self.bucketing_ctx.decode_buckets, key=ordering_function_max_bs) ) free_mem = HabanaMemoryProfiler.current_free_device_memory() total_batch_seq = 0.001 total_mem = 0 available_mem = free_mem - self.mem_reserved log_master( logger.info, f"Decode batch size list:{[bsz[0] for bsz in buckets]}\n" ) for i, (batch_size, block_num) in enumerate(buckets): if batch_size > block_num: continue # Graph memory usage is proportional to seq dimension in a batch batch_seq = batch_size mem_estimate = batch_seq / total_batch_seq * total_mem graphed_bucket = (batch_size, block_num, False) if not mem_estimate >= available_mem: if graphed_bucket not in self.graphed_buckets: self.graphed_buckets.add(graphed_bucket) warmup_shape_count += 1 self.log_warmup(False, i, len(buckets), batch_size, block_num) with HabanaMemoryProfiler() as mem_prof: for index in range(warmup_times): self.warmup_decode(batch_size, block_num, batch) 
synchronize(self.device) used_mem = self.align_workers( mem_prof.consumed_device_memory, torch.distributed.ReduceOp.MAX ) if graphed_bucket in self.graphed_buckets: available_mem -= used_mem total_mem += used_mem total_batch_seq += batch_seq log_master(logger.info, "Decode warmup successful.\n") log_master( logger.info, f"warmup hpu graph time {int(time.time() - start_time)}s warmup shape count {warmup_shape_count}", ) def warmup_prefill( self, prompt_len: int, batch_size: int, batch: FlashCausalLMBatch ): input_ids = torch.zeros(prompt_len, dtype=batch.input_ids.dtype).repeat( batch_size ) position_ids = torch.arange(prompt_len, dtype=batch.position_ids.dtype).repeat( batch_size ) max_bt = (prompt_len // BLOCK_SIZE + 1) * batch_size block_tables = torch.arange(max_bt, dtype=torch.int32).reshape(batch_size, -1) slot_acc = [] for i in range(batch_size): slots = [] for b in block_tables[i]: slots.extend(range(b * BLOCK_SIZE, (b + 1) * BLOCK_SIZE)) slot_acc.extend(slots[:prompt_len]) slots = torch.tensor(slot_acc, dtype=batch.slots.dtype) input_lengths = torch.ones(batch_size, dtype=torch.int32) * prompt_len cu_seqlen_prefill = torch.zeros(batch_size + 1, dtype=torch.int32) torch.cumsum(input_lengths, -1, out=cu_seqlen_prefill[1:]) seqlen = Seqlen( input_lengths=_async_h2d_tensor_copy(input_lengths), ) lm_head_indices = input_lengths - 1 kwargs = {} if htorch.utils.internal.is_lazy(): kwargs["bypass_hpu_graphs"] = not self.use_graphs( True, prompt_len, batch_size ) if self.sliding_window is not None: attn_mask = seqlen.make_sliding_window_bias( input_lengths.tolist(), self.sliding_window, self.dtype, prompt_len, batch_size, ) seqlen.attn_mask = _async_h2d_tensor_copy(attn_mask) # We pass a `cu_seqlen_prefill` in order not to have to deal with paged attention cache allocation/deallocation. 
self.model.forward( input_ids=_async_h2d_tensor_copy(input_ids), position_ids=_async_h2d_tensor_copy(position_ids), cu_seqlen_prefill=_async_h2d_tensor_copy(cu_seqlen_prefill), kv_cache=self.kv_cache, slots=_async_h2d_tensor_copy(slots), seqlen=trim_seqlen_metadata(seqlen), lm_head_indices=_async_h2d_tensor_copy(lm_head_indices), adapter_data=None, hpu_attention_meta=None, **kwargs, ) def warmup_decode(self, batch_size: int, block_num: int, batch: FlashCausalLMBatch): input_ids = torch.zeros(batch_size, dtype=batch.input_ids.dtype) position_ids = torch.arange(batch_size, dtype=batch.position_ids.dtype) blocks = [block_num // batch_size for _ in range(batch_size)] blocks[0] += block_num % batch_size block_tables = [] slots = [] start_idx = 0 slot_indices = [] # fetch the last blocked to warmup block num for i in range(batch_size): block_array = list(range(start_idx, start_idx + blocks[i])) slots.append(BLOCK_SIZE * block_array[-1] + BLOCK_SIZE - 1) slot_indices.append((start_idx + blocks[i]) * BLOCK_SIZE - 1) block_tables.append(block_array) start_idx += blocks[i] input_lengths = torch.ones(batch_size, dtype=torch.int32) cu_seqlen_prefill = torch.zeros(batch_size + 1, dtype=torch.int32) torch.cumsum(input_lengths, -1, out=cu_seqlen_prefill[1:]) seqlen = Seqlen( input_lengths=_async_h2d_tensor_copy(input_lengths), ) block_list, block_groups, block_usage, _, block_bucket_size = ( generate_block_metadata( self.dtype, self.use_contiguous_pa, slots, block_tables, self.bucketing_ctx, ) ) meta = HPUPagedAttentionMetadata( block_list=_async_h2d_tensor_copy(block_list), block_groups=_async_h2d_tensor_copy(block_groups), block_usage=_async_h2d_tensor_copy(block_usage), block_mapping=None, attn_bias=None, ) if self.sliding_window is not None: block_tables_in_window = [] for i, bt in enumerate(block_tables): block_num_in_window = ( self.sliding_window + BLOCK_SIZE - 1 ) // BLOCK_SIZE block_tables_in_window.append( bt[max(0, blocks[i] - block_num_in_window) : blocks[i]] ) 
slots_in_window = [] start_idx = 0 for i, indice in enumerate(slot_indices): mask = ( indice - torch.arange(start_idx, indice + 1) ) < self.sliding_window slots_in_window.append(torch.arange(start_idx, indice + 1)[mask]) start_idx += blocks[i] * BLOCK_SIZE slots_in_window = torch.cat(slots_in_window, dim=0) ( block_list_in_window, block_groups_in_window, block_usage_in_window, slots_in_window_mask, _, ) = generate_block_metadata( self.dtype, self.use_contiguous_pa, slots, block_tables_in_window, self.bucketing_ctx, slots_in_window, block_bucket_size, ) meta.block_list_in_window = _async_h2d_tensor_copy(block_list_in_window) meta.block_groups_in_window = _async_h2d_tensor_copy(block_groups_in_window) meta.block_usage_in_window = _async_h2d_tensor_copy(block_usage_in_window) meta.slots_in_window_mask = _async_h2d_tensor_copy(slots_in_window_mask) hpu_attention_meta = trim_attn_metadata(meta) slots_tensor = torch.tensor(slots, dtype=batch.slots.dtype) kwargs = {} if htorch.utils.internal.is_lazy(): kwargs["bypass_hpu_graphs"] = not self.use_graphs( False, hpu_attention_meta.block_list.shape[0], batch_size ) # We pass a `cu_seqlen_prefill` in order not to have to deal with paged attention cache allocation/deallocation. 
self.model.forward( input_ids=_async_h2d_tensor_copy(input_ids), position_ids=_async_h2d_tensor_copy(position_ids), cu_seqlen_prefill=None, kv_cache=self.kv_cache, slots=_async_h2d_tensor_copy(slots_tensor), seqlen=trim_seqlen_metadata(seqlen), lm_head_indices=None, adapter_data=None, hpu_attention_meta=hpu_attention_meta, **kwargs, ) def forward( self, batch: FlashCausalLMBatch, adapter_data: AdapterBatchData ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: # Model Forward if batch.speculative_ids is not None: input_ids = batch.input_ids position_ids = batch.position_ids cu_seqlen_prefill = batch.cu_seqlen_prefill kv_cache = self.kv_cache block_tables = batch.block_tables_tensor slots = batch.slots[batch.slot_indices] input_lengths = batch.input_lengths_tensor max_s = batch.max_current_length lm_head_indices = batch.prefill_head_indices speculative_ids = batch.speculative_ids B, speculative_length = speculative_ids.shape new_length = speculative_length + 1 new_input_ids = torch.cat( [input_ids.unsqueeze(-1), speculative_ids], dim=1 ).reshape(-1) arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0) arange_int = arange.to(dtype=torch.int32) new_position_ids = ( position_ids.unsqueeze(-1).expand(B, new_length) + arange ).view(-1) # Slots can be discontiguous when prefix caching is enabled, so we need to expand the slot_indices, # then update the slots with the additional indices to ensure we're grabbing the ones that have been # allocated slot_indices = ( batch.slot_indices.unsqueeze(-1).expand(B, new_length) + arange_int ).view(-1) slots = batch.slots[slot_indices] input_lengths = ( input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int ).view(-1) # Add Copy the block tables for all members block_tables = ( block_tables.unsqueeze(1) .expand(B, new_length, -1) .reshape(B * new_length, -1) .contiguous() ) max_s = max_s + speculative_length input_ids = new_input_ids position_ids = new_position_ids else: input_ids = batch.input_ids 
position_ids = batch.position_ids cu_seqlen_prefill = batch.cu_seqlen_prefill kv_cache = self.kv_cache block_tables = batch.block_tables_tensor slots = batch.slots[batch.slot_indices] input_lengths = batch.input_lengths_tensor max_s = batch.max_current_length lm_head_indices = batch.prefill_head_indices if cu_seqlen_prefill is None and self.max_past() is not None: # In decode, not prefill, we're actually overwriting the KV-cache # in a circular buffer mode. # This makes sure the max_s for the decode pass is correct. max_s = min(self.max_past(), max_s) if batch.prefill_cache_indices is not None: slots_pad = torch.zeros_like(input_ids, device=slots.device) slots_pad[batch.prefill_cache_indices] = slots slots = slots_pad else: slots_pad = torch.zeros_like(input_ids, device=slots.device) slots_pad[: slots.shape[0]] = slots slots = slots_pad seqlen = Seqlen( input_lengths=_async_h2d_tensor_copy(input_lengths), ) kwargs = {} batch_size = input_lengths.shape[0] prompt_len = ( input_ids.shape[0] // batch_size if batch.prefilling else batch.hpu_attn_meta.block_list.shape[0] ) if htorch.utils.internal.is_lazy(): kwargs["bypass_hpu_graphs"] = not self.use_graphs( batch.prefilling, prompt_len, batch_size ) if self.sliding_window is not None and batch.prefilling: attn_mask = seqlen.make_sliding_window_bias( input_lengths.tolist(), self.sliding_window, self.dtype, prompt_len, batch_size, ) seqlen.attn_mask = _async_h2d_tensor_copy(attn_mask) logits, speculative_logits = self.model.forward( input_ids=input_ids, position_ids=_async_h2d_tensor_copy(position_ids), cu_seqlen_prefill=_async_h2d_tensor_copy(cu_seqlen_prefill), kv_cache=kv_cache, slots=_async_h2d_tensor_copy(slots), seqlen=trim_seqlen_metadata(seqlen), lm_head_indices=_async_h2d_tensor_copy(lm_head_indices), # TODO not support adapter now, need the add in the future adapter_data=None, hpu_attention_meta=batch.hpu_attn_meta, **kwargs, ) return logits, speculative_logits @tracer.start_as_current_span("generate_token") def 
generate_token( self, batches: List[FlashCausalLMBatch] ) -> Tuple[List[Generation], Optional[FlashCausalLMBatch], Tuple[int, int]]: # In order to pipeline any actions on CPU we perform the operation in 3 main stages: # Stage 1. Collect next token ids of any previously started generations start = time.time_ns() prev_batches = [] requests_to_generate = [] for batch_id, batch in enumerate(batches): if batch.next_token_logits is not None: prefill = batch.prefilling if batch.prefilling: batch.prefilling = False batch.prefilling_mask = [False] * len(batch) speculate = get_speculate() ( next_input_ids, next_token_logprobs, logprobs, accepted_ids, speculative_ids, ) = batch.next_token_chooser( batch.all_input_ids_tensor[ : batch.next_token_logits.shape[0], : batch.max_current_length ], batch.next_token_logits, speculate, batch.speculative_ids, batch.speculative_logits, ) batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens( batch.top_n_tokens, _async_h2d_tensor_copy(batch.top_n_tokens_tensor), logprobs, accepted_ids, ) if batch.valid_indices is not None: # TODO speculative decoding handling missing index = torch.arange( 0, len(batch.valid_indices), device=batch.all_input_ids_tensor.device, ) batch.all_input_ids_tensor.index_copy_( 0, index, batch.all_input_ids_tensor[batch.valid_indices] ) padded_total_bs = self.bucketing_ctx.get_padded_decode_batch_size( len(batch.valid_indices) ) next_input_ids.index_copy_( 0, index, next_input_ids[batch.valid_indices] ) next_input_ids = next_input_ids[:padded_total_bs] next_token_logprobs.index_copy_( 0, index, next_token_logprobs[batch.valid_indices] ) accepted_ids.index_copy_( 0, index, accepted_ids[batch.valid_indices] ) if speculative_ids is not None: speculative_ids = speculative_ids[batch.valid_indices] batch.top_n_tokens_tensor = batch.top_n_tokens_tensor[ batch.valid_indices ] top_n_tokens = [] batch_top_token_ids_v = [] batch_top_token_logprobs_v = [] for i in batch.valid_indices: 
top_n_tokens.append(batch.top_n_tokens[i]) batch_top_token_ids_v.append(batch_top_token_ids[i]) batch_top_token_logprobs_v.append(batch_top_token_logprobs[i]) batch_top_token_ids = batch_top_token_ids_v batch_top_token_logprobs = batch_top_token_logprobs_v batch.top_n_tokens = top_n_tokens batch.next_token_chooser = batch.next_token_chooser.filter( batch.valid_indices ) batch.valid_indices = None # Since we are done prefilling, all the tensors that were concatenating values for all the requests # instantly become of shape [BATCH_SIZE] if prefill: indices = batch.cu_seqlen_prefill[1:] - 1 # pad in left if batch.prefill_cache_indices is not None: batch.position_ids = batch.position_ids[ batch.prefill_cache_indices ][indices] else: batch.position_ids = batch.position_ids[indices] batch.slot_indices = batch.slot_indices[indices[: len(batch)]] if batch.adapter_meta is not None: batch.adapter_meta.adapter_indices = ( batch.adapter_meta.adapter_indices[indices] ) # For each member of the batch # Cumulative length if batch.speculative_logits is not None: cu_accepted_ids = accepted_ids.new_zeros(accepted_ids.shape[0] + 1) torch.cumsum(accepted_ids, dim=0, out=cu_accepted_ids[1:]) for i in range(len(batch)): batch.all_input_ids_tensor[ i, batch.cache_lengths[i] + batch.input_lengths[i] : batch.cache_lengths[i] + batch.input_lengths[i] + accepted_ids[i], ] = next_input_ids[cu_accepted_ids[i] : cu_accepted_ids[i + 1]] batch.input_ids = next_input_ids[cu_accepted_ids[1:] - 1] accepted_ids = accepted_ids.cpu() if batch.position_ids.dim() == 2: # Qwen2_vl case: batch.position_ids += accepted_ids.unsqueeze(-1) else: batch.position_ids += accepted_ids batch.cache_lengths_tensor += ( batch.input_lengths_tensor + accepted_ids - 1 ) batch.input_lengths_tensor = torch.ones_like( batch.input_lengths_tensor ) batch.slot_indices += accepted_ids[: len(batch)] else: index = batch.cache_lengths_tensor + batch.input_lengths_tensor index = F.pad( index, (0, next_input_ids.shape[0] - 
index.shape[0]), value=0 ) index = index.to(batch.all_input_ids_tensor.device) batch_idx = torch.arange( 0, index.shape[0], dtype=torch.long, device=batch.all_input_ids_tensor.device, ) batch.all_input_ids_tensor.index_put_( (batch_idx, index.long()), next_input_ids ) batch.input_ids = next_input_ids batch.position_ids += 1 batch.cache_lengths_tensor += batch.input_lengths_tensor batch.input_lengths_tensor = torch.ones_like( batch.input_lengths_tensor ) batch.slot_indices += 1 batch.speculative_ids = speculative_ids # Does a HPU <-> CPU sync internally if prefill and batch.adapter_meta is not None: # adjust segment lengths to account for all request lengths being 1 during decoding adapter_segments, _ = find_segments( batch.adapter_meta.adapter_indices ) batch.adapter_meta.adapter_segments = torch.tensor( adapter_segments, dtype=torch.int32, device=batch.adapter_meta.adapter_segments.device, ) prev_batches.append( { "next_token_ids": next_input_ids, "next_token_logprobs": next_token_logprobs, "accepted_ids": accepted_ids, } ) idx = len(prev_batches) - 1 for req_idx, req in enumerate(batch.requests): new_input_length = 1 if batch.speculative_logits is not None: new_cache_length = ( batch.cache_lengths[req_idx] + batch.input_lengths[req_idx] + accepted_ids[req_idx] - 1 ) else: new_cache_length = ( batch.cache_lengths[req_idx] + batch.input_lengths[req_idx] ) batch.cache_lengths[req_idx] = new_cache_length batch.max_input_length = max( batch.max_input_length, new_input_length ) batch.input_lengths[req_idx] = new_input_length current_length = new_cache_length + new_input_length batch.max_current_length = max( batch.max_current_length, current_length ) requests_to_generate.append( { "idx": idx, "request_id": req.id, "prefix_offset": batch.prefix_offsets[req_idx], "read_offset": batch.read_offsets[req_idx], "stopping_criteria": batch.stopping_criterias[req_idx], "all_input_ids": batch.all_input_ids[req_idx], "do_sample": batch.next_token_chooser.do_sample[req_idx], 
"seed": batch.next_token_chooser.seeds[req_idx], "top_n_tokens": batch.top_n_tokens[req_idx], "top_token_ids": batch_top_token_ids[req_idx], "top_token_logprobs": batch_top_token_logprobs[req_idx], } ) if prefill: # We do not need prefill tensors anymore batch.cu_seqlen_prefill = None batch.prefill_cache_indices = None batch.prefill_cu_outlens = None batch.prefill_head_indices = None batch.prefill_next_token_indices = None batch.next_token_logits = None batch.speculative_ids = None htorch.core.mark_step() # Stage 2. Prepare new batch for speculative scheduling if len(batches) > 1: if self.bucketing_ctx is not None: total_batch_size = 0 for b in batches: total_batch_size += len(b) padded_total_bs = self.bucketing_ctx.get_padded_decode_batch_size( total_batch_size ) batch = self.batch_type.concatenate( batches, padded_total_bs=padded_total_bs ) else: batch = self.batch_type.concatenate(batches) else: batch = batches[0] prefill = batch.prefilling if prefill: if self.bucketing_ctx is not None: batch.prepare_for_prefill( self.bucketing_ctx.get_padded_prompt_seq_len( batch.max_input_length ), self.bucketing_ctx.get_padded_prompt_batch_size(len(batch)), self.max_total_tokens, self.tokenizer.pad_token_id, ) else: batch.prepare_for_prefill( batch.max_input_length, len(batch), self.max_total_tokens, self.tokenizer.pad_token_id, ) else: batch.prepare_for_decode( self.dtype, self.use_contiguous_pa, self.bucketing_ctx, self.tokenizer.pad_token_id, self.sliding_window, ) if hasattr(self, "set_inputs_embeds") and callable(self.set_inputs_embeds): self.set_inputs_embeds(batch) prefill_logprobs = batch.prefill_next_token_indices is not None # Update adapter indices for speculative tokens (if present) adapter_meta = batch.adapter_meta if adapter_meta is not None: if batch.speculative_ids is not None: B, speculative_length = batch.speculative_ids.shape new_length = speculative_length + 1 adapter_indices = ( adapter_meta.adapter_indices.unsqueeze(-1) .expand(B, new_length) 
.reshape(-1) ) adapter_segments = adapter_meta.adapter_segments * new_length adapter_meta = AdapterBatchMetadata( adapter_indices=adapter_indices, adapter_set=adapter_meta.adapter_set, adapter_segments=adapter_segments, segment_indices=adapter_meta.segment_indices, ) # Assign pointers to adapter weights # TODO(travis): don't update this if indices haven't changed adapter_data = AdapterBatchData.from_meta( adapter_meta, self.layer_to_adapter_weights, prefill, batch.prefill_head_indices, ) else: adapter_data = None out, speculative_logits = self.forward(batch, adapter_data) if prefill: batch.next_token_logits = ( out[batch.prefill_next_token_indices] if prefill_logprobs else out ) if speculative_logits is not None: speculative_logits = ( speculative_logits[batch.prefill_next_token_indices] if prefill_logprobs else speculative_logits ) else: prefill_logprobs = None batch.next_token_logits = out batch.speculative_logits = speculative_logits # HPU->CPU sync htorch.core.mark_step() start_decode = time.time_ns() for prev_batch in prev_batches: prev_batch["next_token_logprobs"] = prev_batch[ "next_token_logprobs" ].tolist() prev_batch["next_token_ids"] = prev_batch["next_token_ids"].tolist() prev_batch["accepted_ids"] = prev_batch["accepted_ids"].tolist() htorch.core.mark_step() # Stage 3. 
Finish and return previous generations # Results generations: List[Generation] = [] stopped = len(requests_to_generate) > 0 # Reset max_input_length batch.max_input_length = 0 # For each member of the batch indexs = [0] * len(prev_batches) idx_accept_ids = [0] * len(prev_batches) for i, req_data in enumerate(requests_to_generate): idx = req_data["idx"] request_id = req_data["request_id"] prefix_offset = req_data["prefix_offset"] read_offset = req_data["read_offset"] stopping_criteria = req_data["stopping_criteria"] all_input_ids = req_data["all_input_ids"] do_sample = req_data["do_sample"] seed = req_data["seed"] top_n_tokens = req_data["top_n_tokens"] n_accepted_ids = prev_batches[idx]["accepted_ids"][idx_accept_ids[idx]] top_token_ids = req_data["top_token_ids"] top_token_logprobs = req_data["top_token_logprobs"] # Append next token to all tokens next_token_texts = [] left = 0 if n_accepted_ids > 1: log_master(logger.debug, f"speculated ids {n_accepted_ids - 1}") current_stopped = False index = indexs[idx] for j in range(index, index + n_accepted_ids): # Generated token next_token_id = prev_batches[idx]["next_token_ids"][j] all_input_ids.append(next_token_id) next_token_text, prefix_offset, read_offset = self.decode_token( all_input_ids, prefix_offset, read_offset, ) next_token_texts.append(next_token_text) stop, reason = stopping_criteria( next_token_id, next_token_text, ) if stop: left = index + n_accepted_ids - j - 1 current_stopped = True break else: current_stopped = False stopped = stopped and current_stopped _next_token_ids = prev_batches[idx]["next_token_ids"][ index : index + n_accepted_ids - left ] _next_token_logprobs = prev_batches[idx]["next_token_logprobs"][ index : index + n_accepted_ids - left ] # Shard generations # All generations will be appended in the rust sharded client if request_id % self.world_size == self.rank: if stop: # Decode generated tokens output_text, _, _ = self.decode_token( all_input_ids, prefix_offset=len(all_input_ids) - 
stopping_criteria.current_tokens - 1, read_offset=len(all_input_ids) - stopping_criteria.current_tokens, skip_special_tokens=True, ) generated_text = GeneratedText( output_text, stopping_criteria.current_tokens, reason, seed if do_sample else None, ) else: generated_text = None if top_n_tokens > 0: all_top_tokens = [] for top_token_ids, top_token_logprobs in zip( top_token_ids, top_token_logprobs ): toptoken_texts = self.tokenizer.batch_decode( top_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False, ) special_toptokens = [ token_id in self.all_special_ids for token_id in top_token_ids ] top_tokens = Tokens( top_token_ids, top_token_logprobs, toptoken_texts, special_toptokens, ) all_top_tokens.append(top_tokens) top_tokens = all_top_tokens else: top_tokens = None generation = Generation( request_id, None, Tokens( _next_token_ids, _next_token_logprobs, next_token_texts, [nid in self.all_special_ids for nid in _next_token_ids], ), generated_text, top_tokens, ) generations.append(generation) # accept each new token for this specific request since we may # have more than one new token per request with speculative decoding for next_token_id in _next_token_ids: batch.next_token_chooser = ( batch.next_token_chooser.advance_grammar_single( i, next_token_id ) ) # Update values indexs[idx] += n_accepted_ids idx_accept_ids[idx] += 1 batch.prefix_offsets[i] = prefix_offset batch.read_offsets[i] = read_offset batch.all_input_ids[i] = all_input_ids htorch.core.mark_step() if stopped: # No need to return a batch if we know that all requests stopped forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return generations, None, (forward_ns, decode_ns) forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return generations, batch, (forward_ns, decode_ns)
text-generation-inference/backends/gaudi/server/text_generation_server/models/flash_causal_lm.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/models/flash_causal_lm.py", "repo_id": "text-generation-inference", "token_count": 56151 }
289
import torch
from abc import ABC, abstractmethod
from contextlib import contextmanager
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Type, Union

from safetensors import safe_open

from dataclasses import dataclass


class WeightsLoader(ABC):
    """
    Instances of this type implement higher-level weight loading.

    At a low-level, every weight is stored in the Safetensors format.
    The interpretation of weights may be different however, for instance
    could be packed, quantized weights. Loaders are responsible for
    interpreting the raw tensors, sharding tensors in a manner compatible
    with the format, etc.
    """

    @abstractmethod
    def get_weights(self, weights: "Weights", prefix: str):
        """
        Get weights at the given prefix and apply without tensor parallelism.
        """
        ...

    @abstractmethod
    def get_weights_col_packed(
        self,
        weights: "Weights",
        prefix: str,
        block_sizes: Union[int, List[int]],
    ):
        """
        Get the packed weights at the given prefix with column-splitting for
        tensor parallelism. This method should be used when multiple different
        weights are packed into a tensor, for instance, query/key/value
        weights or a gate/up projection.

        The `block_sizes` determines the proportions of the packed tensors.
        The columns are split in equally sized blocks when `block_sizes` is an
        `int`, or in blocks proportional given to the sizes. For instance
        `[2, 1, 1]` will divide an input with dimensionality `1024` in
        `[512, 256, 256]`.
        """
        ...

    def get_weights_col(self, weights: "Weights", prefix: str):
        """
        Get weights at the given prefix and apply column-splitting for tensor
        parallelism.
        """
        return weights.get_multi_weights_col([prefix], 0)

    @abstractmethod
    def get_multi_weights_col(self, weights: "Weights", prefixes: List[str], dim: int):
        """
        Get the weights at the given prefixes, column-split them for tensor
        parallelism, and then concatenate the weights along the given
        dimension.
        """
        ...

    @abstractmethod
    def get_multi_weights(self, weights: "Weights", prefixes: List[str], dim: int):
        """
        Get the (unsharded) weights at the given prefixes and concatenate them
        along the given dimension.
        """
        ...

    @abstractmethod
    def get_weights_row(self, weights: "Weights", prefix: str):
        """
        Get the weights at the given prefix and apply row-splitting for tensor
        parallelism.
        """
        ...


class Weight(ABC):
    """Instances of this type implement unquantized/quantized/to-be quantized weights."""

    @abstractmethod
    def get_linear(self, bias: torch.Tensor):
        """Create a linear layer from this weight."""
        ...


@dataclass
class UnquantizedWeight(Weight):
    # Raw (unquantized) weight tensor.
    weight: torch.Tensor

    def get_linear(self, bias: torch.Tensor):
        # Imported lazily to avoid a circular import with the layers package.
        from text_generation_server.layers.linear import FastLinear

        return FastLinear(self.weight, bias)


class DefaultWeightsLoader(WeightsLoader):
    """Weight loader that loads (unquantized) Torch tensors.

    Loader that uses tensors as-is with the exception of applying sharding
    and/or concatenation.
    """

    def __init__(self, weight_class: Type[UnquantizedWeight]):
        """Create a loader. Weights will be wrapped using the given `weights_class`,
        normally this will be `UnquantizedWeight`, but a quantizer-specific
        class such as `Fp8Weight` can be used to quantize the weights during
        loading.
        """
        self.weight_class = weight_class

    def get_weights(self, weights: "Weights", prefix: str):
        # NOTE(review): unlike the other accessors this returns the raw
        # tensor rather than wrapping it in `self.weight_class` — confirm
        # callers expect that asymmetry.
        return weights.get_tensor(f"{prefix}.weight")

    def get_weights_col_packed(
        self,
        weights: "Weights",
        prefix: str,
        block_sizes: Union[int, List[int]],
    ):
        return self.weight_class(
            weights.get_packed_sharded(
                f"{prefix}.weight", dim=0, block_sizes=block_sizes
            ),
        )

    def get_multi_weights_col(self, weights: "Weights", prefixes: List[str], dim: int):
        w = [weights.get_sharded(f"{p}.weight", dim=0) for p in prefixes]
        return self.weight_class(torch.cat(w, dim=dim))

    def get_weights_row(self, weights: "Weights", prefix: str):
        return self.weight_class(
            weights.get_sharded(f"{prefix}.weight", dim=1),
        )

    def get_multi_weights(self, weights: "Weights", prefixes: List[str], dim: int):
        w = [weights.get_tensor(f"{p}.weight") for p in prefixes]
        return self.weight_class(torch.cat(w, dim=dim))


class Weights:
    """Low-level access to (possibly sharded) Safetensors weights.

    Builds a routing table mapping every tensor name to the file that
    contains it, and exposes helpers to fetch full, sharded or packed
    tensors on the configured device/dtype.
    """

    def __init__(
        self,
        filenames: List[Path],
        device,
        dtype,
        process_group,
        weights_loader: WeightsLoader,
        aliases: Optional[Dict[str, List[str]]] = None,
        prefix: Optional[str] = None,
    ):
        routing = {}
        for filename in filenames:
            with safe_open(filename, framework="pytorch") as f:
                for k in f.keys():
                    if k in routing:
                        # Fixed: report the actual conflicting file instead of
                        # the literal placeholder "(unknown)".
                        raise RuntimeError(
                            f"Key {k} was found in multiple files: {filename} and {routing[k]}"
                        )
                    routing[k] = filename
        if aliases is None:
            aliases = {}
        self.aliases = aliases
        self.routing = routing
        self.device = device
        self.dtype = dtype
        self.process_group = process_group
        self.prefix = prefix
        self.weights_loader = weights_loader
        # Cache of open safetensors handles, keyed by filename.
        self._handles = {}

    def _get_handle(self, filename):
        # Open each file at most once and keep the handle for reuse.
        if filename not in self._handles:
            f = safe_open(filename, framework="pytorch")
            self._handles[filename] = f
        return self._handles[filename]

    def get_filename(self, tensor_name: str) -> Tuple[str, str]:
        """Resolve `tensor_name` (trying prefix and aliases) to
        `(filename, resolved_name)`. Raises `RuntimeError` if absent."""
        names = [tensor_name]
        if self.prefix is not None:
            prefixed = f"{self.prefix}.{tensor_name}"
            names.append(prefixed)
        for name in names:
            filename = self.routing.get(name, None)
            if filename is not None:
                return str(filename), name
            aliases = self.aliases.get(name, [])
            for alias in aliases:
                filename = self.routing.get(alias, None)
                if filename is not None:
                    return str(filename), alias
        raise RuntimeError(f"weight {tensor_name} does not exist")

    def _get_slice(self, tensor_name: str):
        filename, tensor_name = self.get_filename(tensor_name)
        f = self._get_handle(filename)
        slice_ = f.get_slice(tensor_name)
        return slice_

    def has_tensor(self, tensor_name: str):
        try:
            self.get_filename(tensor_name)
        except Exception:
            return False
        return True

    def get_shape(self, tensor_name: str):
        return self._get_slice(tensor_name).get_shape()

    def get_tensor(
        self, tensor_name: str, to_device: bool = True, to_dtype: bool = True
    ) -> torch.Tensor:
        filename, tensor_name = self.get_filename(tensor_name)
        f = self._get_handle(filename)
        tensor = f.get_tensor(tensor_name)
        # Special case for gptq which shouldn't convert
        # u4 which are disguised as int32. Exl2 uses int16
        # as well. FP8 uses torch.float8_e4m3fn
        if (
            tensor.dtype
            not in [
                torch.float8_e4m3fn,
                torch.int8,
                torch.int16,
                torch.int32,
                torch.int64,
            ]
            and to_dtype
        ):
            tensor = tensor.to(dtype=self.dtype)
        if to_device:
            tensor = tensor.to(device=self.device)
        return tensor

    def get_partial_sharded(
        self, tensor_name: str, dim: int, to_device=True, to_dtype=True
    ):
        """Return this rank's shard of the tensor along `dim`, without
        requiring the size to divide evenly across ranks."""
        filename, tensor_name = self.get_filename(tensor_name)
        f = self._get_handle(filename)
        slice_ = f.get_slice(tensor_name)
        world_size = self.process_group.size()
        rank = self.process_group.rank()

        size = slice_.get_shape()[dim]
        # Ceiling division so the last rank absorbs the remainder.
        block_size = (size + world_size - 1) // world_size
        start = rank * block_size
        stop = (rank + 1) * block_size

        if dim == 0:
            tensor = slice_[start:stop]
        elif dim == 1:
            tensor = slice_[:, start:stop]
        else:
            raise NotImplementedError("Let's make that generic when needed")
        # Special case for gptq which shouldn't convert
        # u4 which are disguised as int32. exl2 uses int16.
        # FP8 uses torch.float8_e4m3fn.
        # NOTE(review): this skip-list omits torch.int64 unlike
        # get_tensor/get_packed_sharded — confirm that is intentional.
        if (
            tensor.dtype
            not in (torch.float8_e4m3fn, torch.int8, torch.int16, torch.int32)
            and to_dtype
        ):
            tensor = tensor.to(dtype=self.dtype)
        if to_device:
            tensor = tensor.to(device=self.device)
        return tensor

    def get_sharded(self, tensor_name: str, dim: int, to_device=True, to_dtype=True):
        filename, tensor_name = self.get_filename(tensor_name)
        f = self._get_handle(filename)
        slice_ = f.get_slice(tensor_name)
        world_size = self.process_group.size()
        size = slice_.get_shape()[dim]
        assert (
            size % world_size == 0
        ), f"The choosen size {size} is not compatible with sharding on {world_size} shards"
        return self.get_partial_sharded(
            tensor_name, dim, to_device=to_device, to_dtype=to_dtype
        )

    def get_packed_sharded(
        self,
        tensor_name: str,
        dim: int,
        block_sizes: Union[int, List[int]],
        to_dtype=True,
    ) -> torch.Tensor:
        """
        Get a shard from a tensor that packs multiple tensors.

        When a tensor packs multiple tensors (such as QKV or an
        up projection + gate projection), sharding with `get_sharded` is not
        safe since it would not split the packed tensors across shards.

        This method shards a tensor, such that the packed tensors are
        split across shards.

        The columns are split in equally sized blocks when blocks is an `int`, or
        in blocks proportional given to the sizes. For instance `[2, 1, 1]` will
        divide an input with dimensionality `1024` in `[512, 256, 256]`. This is
        convenient for e.g. splitting QKV without knowing the storage details of
        quantized weights.
        """
        slice_ = self._get_slice(tensor_name)
        total_size = slice_.get_shape()[dim]
        block_sizes = _blocks_to_block_sizes(total_size=total_size, blocks=block_sizes)

        world_size = self.process_group.size()
        rank = self.process_group.rank()

        # Collect, per packed block, the column indices owned by this rank.
        tensors_slices = []
        block_offset = 0
        for block_size in block_sizes:
            assert (
                block_size % world_size == 0
            ), f"Prepacked tensor cannot be sharded across {world_size} shards"
            shard_block_size = block_size // world_size
            start = rank * shard_block_size
            stop = (rank + 1) * shard_block_size
            tensors_slices += range(block_offset + start, block_offset + stop)
            block_offset += block_size

        if dim == 0:
            tensor = slice_[tensors_slices, ...]
        elif dim == 1 or dim == -2:
            tensor = slice_[:, tensors_slices, ...]
        elif dim == 2 or dim == -1:
            tensor = slice_[..., tensors_slices]
        else:
            raise ValueError(f"Unsupported dim {dim}, only dim 0, 1 or 2 are supported")
        tensor = tensor.to(device=self.device)

        # Avoid casting quantizer dtypes.
        if (
            tensor.dtype
            not in [
                torch.float8_e4m3fn,
                torch.int8,
                torch.int16,
                torch.int32,
                torch.int64,
            ]
            and to_dtype
        ):
            tensor = tensor.to(dtype=self.dtype)

        return tensor

    def get_weights(self, prefix: str):
        return self.weights_loader.get_weights(self, prefix)

    def get_weights_col_packed_qkv(
        self,
        prefix: str,
        num_heads: int,
        num_key_value_heads: int,
    ):
        return self.get_weights_col_packed(
            prefix, [num_heads, num_key_value_heads, num_key_value_heads]
        )

    def get_weights_col_packed_gate_up(self, prefix: str):
        return self.get_weights_col_packed(prefix, 2)

    def get_weights_col_packed(self, prefix: str, block_sizes: Union[int, List[int]]):
        """
        The columns are split in equally sized blocks when blocks is an `int`, or
        in blocks proportional given to the sizes. For instance `[2, 1, 1]` will
        divide an input with dimensionality `1024` in `[512, 256, 256]`. This is
        convenient for e.g. splitting QKV without knowing the storage details of
        quantized weights.
        """
        return self.weights_loader.get_weights_col_packed(self, prefix, block_sizes)

    def get_weights_col(self, prefix: str):
        return self.weights_loader.get_weights_col(self, prefix)

    def get_multi_weights_col(self, prefixes: List[str], dim: int):
        return self.weights_loader.get_multi_weights_col(self, prefixes, dim)

    def get_tensor_shard(self, var, dim):
        """Shard an already-loaded tensor `var` along `dim`; the size along
        `dim` is assumed to divide evenly across ranks."""
        world_size = self.process_group.size()
        rank = self.process_group.rank()
        block_size = var.size()[dim] // world_size
        start = rank * block_size
        stop = (rank + 1) * block_size
        if dim == 0:
            tensor = var[start:stop]
        elif dim == 1:
            tensor = var[:, start:stop]
        else:
            raise NotImplementedError("Let's make that generic when needed")
        tensor = tensor.to(dtype=self.dtype)
        tensor = tensor.to(device=self.device)
        return tensor

    def get_weights_row(self, prefix: str):
        return self.weights_loader.get_weights_row(self, prefix)

    def get_multi_weights(self, prefixes: List[str], dim: int):
        return self.weights_loader.get_multi_weights(self, prefixes, dim)

    @contextmanager
    def use_loader(self, weights_loader: WeightsLoader):
        """
        This method is a context manager that can be used to use `Weights` with
        a different loader for the duration of the context.
        """
        old_loader = self.weights_loader
        self.weights_loader = weights_loader
        try:
            yield
        finally:
            # Always restore, even if the body raised.
            self.weights_loader = old_loader

    @property
    def loader(self):
        return self.weights_loader


def _blocks_to_block_sizes(total_size: int, blocks: Union[int, List[int]]) -> List[int]:
    """
    Convert block count or proportions to block sizes.

    This function accepts:

    - The number of blocks (int), in which case the block size is
      total_size//blocks; or
    - A list of block sizes (List[int]).

    In the latter case, if sum(blocks) < total_size, the ratios between
    the block sizes will be preserved. For instance, if blocks is
    [2, 1, 1] and total_size is 1024, the returned block sizes are
    [512, 256, 256].
    """
    if isinstance(blocks, list):
        total_blocks = sum(blocks)
        assert (
            total_size % total_blocks == 0
        ), f"Cannot split {total_size} in proportional blocks: {blocks}"
        part_size = total_size // total_blocks
        return [part_size * block for block in blocks]
    else:
        assert total_size % blocks == 0, f"Prepacked is not divisible by {blocks}"
        single_size = total_size // blocks
        return [single_size] * blocks
text-generation-inference/backends/gaudi/server/text_generation_server/utils/weights.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/utils/weights.py", "repo_id": "text-generation-inference", "token_count": 6935 }
290
# Initialize base variables
SHELL := /bin/bash
pkg_name := text_generation_server
BUILDDIR ?= $(CURDIR)/build
VERSION ?= 0.0.1
mkfile_path := $(abspath $(lastword $(MAKEFILE_LIST)))
mkfile_dir := $(dir $(mkfile_path))
pkg_dir := $(BUILDDIR)/$(pkg_name)
# PEP 440 version: turn dashes from VERSION into dots.
py_version := $(subst -,.,${VERSION})
pkg_dist := ${BUILDDIR}/dist/${pkg_name}-$(py_version).tar.gz

clean:
	rm -rf $(BUILDDIR)/*

${BUILDDIR}:
	install -d $@

# List static sources to be deployed in the package
src_dir := $(mkfile_dir)/$(pkg_name)
sources := $(wildcard $(src_dir)/*.py)
deployed_sources := $(subst $(src_dir), $(pkg_dir), $(sources))

# Static files are just copied
define COPY
	cp -f $< $@
endef

# We use a PHONY target to represent the VERSION
.PHONY: VERSION

# The trick is to compare the value of the variable with the content of a file in the build directory
VERSION: ${BUILDDIR}
	@if [[ `cat ${BUILDDIR}/VERSION 2>&1` != '$(VERSION)' ]]; then echo -n $(VERSION) >${BUILDDIR}/VERSION; fi

# Depending on the PHONY VERSION target makes sure the pyproject.toml is regenerated if the version changes
$(BUILDDIR)/pyproject.toml: $(mkfile_dir)/pyproject.toml VERSION
	mkdir -p $(BUILDDIR)
	$(COPY)
	sed -i -e 's/version = "VERSION"/version = \"${VERSION}\"/' $@

$(pkg_dir)/%.py: $(src_dir)/%.py
	mkdir -p $(pkg_dir)
	$(COPY)

# Generated files are produced by grpcio tools

# If not provided, get local proto files
ifndef PROTODIR
PROTODIR := $(mkfile_dir)/../../../proto
endif

# Three python files are generated for each protobuf
protobufs := $(PROTODIR)/generate.proto
pkg_pb_dir := $(pkg_dir)/pb
generated_sources_base := $(foreach proto, $(protobufs), $(proto:.proto=_pb2.py))
generated_sources := $(subst $(PROTODIR), $(pkg_pb_dir), $(generated_sources_base))
generated_sources += $(subst $(PROTODIR), $(pkg_pb_dir), $(generated_sources_base:.py=.pyi))
generated_sources += $(subst $(PROTODIR), $(pkg_pb_dir), $(generated_sources_base:.py=_grpc.py))

# Grouped-target pattern rule: one protoc invocation produces the _pb2 module,
# its stubs and the grpc module; the sed rewrites the generated absolute
# import into a package-relative one.
$(pkg_pb_dir)/%_pb2.py $(pkg_pb_dir)/%_pb2.pyi $(pkg_pb_dir)/%_pb2_grpc.py: $(PROTODIR)/%.proto
	mkdir -p $(pkg_pb_dir)
	python -m grpc_tools.protoc -I$(PROTODIR) --python_out=$(pkg_pb_dir) \
		--grpc_python_out=$(pkg_pb_dir) --mypy_out=$(pkg_pb_dir) $^
	sed -i -e 's/^\(import.*pb2\)/from . \1/g' $(pkg_pb_dir)/$*_pb2_grpc.py

# Build the sdist once every deployed/generated source is in place.
${pkg_dist}: $(BUILDDIR)/pyproject.toml $(deployed_sources) $(generated_sources)
	python -m build $(BUILDDIR)

package: ${pkg_dist}

install: ${pkg_dist}
	python3 -m pip uninstall -y ${pkg_name}
	python3 -m pip install ${pkg_dist}
text-generation-inference/backends/neuron/server/Makefile/0
{ "file_path": "text-generation-inference/backends/neuron/server/Makefile", "repo_id": "text-generation-inference", "token_count": 1003 }
291
from helpers import create_request
from text_generation_server.generator import NeuronGenerator
from text_generation_server.pb.generate_pb2 import Batch


def test_continuous_batching_two_requests(neuron_model_config):
    """Verify that two requests added to the batch at different generation steps
    generate the same outputs (continuous batching).
    """
    neuron_model_path = neuron_model_config["neuron_model_path"]
    generator = NeuronGenerator.from_pretrained(neuron_model_path)
    # Continuous batching only makes sense with room for more than one request.
    assert generator.model.neuron_config.batch_size > 1
    input_text = "Once upon a time"
    max_new_tokens = 20
    # Prefill a single request, remembering the generated token
    tokens = {0: [], 1: []}  # per-request-id list of generated token ids
    request = create_request(id=0, inputs=input_text, max_new_tokens=max_new_tokens)
    max_length = generator.model.neuron_config.sequence_length
    batch = Batch(id=0, requests=[request], size=1, max_tokens=max_length)
    generations, next_batch = generator.prefill(batch)
    assert next_batch.size == 1
    # Prefill yields exactly one generation, carrying the first new token.
    assert len(generations) == 1
    g = generations[0]
    tokens[g.request_id].append(g.tokens.ids[0])
    assert len(tokens[0]) == 1
    # Decode a few tokens
    gen_tokens = 4
    for _ in range(gen_tokens - 1):
        generations, next_batch = generator.decode([next_batch])
        assert len(generations) == 1
        g = generations[0]
        tokens[g.request_id].append(g.tokens.ids[0])
    assert len(tokens[0]) == gen_tokens
    assert next_batch.size == 1
    # Add a second request
    request = create_request(id=1, inputs=input_text, max_new_tokens=max_new_tokens)
    batch = Batch(id=1, requests=[request], size=1, max_tokens=max_length)
    generations, next_batch_1 = generator.prefill(batch)
    assert next_batch_1.size == 1
    # We should have generated only a single token
    assert len(generations) == 1
    g = generations[0]
    tokens[g.request_id].append(g.tokens.ids[0])
    assert len(tokens[0]) == gen_tokens
    assert len(tokens[1]) == 1
    # Decode more tokens until we reach the maximum for the first request
    batches = [next_batch, next_batch_1]
    for _ in range(max_new_tokens - gen_tokens):
        # First decode merges the two pending batches into one.
        generations, next_batch = generator.decode(batches)
        for g in generations:
            tokens[g.request_id].append(g.tokens.ids[0])
        batches = [next_batch]
    # Verify we now only have one pending request
    assert next_batch.size == 1
    assert len(tokens[0]) == max_new_tokens
    assert len(tokens[1]) == max_new_tokens - gen_tokens + 1
    # Verify we have the output for the first request
    for g in generations:
        if g.request_id == 0:
            output = g.generated_text
            assert output.text != ""
            assert output.generated_tokens == max_new_tokens
            generated_text = output.text
    # Continue decoding until the end of the second request
    for _ in range(gen_tokens - 1):
        generations, next_batch = generator.decode([next_batch])
        assert len(generations) == 1
        g = generations[0]
        tokens[g.request_id].append(g.tokens.ids[0])
    # All requests exhausted: the generator returns no follow-up batch.
    assert next_batch is None
    output = generations[0].generated_text
    assert output.generated_tokens == max_new_tokens
    # Greedy decoding of the same prompt must be identical for both requests,
    # regardless of when each one joined the batch.
    assert tokens[0] == tokens[1]
    assert output.text == generated_text
text-generation-inference/backends/neuron/tests/server/test_continuous_batching.py/0
{ "file_path": "text-generation-inference/backends/neuron/tests/server/test_continuous_batching.py", "repo_id": "text-generation-inference", "token_count": 1271 }
292
#include <ranges>

#include <nlohmann/json.hpp>

#include "backend.hpp"
#include "hardware.hpp"

namespace huggingface::tgi::backends::trtllm {

    // Infer the executor's parallelism setup from the engine config:
    // leader mode for a single engine, orchestrator mode (spawning the
    // worker executable) for multi-shard deployments.
    tle::ParallelConfig backend_workspace_t::parallel_config() const {
        // Single engine (TP = PP = 1) -> using leader mode (no MPI involved)
        const auto world_size = config_["/pretrained_config/mapping/world_size"_json_pointer].get<size_t>();

        auto mode = tle::CommunicationMode::kLEADER;
        std::optional<tle::OrchestratorConfig> orchestratorConfig = std::nullopt;

        if (world_size > 1) {
            SPDLOG_INFO("Detected sharded engine deployment, using orchestrator mode");
            mode = tle::CommunicationMode::kORCHESTRATOR;
            orchestratorConfig = std::make_optional<tle::OrchestratorConfig>(true, executor_worker_path_, nullptr, true);
        } else {
            SPDLOG_INFO("Detected single engine deployment, using leader mode");
        }

        return tle::ParallelConfig(tle::CommunicationType::kMPI, mode, std::nullopt, std::nullopt, orchestratorConfig);
    }

    // Assemble the executor configuration: beam width 1, parallel config as
    // inferred above, KV-cache on, chunked context only on Ampere or newer.
    tle::ExecutorConfig backend_workspace_t::executor_config() const {
        // Retrieve the compute capabilities to enable some options at runtime
        const auto compute_capabilities = hardware::cuda::compute_capabilities_t();

        // Allocate the config
        tle::ExecutorConfig executor_config(/* maxBeamWidth = */ 1);

        // Set the parallel config as inferred
        executor_config.setParallelConfig(parallel_config());

        // Define some configuration variables
        executor_config.setKvCacheConfig(tle::KvCacheConfig(true));
        executor_config.setEnableChunkedContext(compute_capabilities.is_at_least_ampere());
        executor_config.setSchedulerConfig(tle::SchedulerConfig(tle::CapacitySchedulerPolicy::kMAX_UTILIZATION));
        return executor_config;
    }

    backend_t::backend_t(std::filesystem::path &engines_folder, std::filesystem::path &executor_worker_path)
            : workspace(engines_folder, executor_worker_path), executor_(executor_factory_initializer(workspace)) {}

    // Number of responses currently ready to be pulled from the executor.
    size_t backend_t::num_tokens_ready() const noexcept {
        return executor_.getNumResponsesReady();
    }

    // Enqueue a generation request; returns the executor-assigned request id.
    // NOTE(review): this path never populates the backend_error_t branch of
    // the std::expected — errors from enqueueRequest surface elsewhere; confirm.
    std::expected<request_id_t, backend_error_t>
    backend_t::submit(std::span<const token_id_t> token_ids, const generation_params_t g_params,
                      const sampling_params_t s_params) noexcept {
        SPDLOG_DEBUG("Submit {:d} tokens for scheduling ({}, {})", token_ids.size(), g_params, s_params);
        return executor_.enqueueRequest(tle::Request{
                {token_ids.begin(), token_ids.end()},  // Making actual copy of the tokens
                static_cast<tle::SizeType32>(g_params.max_new_tokens),
                true,
                (tle::SamplingConfig) s_params,
                tle::OutputConfig{ /* returnLogProbs= */ true},
                std::nullopt,
                std::nullopt,
                std::nullopt,
                std::nullopt,
                workspace.generation_config().stop_words
        });
    }

    // Blocking pull of all responses currently available from the executor.
    std::vector<tle::Response> backend_t::pull_tokens() noexcept {
        SPDLOG_TRACE(FMT_STRING("Pulling out tokens ({:d} available)"), num_tokens_ready());
        return executor_.awaitResponses();
    }

    // Best-effort cancellation of an in-flight request.
    void backend_t::cancel(request_id_t request_id) noexcept {
        SPDLOG_TRACE(FMT_STRING("Cancelling request: {:d}"), request_id);
        executor_.cancelRequest(request_id);
    }
}
text-generation-inference/backends/trtllm/csrc/backend.cpp/0
{ "file_path": "text-generation-inference/backends/trtllm/csrc/backend.cpp", "repo_id": "text-generation-inference", "token_count": 1511 }
293
use crate::block_allocator::{BlockAllocation, BlockAllocator}; use crate::client; use crate::client::{ Batch, GrammarType, NextTokenChooserParameters, Request, StoppingCriteriaParameters, }; use nohash_hasher::{BuildNoHashHasher, IntMap}; use std::cmp::max; use std::collections::VecDeque; use text_generation_router::infer::InferError; use text_generation_router::infer::InferStreamResponse; use text_generation_router::usage_stats::Env; use text_generation_router::validation::{ Chunk, ChunksToString, ValidGenerateRequest, ValidGrammar, ValidParameters, ValidStoppingParameters, }; use tokio::sync::{mpsc, oneshot}; use tokio::time::Instant; use tracing::{info_span, instrument, Instrument, Span}; /// Queue entry #[derive(Debug)] pub(crate) struct Entry { /// Request pub request: ValidGenerateRequest, /// Response sender to communicate between the Infer struct and the batching_task pub response_tx: mpsc::UnboundedSender<Result<InferStreamResponse, InferError>>, /// Span that will live as long as entry pub span: Span, /// Temporary span used as a guard when logging inference, wait times... 
pub temp_span: Option<Span>, /// Instant when this entry was queued pub queue_time: Instant, /// Instant when this entry was added to a batch pub batch_time: Option<Instant>, /// Block Allocation pub block_allocation: Option<BlockAllocation>, } /// Request Queue #[derive(Debug, Clone)] pub(crate) struct Queue { /// Channel to communicate with the background queue task queue_sender: mpsc::UnboundedSender<QueueCommand>, } impl Queue { pub(crate) fn new( requires_padding: bool, block_size: u32, prefix_caching: bool, window_size: Option<u32>, speculate: u32, max_batch_total_tokens: u32, support_chunking: bool, ) -> Self { // Create channel let (queue_sender, queue_receiver) = mpsc::unbounded_channel(); // Launch background queue task tokio::spawn(queue_task( requires_padding, block_size, prefix_caching, window_size, speculate, max_batch_total_tokens, support_chunking, queue_receiver, )); Self { queue_sender } } /// Append an entry to the queue #[instrument(skip_all)] pub(crate) fn append(&self, entry: Entry) { // Send append command to the background task managing the state // Unwrap is safe here self.queue_sender .send(QueueCommand::Append(Box::new(entry), Span::current())) .unwrap(); } // Get the next batch #[instrument(skip(self))] pub(crate) async fn next_batch( &self, min_size: Option<usize>, max_size: Option<usize>, prefill_token_budget: u32, token_budget: u32, ) -> Option<NextBatch> { if prefill_token_budget == 0 || token_budget == 0 { return None; }; // Create response channel let (response_sender, response_receiver) = oneshot::channel(); // Send next batch command to the background task managing the state // Unwrap is safe here self.queue_sender .send(QueueCommand::NextBatch { min_size, max_size, prefill_token_budget, token_budget, response_sender, span: Span::current(), }) .unwrap(); // Await on response channel // Unwrap is safe here response_receiver.await.unwrap() } } // Background task responsible of the queue state #[allow(clippy::too_many_arguments)] 
async fn queue_task( requires_padding: bool, block_size: u32, prefix_caching: bool, window_size: Option<u32>, speculate: u32, max_batch_total_tokens: u32, support_chunking: bool, mut receiver: mpsc::UnboundedReceiver<QueueCommand>, ) { let mut state = State::new( requires_padding, block_size, prefix_caching, window_size, speculate, max_batch_total_tokens, support_chunking, ); while let Some(cmd) = receiver.recv().await { match cmd { QueueCommand::Append(entry, span) => { span.in_scope(|| state.append(*entry)); metrics::gauge!("tgi_queue_size").increment(1.0); } QueueCommand::NextBatch { min_size, max_size, prefill_token_budget, token_budget, response_sender, span, } => { let next_batch = state .next_batch(min_size, max_size, prefill_token_budget, token_budget) .instrument(span) .await; response_sender.send(next_batch).unwrap(); metrics::gauge!("tgi_queue_size").set(state.entries.len() as f64); } } } } /// Queue State #[derive(Debug)] struct State { /// Queue entries organized in a Vec entries: VecDeque<(u64, Entry)>, /// Id of the next entry next_id: u64, /// Id of the next batch next_batch_id: u64, /// Paged Attention block size block_size: u32, /// Speculation amount speculate: u32, /// Whether the model allow the prefill chunking /// If it does, the last request in the batch will be split to exactly match the prefill /// token budget support_chunking: bool, /// Paged Attention Block Allocation block_allocator: Option<BlockAllocator>, /// indicate if it's hpu device, the hpu device needs padding to generate first token. 
is_hpu_device: bool, } impl State { fn new( requires_padding: bool, block_size: u32, prefix_caching: bool, window_size: Option<u32>, speculate: u32, max_batch_total_tokens: u32, support_chunking: bool, ) -> Self { let block_allocator = (!requires_padding).then(|| { BlockAllocator::new( max_batch_total_tokens, block_size, prefix_caching, window_size, ) }); Self { entries: VecDeque::with_capacity(128), next_id: 0, next_batch_id: 0, block_size, speculate, support_chunking, block_allocator, is_hpu_device: Env::new().is_hpu_device(), } } /// Append an entry to the queue fn append(&mut self, mut entry: Entry) { // Create a span that will live as long as the entry is in the queue waiting to be batched let queue_span = info_span!(parent: &entry.span, "queued"); entry.temp_span = Some(queue_span); // Push entry in the queue self.entries.push_back((self.next_id, entry)); self.next_id += 1; } // Get the next batch async fn next_batch( &mut self, min_size: Option<usize>, max_size: Option<usize>, prefill_token_budget: u32, token_budget: u32, ) -> Option<NextBatch> { if self.entries.is_empty() { tracing::debug!("No queue"); return None; } // Check if we have enough entries if let Some(min_size) = min_size { if self.entries.len() < min_size { tracing::debug!("Not enough entries"); return None; } } if let Some(max_size) = max_size { if max_size == 0 { tracing::debug!("No capacity"); return None; } } // Pad prefill_token_budget to be a multiple of block size let prefill_token_budget = prefill_token_budget.div_ceil(self.block_size) * self.block_size; // Create span for this batch to add context to inference calls let next_batch_span = info_span!(parent: None, "batch", batch_size = tracing::field::Empty); next_batch_span.follows_from(Span::current()); let mut batch = Vec::with_capacity(self.entries.len()); let mut max_input_length = 0; let mut prefill_tokens: u32 = 0; let mut decode_tokens: u32 = 0; let mut max_blocks = 0; // Pop entries starting from the front of the queue 
'entry_loop: while let Some((id, entry)) = self.entries.pop_front() { // Filter entries where the response receiver was dropped (== entries where the request // was dropped by the client) if entry.response_tx.is_closed() { metrics::counter!("tgi_request_failure", "err" => "dropped").increment(1); tracing::debug!("Dropping entry"); continue; } let block_allocation = match &self.block_allocator { None => { // We pad to max input length in the Python shards // We need to take these padding tokens into the equation max_input_length = max_input_length.max(entry.request.input_length); prefill_tokens = (batch.len() + 1) as u32 * max_input_length; decode_tokens += entry.request.stopping_parameters.max_new_tokens; let total_tokens = prefill_tokens + decode_tokens + self.speculate; if prefill_tokens > prefill_token_budget || total_tokens > token_budget { // Entry is over budget // Add it back to the front tracing::debug!("Over budget: prefill_tokens={prefill_tokens} > {prefill_token_budget} || {prefill_tokens} + {decode_tokens} + {} > {token_budget}", self.speculate); self.entries.push_front((id, entry)); break 'entry_loop; } None } Some(block_allocator) => { // If users wants the prefill logprobs, we cannot reuse the cache. // So no input_ids for the radix tree. 
let input_ids = if entry.request.decoder_input_details { None } else { entry.request.input_ids.clone() }; let tokens = entry.request.input_length + entry.request.stopping_parameters.max_new_tokens + self.speculate - 1; // tracing::debug!("Allocating {tokens} with {input_ids:?}"); let block_allocation = match block_allocator.allocate(tokens, input_ids).await { None => { // Entry is over budget // Add it back to the front tracing::debug!("Over budget: not enough free blocks"); self.entries.push_front((id, entry)); break 'entry_loop; } Some(mut block_allocation) => { // tracing::debug!("Allocation: {block_allocation:?}"); max_blocks = max(max_blocks, block_allocation.blocks.len() as u32); if block_allocation.prefix_len == entry.request.input_length { // The whole request was found in the radix trie // However, for the transformer forward to work, we need to // have at least one token of postfix. block_allocation.prefix_len -= 1; } block_allocation } }; let postfix_len = entry.request.input_length - block_allocation.prefix_len; if prefill_tokens + postfix_len > prefill_token_budget { // Entry is over budget if self.support_chunking { // We support chunking, just set postfix_len to exactly match prefill_token_budget let chunk_len = prefill_token_budget.saturating_sub(prefill_tokens); if chunk_len > 0 { // Push this entry inside the batch batch.push((id, entry, Some(block_allocation), Some(chunk_len))); } else { // We cannot prefill even one token for this entry // Add it back to the queue self.entries.push_front((id, entry)); } tracing::debug!( "Matched budget: prefill_tokens={} == {prefill_token_budget}", prefill_tokens + postfix_len ); break 'entry_loop; } else { // We don't support chunking, this entry needs to go back to the buffer // Add it back to the front tracing::debug!( "Over budget: prefill_tokens={} > {prefill_token_budget}", prefill_tokens + postfix_len ); self.entries.push_front((id, entry)); break 'entry_loop; } } if self.is_hpu_device { //HPU needs to 
pad for the prefill max_input_length = max_input_length.max(entry.request.input_length); let actual_prefill_tokens_for_hpu = (batch.len() + 1) as u32 * max_input_length; if actual_prefill_tokens_for_hpu > prefill_token_budget { // Entry is over budget // Add it back to the front tracing::debug!("Over budget: prefill_tokens={actual_prefill_tokens_for_hpu} > {prefill_token_budget}"); self.entries.push_front((id, entry)); break 'entry_loop; } } prefill_tokens += postfix_len; Some(block_allocation) } }; batch.push((id, entry, block_allocation, None)); if Some(batch.len()) == max_size { break; } } // Empty batch if batch.is_empty() { tracing::debug!("Filterered out all entries"); return None; } // XXX We haven't allocated yet, so we're allowed to ditch the results. // Check if our batch is big enough if let Some(min_size) = min_size { // Batch is too small if batch.len() < min_size { // Add back entries to the queue in the correct order for (id, entry, _, _) in batch.into_iter().rev() { self.entries.push_front((id, entry)); } return None; } } let mut batch_requests = Vec::with_capacity(self.entries.len()); let mut batch_entries = IntMap::with_capacity_and_hasher(self.entries.len(), BuildNoHashHasher::default()); for (id, mut entry, block_allocation, chunk_len) in batch { // Create a new span to link the batch back to this entry let entry_batch_span = info_span!(parent: &entry.span, "infer"); // Add relationships next_batch_span.follows_from(&entry_batch_span); entry_batch_span.follows_from(&next_batch_span); // Update entry entry.temp_span = Some(entry_batch_span); let (blocks, slots, prefix_len) = match &block_allocation { None => (Vec::new(), Vec::new(), 0), Some(block_allocation) => ( block_allocation.blocks.clone(), block_allocation.slots.clone(), block_allocation.prefix_len, ), }; entry.block_allocation = block_allocation; batch_requests.push(Request { id, prefill_logprobs: entry.request.decoder_input_details, input_chunks: Some(client::Input { chunks: entry 
.request .inputs .clone() .into_iter() .map(|c| client::InputChunk { chunk: Some(match c { Chunk::Text(text) => client::Chunk::Text(text), Chunk::Image(image) => client::Chunk::Image(client::Image { data: image.data, mimetype: image.mimetype, }), }), }) .collect(), }), inputs: entry.request.inputs.chunks_to_string(), truncate: entry.request.truncate, add_special_tokens: entry.request.add_special_tokens, parameters: Some(NextTokenChooserParameters::from( entry.request.parameters.clone(), )), stopping_parameters: Some(StoppingCriteriaParameters::from( entry.request.stopping_parameters.clone(), )), top_n_tokens: entry.request.top_n_tokens, blocks, slots, cache_len: prefix_len, adapter_id: entry.request.adapter_id.clone(), chunk_len, }); // Set batch_time entry.batch_time = Some(Instant::now()); // Insert in batch_entries IntMap batch_entries.insert(id, entry); } // Final batch size let size = batch_requests.len() as u32; next_batch_span.record("batch_size", size); let batch = Batch { id: self.next_batch_id, requests: batch_requests, size, max_tokens: (prefill_tokens + decode_tokens), max_blocks, }; // Increment batch id self.next_batch_id += 1; metrics::histogram!("tgi_batch_next_size").record(batch.size as f64); Some((batch_entries, batch, next_batch_span)) } } type NextBatch = (IntMap<u64, Entry>, Batch, Span); #[derive(Debug)] enum QueueCommand { Append(Box<Entry>, Span), NextBatch { min_size: Option<usize>, max_size: Option<usize>, prefill_token_budget: u32, token_budget: u32, response_sender: oneshot::Sender<Option<NextBatch>>, span: Span, }, } impl From<ValidParameters> for NextTokenChooserParameters { fn from(value: ValidParameters) -> Self { let (grammar, grammar_type) = match value.grammar { None => (String::new(), GrammarType::None), Some(grammar) => match grammar { ValidGrammar::Json(grammar_string) => (grammar_string, GrammarType::Json), ValidGrammar::Regex(grammar_string) => (grammar_string, GrammarType::Regex), }, }; Self { temperature: 
value.temperature, top_k: value.top_k, top_p: value.top_p, typical_p: value.typical_p, do_sample: value.do_sample, seed: value.seed, repetition_penalty: value.repetition_penalty, frequency_penalty: value.frequency_penalty, watermark: value.watermark, grammar, grammar_type: grammar_type.into(), } } } impl From<ValidStoppingParameters> for StoppingCriteriaParameters { fn from(value: ValidStoppingParameters) -> Self { Self { max_new_tokens: value.max_new_tokens, stop_sequences: value.stop_sequences, ignore_eos_token: value.ignore_eos_token, } } } #[cfg(test)] mod tests { use std::sync::Arc; use super::*; use tracing::info_span; fn default_entry() -> ( Entry, mpsc::UnboundedReceiver<Result<InferStreamResponse, InferError>>, ) { let (response_tx, receiver_tx) = mpsc::unbounded_channel(); let entry = Entry { request: ValidGenerateRequest { inputs: vec![], input_ids: Some(Arc::new(vec![])), input_length: 1, add_special_tokens: true, truncate: 0, decoder_input_details: false, parameters: ValidParameters { temperature: 0.0, top_k: 0, top_p: 0.0, typical_p: 0.0, do_sample: false, seed: 0, repetition_penalty: 0.0, frequency_penalty: 0.0, watermark: false, grammar: None, }, stopping_parameters: ValidStoppingParameters { ignore_eos_token: false, max_new_tokens: 1, max_total_new_tokens: 1024, stop_sequences: vec![], }, top_n_tokens: 0, adapter_id: None, }, response_tx, span: info_span!("entry"), temp_span: None, queue_time: Instant::now(), batch_time: None, block_allocation: None, }; (entry, receiver_tx) } #[tokio::test] async fn test_append() { let mut state = State::new(false, 1, false, None, 0, 16, false); let (entry, _guard) = default_entry(); assert_eq!(state.next_id, 0); assert_eq!(state.entries.len(), 0); state.append(entry); assert_eq!(state.next_id, 1); assert_eq!(state.entries.len(), 1); let (id, _) = state.entries.remove(0).unwrap(); assert_eq!(id, 0); } #[tokio::test] async fn test_next_batch_empty() { let mut state = State::new(false, 1, false, None, 0, 16, false); 
assert!(state.next_batch(None, None, 1, 1).await.is_none()); assert!(state.next_batch(Some(1), None, 1, 1).await.is_none()); } #[tokio::test] async fn test_next_batch_min_size() { let mut state = State::new(false, 1, false, None, 0, 16, false); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); state.append(entry1); state.append(entry2); let (entries, batch, _) = state.next_batch(None, None, 2, 2).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&0)); assert!(entries.contains_key(&1)); assert!(entries.get(&0).unwrap().batch_time.is_some()); assert!(entries.get(&1).unwrap().batch_time.is_some()); assert_eq!(batch.id, 0); assert_eq!(batch.size, 2); assert_eq!(state.next_id, 2); assert_eq!(state.entries.len(), 0); assert_eq!(state.next_batch_id, 1); let (entry3, _guard3) = default_entry(); state.append(entry3); assert!(state.next_batch(Some(2), None, 2, 2).await.is_none()); assert_eq!(state.next_id, 3); assert_eq!(state.entries.len(), 1); let (id, _) = state.entries.remove(0).unwrap(); assert_eq!(id, 2); } #[tokio::test] async fn test_next_batch_max_size() { let mut state = State::new(false, 1, false, None, 0, 16, false); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); state.append(entry1); state.append(entry2); let (entries, batch, _) = state.next_batch(None, Some(1), 2, 2).await.unwrap(); assert_eq!(entries.len(), 1); assert!(entries.contains_key(&0)); assert!(entries.get(&0).unwrap().batch_time.is_some()); assert_eq!(batch.id, 0); assert_eq!(batch.size, 1); assert_eq!(state.next_id, 2); assert_eq!(state.entries.len(), 1); assert_eq!(state.next_batch_id, 1); } #[tokio::test] async fn test_next_batch_token_budget() { let mut state = State::new(false, 1, false, None, 0, 16, false); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); state.append(entry1); state.append(entry2); let (entries, batch, _) = state.next_batch(None, None, 1, 
1).await.unwrap(); assert_eq!(entries.len(), 1); assert!(entries.contains_key(&0)); assert_eq!(batch.id, 0); assert_eq!(batch.size, 1); assert_eq!(state.next_id, 2); assert_eq!(state.entries.len(), 1); assert_eq!(state.next_batch_id, 1); let (entry3, _guard3) = default_entry(); state.append(entry3); let (entries, batch, _) = state.next_batch(None, None, 3, 3).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&1)); assert!(entries.contains_key(&2)); assert_eq!(batch.id, 1); assert_eq!(batch.size, 2); assert_eq!(state.next_id, 3); assert_eq!(state.entries.len(), 0); assert_eq!(state.next_batch_id, 2); } #[tokio::test] async fn test_queue_append() { let queue = Queue::new(false, 1, false, None, 0, 16, false); let (entry, _guard) = default_entry(); queue.append(entry); } #[tokio::test] async fn test_queue_next_batch_empty() { let queue = Queue::new(false, 1, false, None, 0, 16, false); assert!(queue.next_batch(None, None, 1, 1).await.is_none()); assert!(queue.next_batch(Some(1), None, 1, 1).await.is_none()); } #[tokio::test] async fn test_queue_next_batch_min_size() { let queue = Queue::new(false, 1, false, None, 0, 16, false); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); queue.append(entry1); queue.append(entry2); let (entries, batch, _) = queue.next_batch(None, None, 2, 2).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&0)); assert!(entries.contains_key(&1)); assert!(entries.get(&0).unwrap().batch_time.is_some()); assert!(entries.get(&1).unwrap().batch_time.is_some()); assert_eq!(batch.id, 0); assert_eq!(batch.size, 2); let (entry3, _guard3) = default_entry(); queue.append(entry3); // Not enough requests pending assert!(queue.next_batch(Some(2), None, 2, 2).await.is_none()); // Not enough token budget assert!(queue.next_batch(Some(1), None, 0, 0).await.is_none()); // Ok let (entries2, batch2, _) = queue.next_batch(Some(1), None, 2, 2).await.unwrap(); assert_eq!(entries2.len(), 
1); assert!(entries2.contains_key(&2)); assert!(entries2.get(&2).unwrap().batch_time.is_some()); assert_eq!(batch2.id, 1); assert_eq!(batch2.size, 1); } #[tokio::test] async fn test_queue_next_batch_max_size() { let queue = Queue::new(false, 1, false, None, 0, 16, false); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); queue.append(entry1); queue.append(entry2); let (entries, batch, _) = queue.next_batch(None, Some(1), 2, 2).await.unwrap(); assert_eq!(entries.len(), 1); assert!(entries.contains_key(&0)); assert!(entries.get(&0).unwrap().batch_time.is_some()); assert_eq!(batch.id, 0); assert_eq!(batch.size, 1); } #[tokio::test] async fn test_queue_next_batch_token_budget() { let queue = Queue::new(false, 1, false, None, 0, 16, false); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); queue.append(entry1); queue.append(entry2); let (entries, batch, _) = queue.next_batch(None, None, 1, 1).await.unwrap(); assert_eq!(entries.len(), 1); assert!(entries.contains_key(&0)); assert_eq!(batch.id, 0); assert_eq!(batch.size, 1); let (entry3, _guard3) = default_entry(); queue.append(entry3); let (entries, batch, _) = queue.next_batch(None, None, 3, 3).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&1)); assert!(entries.contains_key(&2)); assert_eq!(batch.id, 1); assert_eq!(batch.size, 2); } #[tokio::test] async fn test_queue_next_batch_token_speculate() { let queue = Queue::new(true, 1, false, None, 2, 16, false); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); queue.append(entry1); queue.append(entry2); // Budget of 1 is not enough assert!(queue.next_batch(None, None, 1, 1).await.is_none()); let (entries, batch, _) = queue.next_batch(None, None, 6, 6).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&0)); assert!(entries.contains_key(&1)); assert_eq!(batch.id, 0); assert_eq!(batch.size, 2); } #[tokio::test] async fn 
test_queue_next_batch_dropped_receiver() { let queue = Queue::new(false, 1, false, None, 0, 16, false); let (entry, _) = default_entry(); queue.append(entry); assert!(queue.next_batch(None, None, 1, 1).await.is_none()); } }
text-generation-inference/backends/v3/src/queue.rs/0
{ "file_path": "text-generation-inference/backends/v3/src/queue.rs", "repo_id": "text-generation-inference", "token_count": 15467 }
294
import pytest from text_generation import __version__ from huggingface_hub.utils import build_hf_headers @pytest.fixture def flan_t5_xxl(): return "google/flan-t5-xxl" @pytest.fixture def llama_7b(): return "meta-llama/Llama-2-7b-chat-hf" @pytest.fixture def fake_model(): return "fake/model" @pytest.fixture def unsupported_model(): return "google-bert/bert-base-uncased" @pytest.fixture def base_url(): return "https://api-inference.huggingface.co/models" @pytest.fixture def bloom_url(base_url, bloom_model): return f"{base_url}/{bloom_model}" @pytest.fixture def flan_t5_xxl_url(base_url, flan_t5_xxl): return f"{base_url}/{flan_t5_xxl}" @pytest.fixture def llama_7b_url(base_url, llama_7b): return f"{base_url}/{llama_7b}" @pytest.fixture def fake_url(base_url, fake_model): return f"{base_url}/{fake_model}" @pytest.fixture def unsupported_url(base_url, unsupported_model): return f"{base_url}/{unsupported_model}" @pytest.fixture(scope="session") def hf_headers(): return build_hf_headers( library_name="text-generation-tests", library_version=__version__ )
text-generation-inference/clients/python/tests/conftest.py/0
{ "file_path": "text-generation-inference/clients/python/tests/conftest.py", "repo_id": "text-generation-inference", "token_count": 486 }
295
# Gaudi Backend for Text Generation Inference ## Overview Text Generation Inference (TGI) has been optimized to run on Gaudi hardware via the Gaudi backend for TGI. ## Supported Hardware - **Gaudi1**: Available on [AWS EC2 DL1 instances](https://aws.amazon.com/ec2/instance-types/dl1/) - **Gaudi2**: Available on [Intel Cloud](https://console.cloud.intel.com/docs/reference/ai_instances.html) - **Gaudi3**: Available on [Intel Cloud](https://console.cloud.intel.com/docs/reference/ai_instances.html) ## Tutorial: Getting Started with TGI on Gaudi ### Basic Usage The easiest way to run TGI on Gaudi is to use the official Docker image: ```bash model=meta-llama/Meta-Llama-3.1-8B-Instruct volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run hf_token=YOUR_HF_ACCESS_TOKEN docker run --runtime=habana --cap-add=sys_nice --ipc=host \ -p 8080:80 -v $volume:/data -e HF_TOKEN=$hf_token \ ghcr.io/huggingface/text-generation-inference:3.3.4-gaudi \ --model-id $model ``` Once you see the `connected` log, the server is ready to accept requests: > 2024-05-22T19:31:48.302239Z INFO text_generation_router: router/src/main.rs:378: Connected You can find your `YOUR_HF_ACCESS_TOKEN` at [https://huggingface.co/settings/tokens](https://huggingface.co/settings/tokens). This is necessary to access gated models like llama3.1. ### Making Your First Request You can send a request from a separate terminal: ```bash curl 127.0.0.1:8080/generate \ -X POST \ -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":32}}' \ -H 'Content-Type: application/json' ``` ## How-to Guides You can view the full list of supported models in the [Supported Models](https://huggingface.co/docs/text-generation-inference/backends/gaudi#supported-models) section. 
For example, to run Llama3.1-8B, you can use the following command: ```bash model=meta-llama/Meta-Llama-3.1-8B-Instruct volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run hf_token=YOUR_ACCESS_TOKEN docker run --runtime=habana --cap-add=sys_nice --ipc=host \ -p 8080:80 -v $volume:/data -e HF_TOKEN=$hf_token \ ghcr.io/huggingface/text-generation-inference:3.3.4-gaudi \ --model-id $model <text-generation-inference-launcher-arguments> ``` For the full list of service parameters, refer to the [launcher-arguments page](https://huggingface.co/docs/text-generation-inference/reference/launcher). The validated docker commands can be found in the [examples/docker_commands folder](https://github.com/huggingface/text-generation-inference/tree/main/backends/gaudi/examples/docker_commands). > Note: `--runtime=habana --cap-add=sys_nice --ipc=host ` is required to enable docker to use the Gaudi hardware (more details [here](https://docs.habana.ai/en/latest/Installation_Guide/Additional_Installation/Docker_Installation.html)). ### How to Enable Multi-Card Inference (Sharding) TGI-Gaudi supports sharding for multi-card inference, allowing you to distribute the load across multiple Gaudi cards. This is recommended to run large models and to speed up inference. For example, on a machine with 8 Gaudi cards, you can run: ```bash docker run --runtime=habana --ipc=host --cap-add=sys_nice \ -p 8080:80 -v $volume:/data -e HF_TOKEN=$hf_token \ tgi-gaudi \ --model-id $model --sharded true --num-shard 8 ``` <Tip> We recommend always using sharding when running on a multi-card machine. </Tip> ### How to Use Different Precision Formats #### BF16 Precision (Default) By default, all models run with BF16 precision on Gaudi hardware. #### FP8 Precision TGI-Gaudi supports FP8 precision inference, which can significantly reduce memory usage and improve performance for large models. 
We support model like W8A8 FP compressed-tensors parameters such as [RedHatAI/Mixtral-8x7B-Instruct-v0.1-FP8](https://huggingface.co/RedHatAI/Mixtral-8x7B-Instruct-v0.1-FP8) and AutoFP8 generated model[RedHatAI/Meta-Llama-3-8B-Instruct-FP8](https://huggingface.co/RedHatAI/Meta-Llama-3-8B-Instruct-FP8) . TGI-Gaudi supports FP8 precision inference with [Intel Neural Compressor (INC)](https://docs.habana.ai/en/latest/PyTorch/Inference_on_PyTorch/Inference_Using_FP8.html). ### How to Run Vision-Language Models (VLMs) Gaudi supports VLM inference. Example for Llava-v1.6-Mistral-7B on 1 card: Start the TGI server via the following command: ```bash model=llava-hf/llava-v1.6-mistral-7b-hf volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run docker run -p 8080:80 \ --runtime=habana \ --cap-add=sys_nice \ --ipc=host \ -v $volume:/data \ ghcr.io/huggingface/text-generation-inference:3.3.4-gaudi \ --model-id $model \ --max-input-tokens 4096 --max-batch-prefill-tokens 16384 \ --max-total-tokens 8192 --max-batch-size 4 ``` You can then send a request to the server via the following command: ```bash curl -N 127.0.0.1:8080/generate \ -X POST \ -d '{"inputs":"![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png)What is this a picture of?\n\n","parameters":{"max_new_tokens":32}}' \ -H 'Content-Type: application/json' ``` > Note: In Llava-v1.6-Mistral-7B, an image usually accounts for 2000 input tokens. For example, an image of size 512x512 is represented by 2800 tokens. Thus, `max-input-tokens` must be larger than the number of tokens associated with the image. Otherwise the image may be truncated. The value of `max-batch-prefill-tokens` is 16384, which is calculated as follows: `prefill_batch_size` = `max-batch-prefill-tokens` / `max-input-tokens`. 
### How to Benchmark Performance We recommend using the [inference-benchmarker tool](https://github.com/huggingface/inference-benchmarker) to benchmark performance on Gaudi hardware. This benchmark tool simulates user requests and measures the performance of the model on realistic scenarios. To run it on the same machine, you can do the following: ```bash MODEL=meta-llama/Llama-3.1-8B-Instruct HF_TOKEN=<your HF READ token> # run a benchmark to evaluate the performance of the model for chat use case # we mount results to the current directory docker run \ --rm \ -it \ --net host \ -v $(pwd):/opt/inference-benchmarker/results \ -e "HF_TOKEN=$HF_TOKEN" \ ghcr.io/huggingface/inference-benchmarker:latest \ inference-benchmarker \ --tokenizer-name "$MODEL" \ --url http://localhost:8080 \ --profile chat ``` Please refer to the [inference-benchmarker README](https://github.com/huggingface/inference-benchmarker) for more details. ## Explanation: Understanding TGI on Gaudi ### The Warmup Process Intel Gaudi accelerators perform best when operating on models with fixed tensor shapes. [Intel Gaudi Graph Compiler](https://docs.habana.ai/en/latest/Gaudi_Overview/Intel_Gaudi_Software_Suite.html#graph-compiler-and-runtime) generates optimized binary code that implements the given model topology on Gaudi. In its default configuration, the produced binary code may be highly dependent on input and output tensor shapes, requiring graph recompilation when encountering tensors with different shapes within the same topology. While these binaries efficiently utilize Gaudi, the compilation process itself can introduce noticeable overhead in end-to-end execution. In dynamic inference serving scenarios, minimizing the number of graph compilations and reducing the risk of graph compilation occurring during server runtime is important. To ensure optimal performance, warmup is performed at the beginning of each server run. 
This process creates queries with various input shapes based on provided parameters and runs basic TGI operations (prefill, decode). Note: Model warmup can take several minutes, especially for FP8 inference. For faster subsequent runs, refer to [Disk Caching Eviction Policy](https://docs.habana.ai/en/latest/PyTorch/Model_Optimization_PyTorch/Optimization_in_PyTorch_Models.html#disk-caching-eviction-policy). ### Understanding Parameter Tuning #### Sequence Length Parameters - `--max-input-tokens` is the maximum possible input prompt length. Default value is `4095`. - `--max-total-tokens` is the maximum possible total length of the sequence (input and output). Default value is `4096`. #### Batch Size Parameters - For prefill operation, please set `--max-batch-prefill-tokens` as `bs * max-input-tokens`, where `bs` is your expected maximum prefill batch size. - For decode operation, please set `--max-batch-size` as `bs`, where `bs` is your expected maximum decode batch size. - Please note that batch size will be always padded to the nearest shapes that has been warmed up. This is done to avoid out of memory issues and to ensure that the graphs are reused efficiently. ## Reference This section contains reference information about the Gaudi backend. ### Supported Models Text Generation Inference enables serving optimized models on Gaudi hardware. The following sections list which models (VLMs & LLMs) are supported on Gaudi. 
**Large Language Models (LLMs)** - [deepseek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1) - [deepseek-v2](https://huggingface.co/deepseek-ai/DeepSeek-V2) - [Llama2](https://huggingface.co/collections/meta-llama/llama-2-family-661da1f90a9d678b6f55773b) - [Llama3](https://huggingface.co/collections/meta-llama/llama-31-669fc079a0c406a149a5738f) - [CodeLlama](https://huggingface.co/codellama/CodeLlama-13b-hf) - [Mixtral](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) - [Mistral](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3) - [Qwen 2](https://huggingface.co/collections/Qwen/qwen2-6659360b33528ced941e557f) - [Qwen 3](https://huggingface.co/collections/Qwen/qwen3-67dd247413f0e2e4f653967f) - [Qwen 3 Moe](https://huggingface.co/collections/Qwen/qwen3-67dd247413f0e2e4f653967f) - [Phi-1.5](https://huggingface.co/microsoft/phi-1_5) - [Phi-3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) - [PhiMoe](https://huggingface.co/microsoft/Phi-3.5-MoE-instruct) - [Gemma](https://huggingface.co/google/gemma-7b-it) - [Gemma2](https://huggingface.co/collections/google/gemma-2-release-667d6600fd5220e7b967f315) - [Gemma3 Text](https://huggingface.co/collections/google/gemma-3-release-67c6c6f89c4f76621268bb6d) - [Granite](https://huggingface.co/ibm-granite/granite-3.0-8b-instruct) - [Cohere](https://huggingface.co/CohereForAI/c4ai-command-r-plus) - [dbrx](https://huggingface.co/databricks/dbrx-instruct) - [Starcoder2](https://huggingface.co/bigcode/starcoder2-3b) - [Falcon](https://huggingface.co/tiiuae/falcon-7b-instruct) - [GPT-2](https://huggingface.co/openai-community/gpt2) - [gpt-j-6b](https://huggingface.co/EleutherAI/gpt-j-6b) - [gpt-bigcode](https://huggingface.co/bigcode/gpt_bigcode-santacoder) - [Baichuan](https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat) **Vision-Language Models (VLMs)** - [Llava Next (1.6)](https://huggingface.co/llava-hf/llava-v1.6-vicuna-13b-hf) - [Mllama (Multimodal Llama from 
Meta)](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct) - [idefics 2](https://huggingface.co/HuggingFaceM4/idefics2-8b) - [idefics 3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3) - [PaliGemma](https://huggingface.co/google/paligemma-3b-pt-224) - [Llama4](https://huggingface.co/collections/meta-llama/llama-4-67f0c30d9fe03840bc9d0164) - [Gemma3](https://huggingface.co/collections/google/gemma-3-release-67c6c6f89c4f76621268bb6d) - [Qwen 2.5 VL](https://huggingface.co/collections/Qwen/qwen25-vl-6795ffac22b334a837c0f9a5) - [Qwen 2 VL](https://huggingface.co/collections/Qwen/qwen2-vl-66cee7455501d7126940800d) If you have an issue with a model, please open an issue on the [Gaudi backend repository](https://github.com/huggingface/text-generation-inference/issues). ### Environment Variables The following table contains the environment variables that can be used to configure the Gaudi backend: | Name | Value(s) | Default | Description | Usage | |-----------------------------| :--------- | :--------------- | :------------------------------------------------------------------------------------------------------------------------------- | :--------------------------- | | LIMIT_HPU_GRAPH | True/False | True | Skip HPU graph usage for prefill to save memory, set to `True` for large sequence/decoding lengths(e.g. 300/212) | add -e in docker run command | | SKIP_TOKENIZER_IN_TGI | True/False | False | Skip tokenizer for input/output processing | add -e in docker run command | | VLLM_SKIP_WARMUP | True/False | False | Skip graph warmup during server initialization which is not recommended, but could be used for debug. | add -e in docker run command | ## Contributing Contributions to the TGI-Gaudi project are welcome. Please refer to the [contributing guide](https://github.com/huggingface/text-generation-inference/blob/main/CONTRIBUTING.md). **Guidelines for contributing to Gaudi on TGI:** All changes should be made within the `backends/gaudi` folder. 
In general, you should avoid modifying the router, launcher, or benchmark to accommodate Gaudi hardware, as all Gaudi-specific logic should be contained within the `backends/gaudi` folder. ### Building the Docker Image from Source To build the Docker image from source: ```bash make -C backends/gaudi image ``` This builds the image and saves it as `tgi-gaudi`. You can then run TGI-Gaudi with this image: ```bash model=meta-llama/Meta-Llama-3.1-8B-Instruct volume=$PWD/data hf_token=YOUR_ACCESS_TOKEN docker run --runtime=habana --ipc=host --cap-add=sys_nice \ -p 8080:80 -v $volume:/data -e HF_TOKEN=$hf_token \ tgi-gaudi \ --model-id $model ``` For more details, see the [README of the Gaudi backend](https://github.com/huggingface/text-generation-inference/blob/main/backends/gaudi/README.md) and the [Makefile of the Gaudi backend](https://github.com/huggingface/text-generation-inference/blob/main/backends/gaudi/Makefile).
text-generation-inference/docs/source/backends/gaudi.mdx/0
{ "file_path": "text-generation-inference/docs/source/backends/gaudi.mdx", "repo_id": "text-generation-inference", "token_count": 5146 }
296
# Using TGI with Google TPUs Check out this [guide](https://huggingface.co/docs/optimum-tpu) on how to serve models with TGI on TPUs.
text-generation-inference/docs/source/installation_tpu.md/0
{ "file_path": "text-generation-inference/docs/source/installation_tpu.md", "repo_id": "text-generation-inference", "token_count": 48 }
297
{ "choices": [ { "finish_reason": "length", "index": 0, "logprobs": null, "message": { "content": "Both an elephant and a mouse are mammals. However, the differences between elephants and mice are:\n\n1", "role": "assistant" } } ], "created": 1732541189, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "chat.completion", "system_fingerprint": "2.4.1-dev0-native", "usage": { "completion_tokens": 30, "prompt_tokens": 49, "total_tokens": 79 } }
text-generation-inference/integration-tests/models/__snapshots__/test_continue_final_message/test_llama_completion_single_prompt.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_continue_final_message/test_llama_completion_single_prompt.json", "repo_id": "text-generation-inference", "token_count": 258 }
298
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 688, "logprob": -0.546875, "special": false, "text": "**" }, { "id": 103889, "logprob": -0.49023438, "special": false, "text": "Hydrogen" }, { "id": 190213, "logprob": -0.48632812, "special": false, "text": "**," }, { "id": 2611, "logprob": -0.58203125, "special": false, "text": " light" }, { "id": 578, "logprob": -0.099121094, "special": false, "text": " and" }, { "id": 2223, "logprob": -1.078125, "special": false, "text": " free" }, { "id": 235269, "logprob": -0.025756836, "special": false, "text": "," }, { "id": 108, "logprob": -0.29101562, "special": false, "text": "\n" }, { "id": 688, "logprob": -0.0035858154, "special": false, "text": "**" }, { "id": 1949, "logprob": -4.1007996e-05, "special": false, "text": "He" } ], "top_tokens": null }, "generated_text": "**Hydrogen**, light and free,\n**He" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma2/test_flash_gemma2.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma2/test_flash_gemma2.json", "repo_id": "text-generation-inference", "token_count": 877 }
299
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 29896, "logprob": -0.7709961, "special": false, "text": "1" }, { "id": 29906, "logprob": -0.33740234, "special": false, "text": "2" }, { "id": 29941, "logprob": -0.00995636, "special": false, "text": "3" }, { "id": 29946, "logprob": -0.64208984, "special": false, "text": "4" }, { "id": 29945, "logprob": -0.4970703, "special": false, "text": "5" }, { "id": 29953, "logprob": -0.46533203, "special": false, "text": "6" }, { "id": 29992, "logprob": -0.5336914, "special": false, "text": "@" }, { "id": 21980, "logprob": -0.5361328, "special": false, "text": "gmail" }, { "id": 29889, "logprob": -0.00088739395, "special": false, "text": "." }, { "id": 510, "logprob": -0.0022735596, "special": false, "text": "com" } ], "top_tokens": null }, "generated_text": "123456@gmail.com" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 29896, "logprob": -0.7685547, "special": false, "text": "1" }, { "id": 29906, "logprob": -0.33666992, "special": false, "text": "2" }, { "id": 29941, "logprob": -0.01008606, "special": false, "text": "3" }, { "id": 29946, "logprob": -0.64160156, "special": false, "text": "4" }, { "id": 29945, "logprob": -0.5, "special": false, "text": "5" }, { "id": 29953, "logprob": -0.46557617, "special": false, "text": "6" }, { "id": 29992, "logprob": -0.5341797, "special": false, "text": "@" }, { "id": 21980, "logprob": -0.5361328, "special": false, "text": "gmail" }, { "id": 29889, "logprob": -0.00088739395, "special": false, "text": "." 
}, { "id": 510, "logprob": -0.0022907257, "special": false, "text": "com" } ], "top_tokens": null }, "generated_text": "123456@gmail.com" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 29896, "logprob": -0.7709961, "special": false, "text": "1" }, { "id": 29906, "logprob": -0.33740234, "special": false, "text": "2" }, { "id": 29941, "logprob": -0.00995636, "special": false, "text": "3" }, { "id": 29946, "logprob": -0.64208984, "special": false, "text": "4" }, { "id": 29945, "logprob": -0.4970703, "special": false, "text": "5" }, { "id": 29953, "logprob": -0.46533203, "special": false, "text": "6" }, { "id": 29992, "logprob": -0.5336914, "special": false, "text": "@" }, { "id": 21980, "logprob": -0.5361328, "special": false, "text": "gmail" }, { "id": 29889, "logprob": -0.00088739395, "special": false, "text": "." }, { "id": 510, "logprob": -0.0022735596, "special": false, "text": "com" } ], "top_tokens": null }, "generated_text": "123456@gmail.com" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 29896, "logprob": -0.7709961, "special": false, "text": "1" }, { "id": 29906, "logprob": -0.33740234, "special": false, "text": "2" }, { "id": 29941, "logprob": -0.00995636, "special": false, "text": "3" }, { "id": 29946, "logprob": -0.64208984, "special": false, "text": "4" }, { "id": 29945, "logprob": -0.4970703, "special": false, "text": "5" }, { "id": 29953, "logprob": -0.46533203, "special": false, "text": "6" }, { "id": 29992, "logprob": -0.5336914, "special": false, "text": "@" }, { "id": 21980, "logprob": -0.5361328, "special": false, "text": "gmail" }, { "id": 29889, "logprob": -0.00088739395, "special": false, "text": "." }, { "id": 510, "logprob": -0.0022735596, "special": false, "text": "com" } ], "top_tokens": null }, "generated_text": "123456@gmail.com" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar_load.json", "repo_id": "text-generation-inference", "token_count": 4039 }
300
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": 0, "tokens": [ { "id": 13, "logprob": -2.2539062, "special": false, "text": "." }, { "id": 578, "logprob": -0.15563965, "special": false, "text": " The" }, { "id": 3622, "logprob": -0.8203125, "special": false, "text": " server" }, { "id": 706, "logprob": 0.0, "special": false, "text": " has" }, { "id": 539, "logprob": 0.0, "special": false, "text": " not" }, { "id": 3686, "logprob": 0.0, "special": false, "text": " yet" }, { "id": 3288, "logprob": 0.0, "special": false, "text": " sent" }, { "id": 904, "logprob": 0.0, "special": false, "text": " any" }, { "id": 828, "logprob": 0.0, "special": false, "text": " data" }, { "id": 382, "logprob": -1.5517578, "special": false, "text": ".\n\n" } ], "top_tokens": null }, "generated_text": "Test request. The server has not yet sent any data.\n\n" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_all_params.json", "repo_id": "text-generation-inference", "token_count": 859 }
301
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 20910, "logprob": -0.96484375, "special": false, "text": "Grad" }, { "id": 722, "logprob": -0.003168106, "special": false, "text": "ient" }, { "id": 24871, "logprob": -0.16540527, "special": false, "text": " descent" }, { "id": 349, "logprob": -0.08886719, "special": false, "text": " is" }, { "id": 396, "logprob": -0.75878906, "special": false, "text": " an" }, { "id": 18586, "logprob": -0.5703125, "special": false, "text": " optimization" }, { "id": 9464, "logprob": -0.11242676, "special": false, "text": " algorithm" }, { "id": 1307, "logprob": -0.7939453, "special": false, "text": " used" }, { "id": 298, "logprob": -0.17102051, "special": false, "text": " to" }, { "id": 26518, "logprob": -0.34326172, "special": false, "text": " minimize" } ], "top_tokens": null }, "generated_text": "Gradient descent is an optimization algorithm used to minimize" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_mixtral/test_flash_mixtral.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_mixtral/test_flash_mixtral.json", "repo_id": "text-generation-inference", "token_count": 868 }
302
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 13, "logprob": -0.007621765, "special": false, "text": "\n" }, { "id": 13, "logprob": -0.20812988, "special": false, "text": "\n" }, { "id": 16114, "logprob": -1.2587891, "special": false, "text": "Once" }, { "id": 3714, "logprob": -0.20825195, "special": false, "text": " upon" }, { "id": 264, "logprob": -0.0017709732, "special": false, "text": " a" }, { "id": 727, "logprob": -0.011932373, "special": false, "text": " time" }, { "id": 28725, "logprob": -0.17297363, "special": false, "text": "," }, { "id": 736, "logprob": -0.9057617, "special": false, "text": " there" }, { "id": 403, "logprob": -0.05758667, "special": false, "text": " was" }, { "id": 264, "logprob": -0.00970459, "special": false, "text": " a" } ], "top_tokens": null }, "generated_text": "\n\nOnce upon a time, there was a" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 13, "logprob": -0.007621765, "special": false, "text": "\n" }, { "id": 13, "logprob": -0.20275879, "special": false, "text": "\n" }, { "id": 16114, "logprob": -1.2578125, "special": false, "text": "Once" }, { "id": 3714, "logprob": -0.2084961, "special": false, "text": " upon" }, { "id": 264, "logprob": -0.0017738342, "special": false, "text": " a" }, { "id": 727, "logprob": -0.011932373, "special": false, "text": " time" }, { "id": 28725, "logprob": -0.17297363, "special": false, "text": "," }, { "id": 736, "logprob": -0.9057617, "special": false, "text": " there" }, { "id": 403, "logprob": -0.05758667, "special": false, "text": " was" }, { "id": 264, "logprob": -0.00970459, "special": false, "text": " a" } ], "top_tokens": null }, "generated_text": "\n\nOnce upon a time, there was a" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], 
"seed": null, "tokens": [ { "id": 13, "logprob": -0.007621765, "special": false, "text": "\n" }, { "id": 13, "logprob": -0.20275879, "special": false, "text": "\n" }, { "id": 16114, "logprob": -1.2578125, "special": false, "text": "Once" }, { "id": 3714, "logprob": -0.2084961, "special": false, "text": " upon" }, { "id": 264, "logprob": -0.0017738342, "special": false, "text": " a" }, { "id": 727, "logprob": -0.011932373, "special": false, "text": " time" }, { "id": 28725, "logprob": -0.17297363, "special": false, "text": "," }, { "id": 736, "logprob": -0.9057617, "special": false, "text": " there" }, { "id": 403, "logprob": -0.05758667, "special": false, "text": " was" }, { "id": 264, "logprob": -0.00970459, "special": false, "text": " a" } ], "top_tokens": null }, "generated_text": "\n\nOnce upon a time, there was a" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 13, "logprob": -0.007621765, "special": false, "text": "\n" }, { "id": 13, "logprob": -0.20812988, "special": false, "text": "\n" }, { "id": 16114, "logprob": -1.2587891, "special": false, "text": "Once" }, { "id": 3714, "logprob": -0.20825195, "special": false, "text": " upon" }, { "id": 264, "logprob": -0.0017709732, "special": false, "text": " a" }, { "id": 727, "logprob": -0.011932373, "special": false, "text": " time" }, { "id": 28725, "logprob": -0.17297363, "special": false, "text": "," }, { "id": 736, "logprob": -0.9057617, "special": false, "text": " there" }, { "id": 403, "logprob": -0.05758667, "special": false, "text": " was" }, { "id": 264, "logprob": -0.00970459, "special": false, "text": " a" } ], "top_tokens": null }, "generated_text": "\n\nOnce upon a time, there was a" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_llava_next/test_flash_llava_next_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_llava_next/test_flash_llava_next_load.json", "repo_id": "text-generation-inference", "token_count": 4048 }
303
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 42, "logprob": -0.86279297, "special": false, "text": "I" }, { "id": 1353, "logprob": -0.94921875, "special": false, "text": "'m" }, { "id": 7016, "logprob": -2.1835938, "special": false, "text": " sorry" }, { "id": 13, "logprob": -0.074035645, "special": false, "text": "," }, { "id": 1394, "logprob": -0.86376953, "special": false, "text": "You" }, { "id": 452, "logprob": -1.2070312, "special": false, "text": " have" }, { "id": 247, "logprob": -1.4365234, "special": false, "text": " a" }, { "id": 4327, "logprob": -1.109375, "special": false, "text": " choice" }, { "id": 273, "logprob": -0.93408203, "special": false, "text": " of" }, { "id": 752, "logprob": -1.8808594, "special": false, "text": " what" } ] }, "generated_text": "I'm sorry,You have a choice of what" }
text-generation-inference/integration-tests/models/__snapshots__/test_neox/test_neox.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_neox/test_neox.json", "repo_id": "text-generation-inference", "token_count": 853 }
304
import pytest


@pytest.fixture(scope="module")
def flash_llama_chat_handle(launcher):
    """Launch a two-shard TinyLlama chat server with grammar support enabled."""
    launch_kwargs = {
        "num_shard": 2,
        "disable_grammar_support": False,
    }
    with launcher("TinyLlama/TinyLlama-1.1B-Chat-v1.0", **launch_kwargs) as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_llama_chat(flash_llama_chat_handle):
    """Block until the launched server reports healthy, then expose its client."""
    await flash_llama_chat_handle.health(300)
    return flash_llama_chat_handle.client


@pytest.mark.private
async def test_flash_llama_simple(flash_llama_chat, response_snapshot):
    """A seeded, non-streaming chat request must reproduce the pinned completion."""
    conversation = [
        {
            "role": "system",
            "content": "Youre a helpful assistant! Answer the users question best you can.",
        },
        {
            "role": "user",
            "content": "What is the weather like in Brooklyn, New York?",
        },
    ]
    response = await flash_llama_chat.chat(
        max_tokens=100,
        seed=1,
        messages=conversation,
    )

    generated = response.choices[0].message.content
    print(repr(generated))
    expected = (
        "As of your last question, the weather in Brooklyn, New York, is typically "
        "hot and humid throughout the year. The suburbs around New York City are "
        "jealously sheltered, and at least in the Lower Bronx, there are very few "
        "outdoor environments to appreciate nature.\n\nIn terms of temperature, the "
        "warmest times of the year are from June to August, when average high "
        "temperatures typically range from around 73°F or 23°C"
    )
    assert generated == expected
    assert response == response_snapshot
text-generation-inference/integration-tests/models/test_chat_llama.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_chat_llama.py", "repo_id": "text-generation-inference", "token_count": 594 }
305
import pytest


@pytest.fixture(scope="module")
def flash_gemma_gptq_handle(launcher):
    """Spin up a single-shard GPTQ-quantized Gemma 2B server."""
    with launcher("TechxGenus/gemma-2b-GPTQ", num_shard=1, quantize="gptq") as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_gemma_gptq(flash_gemma_gptq_handle):
    """Wait (up to 300s) for the server to become healthy, then return its client."""
    await flash_gemma_gptq_handle.health(300)
    return flash_gemma_gptq_handle.client


@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_gemma_gptq(flash_gemma_gptq, ignore_logprob_response_snapshot):
    """Plain generation emits exactly ten tokens and matches the stored snapshot."""
    response = await flash_gemma_gptq.generate(
        "Test request",
        max_new_tokens=10,
        decoder_input_details=True,
    )

    assert response.details.generated_tokens == 10
    assert response == ignore_logprob_response_snapshot


@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_gemma_gptq_all_params(
    flash_gemma_gptq, ignore_logprob_response_snapshot
):
    """Exercise every sampling knob at once with a fixed seed."""
    sampling_options = {
        "max_new_tokens": 10,
        "repetition_penalty": 1.2,
        "return_full_text": True,
        "stop_sequences": ["test"],
        "temperature": 0.5,
        "top_p": 0.9,
        "top_k": 10,
        "truncate": 5,
        "typical_p": 0.9,
        "watermark": True,
        "decoder_input_details": True,
        "seed": 0,
    }
    response = await flash_gemma_gptq.generate("Test request", **sampling_options)

    assert response.details.generated_tokens == 10
    assert response == ignore_logprob_response_snapshot


@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_gemma_gptq_load(
    flash_gemma_gptq, generate_load, ignore_logprob_response_snapshot
):
    """Four concurrent identical requests must all yield the same generation."""
    responses = await generate_load(
        flash_gemma_gptq, "Test request", max_new_tokens=10, n=4
    )

    assert len(responses) == 4
    first_text = responses[0].generated_text
    assert all(r.generated_text == first_text for r in responses)
    assert responses == ignore_logprob_response_snapshot
text-generation-inference/integration-tests/models/test_flash_gemma_gptq.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_gemma_gptq.py", "repo_id": "text-generation-inference", "token_count": 804 }
306
import pytest


@pytest.fixture(scope="module")
def flash_mixtral_gptq_handle(launcher):
    """Launch a two-shard GPTQ Mixtral-8x7B-Instruct server."""
    with launcher(
        "TheBloke/Mixtral-8x7B-Instruct-v0.1-GPTQ",
        revision="gptq-4bit-128g-actorder_True",
        num_shard=2,
    ) as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_mixtral_gptq(flash_mixtral_gptq_handle):
    """Wait for the server health check, then return its client."""
    await flash_mixtral_gptq_handle.health(300)
    return flash_mixtral_gptq_handle.client


@pytest.mark.asyncio
async def test_flash_mixtral_gptq(flash_mixtral_gptq, response_snapshot):
    """Greedy decoding of the prompt is pinned to a known completion."""
    response = await flash_mixtral_gptq.generate(
        "What is deep learning?",
        max_new_tokens=10,
        decoder_input_details=True,
    )

    assert response.details.generated_tokens == 10
    assert (
        response.generated_text == "\n\nDeep learning is a subset of machine learning"
    )
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_flash_mixtral_gptq_all_params(flash_mixtral_gptq, response_snapshot):
    """Seeded sampling with every parameter set still yields a stable completion."""
    sampling_options = {
        "max_new_tokens": 10,
        "repetition_penalty": 1.2,
        "return_full_text": True,
        "stop_sequences": ["test"],
        "temperature": 0.5,
        "top_p": 0.9,
        "top_k": 10,
        "truncate": 5,
        "typical_p": 0.9,
        "watermark": True,
        "decoder_input_details": True,
        "seed": 0,
    }
    response = await flash_mixtral_gptq.generate(
        "What is deep learning?", **sampling_options
    )

    assert response.details.generated_tokens == 10
    assert (
        response.generated_text
        == "What is deep learning?\nDeep Learning is a subset of Machine Learning,"
    )
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_flash_mixtral_gptq_load(
    flash_mixtral_gptq, generate_load, response_snapshot
):
    """Four concurrent identical requests must all agree with each other."""
    responses = await generate_load(
        flash_mixtral_gptq, "What is deep learning?", max_new_tokens=10, n=4
    )

    assert len(responses) == 4
    assert (
        responses[0].generated_text
        == "\n\nDeep learning is a subset of machine learning"
    )
    first_text = responses[0].generated_text
    assert all(
        r.generated_text == first_text for r in responses
    ), f"{[r.generated_text for r in responses]}"
    assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_mixtral_gptq.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_mixtral_gptq.py", "repo_id": "text-generation-inference", "token_count": 950 }
307
import pytest
import requests
from pydantic import BaseModel
from typing import List


@pytest.fixture(scope="module")
def llama_grammar_handle(launcher):
    """Launch a single-shard TinyLlama server with grammar support enabled."""
    with launcher(
        "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        num_shard=1,
        disable_grammar_support=False,
        use_flash_attention=False,
        max_batch_prefill_tokens=3000,
    ) as handle:
        yield handle


@pytest.fixture(scope="module")
async def llama_grammar(llama_grammar_handle):
    """Wait for health, then return the client."""
    await llama_grammar_handle.health(300)
    return llama_grammar_handle.client


def _post_chat(client, payload):
    """POST *payload* to the chat-completions route and return the raw response."""
    return requests.post(
        f"{client.base_url}/v1/chat/completions",
        headers=client.headers,
        json=payload,
    )


@pytest.mark.release
@pytest.mark.asyncio
async def test_grammar_response_format_llama_json(llama_grammar, response_snapshot):
    """All three response_format spellings must constrain output to the schema."""

    class Weather(BaseModel):
        unit: str
        temperature: List[int]

    json_payload = {
        "model": "tgi",
        "messages": [
            {
                "role": "system",
                "content": f"Respond to the users questions and answer them in the following format: {Weather.model_json_schema()}",
            },
            {
                "role": "user",
                "content": "What's the weather like the next 3 days in San Francisco, CA?",
            },
        ],
        "seed": 42,
        "max_tokens": 500,
        "response_format": {"type": "json_object", "value": Weather.model_json_schema()},
    }

    # 1) "json_object" with an inline schema value.
    response = _post_chat(llama_grammar, json_payload)
    chat_completion = response.json()
    called = chat_completion["choices"][0]["message"]["content"]
    assert response.status_code == 200
    assert called == '{ "unit": "fahrenheit", "temperature": [ 72, 79, 88 ] }'
    assert chat_completion == response_snapshot

    # 2) The legacy "json" alias must behave identically.
    json_payload["response_format"]["type"] = "json"
    response = _post_chat(llama_grammar, json_payload)
    chat_completion = response.json()
    called = chat_completion["choices"][0]["message"]["content"]
    assert response.status_code == 200
    assert called == '{ "unit": "fahrenheit", "temperature": [ 72, 79, 88 ] }'
    assert chat_completion == response_snapshot

    # 3) The OpenAI-style "json_schema" object form.
    json_payload["response_format"] = {
        "type": "json_schema",
        "value": {
            "name": "weather",
            "strict": True,
            "schema": Weather.model_json_schema(),
        },
    }
    response = _post_chat(llama_grammar, json_payload)
    chat_completion = response.json()
    called = chat_completion["choices"][0]["message"]["content"]
    assert response.status_code == 200
    assert called == '{ "unit": "fahrenheit", "temperature": [ 72, 79, 88 ] }'
    assert chat_completion == response_snapshot


@pytest.mark.release
@pytest.mark.asyncio
async def test_grammar_response_format_llama_error_if_tools_not_installed(
    llama_grammar,
):
    """Supplying tools together with a grammar response_format must be rejected."""

    class Weather(BaseModel):
        unit: str
        temperature: List[int]

    payload = {
        "model": "tgi",
        "messages": [
            {
                "role": "system",
                "content": f"Respond to the users questions and answer them in the following format: {Weather.model_json_schema()}",
            },
            {
                "role": "user",
                "content": "What's the weather like the next 3 days in San Francisco, CA?",
            },
        ],
        "seed": 42,
        "max_tokens": 500,
        "tools": [],
        "response_format": {"type": "json_object", "value": Weather.model_json_schema()},
    }
    response = _post_chat(llama_grammar, payload)

    # 422 means the server was unable to process the request because it contains invalid data.
    assert response.status_code == 422
    assert response.json() == {
        "error": "Tool error: Grammar and tools are mutually exclusive",
        "error_type": "tool_error",
    }
text-generation-inference/integration-tests/models/test_grammar_response_format_llama.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_grammar_response_format_llama.py", "repo_id": "text-generation-inference", "token_count": 1857 }
308
"""Integration tests for tool / function-calling ("grammar tools") on a
Llama-3.1 backend: streaming and non-streaming requests across every
``tool_choice`` mode (auto, required, none, named string, function object),
plus replying to a prior tool result."""

import pytest
from openai import OpenAI
from huggingface_hub import InferenceClient
from huggingface_hub.inference._generated.types.chat_completion import (
    ChatCompletionOutputToolCall,
    ChatCompletionOutputFunctionDefinition,
)


@pytest.fixture(scope="module")
def flash_llama_grammar_tools_handle(launcher):
    # Two-shard Llama-3.1-8B-Instruct with grammar (tool) support enabled.
    with launcher(
        "meta-llama/Meta-Llama-3.1-8B-Instruct",
        num_shard=2,
        disable_grammar_support=False,
    ) as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_llama_grammar_tools(flash_llama_grammar_tools_handle):
    # Wait (up to 300s) for the launched server to report healthy.
    await flash_llama_grammar_tools_handle.health(300)
    return flash_llama_grammar_tools_handle.client


# tools to be used in the following tests
# (OpenAI-style tool declarations: a current-weather lookup and an N-day forecast)
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "format": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "The temperature unit to use. Infer this from the users location.",
                    },
                },
                "required": ["location", "format"],
                "additionalProperties": False,
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "get_n_day_weather_forecast",
            "description": "Get an N-day weather forecast",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "format": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "The temperature unit to use. Infer this from the users location.",
                    },
                    "num_days": {
                        "type": "integer",
                        "description": "The number of days to forecast",
                    },
                },
                "required": ["location", "format", "num_days"],
                "additionalProperties": False,
            },
        },
    },
]


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_nostream(
    flash_llama_grammar_tools, response_snapshot
):
    # Non-streaming request with tools: the reply must be a tool call, not text.
    client = InferenceClient(base_url=f"{flash_llama_grammar_tools.base_url}/v1")
    response = client.chat_completion(
        max_tokens=100,
        seed=1,
        tools=tools,
        temperature=0.0,
        messages=[
            {
                "role": "system",
                "content": "Youre a helpful assistant! Answer the users question best you can.",
            },
            {
                "role": "user",
                "content": "What is the weather like in Brooklyn, New York?",
            },
        ],
    )
    assert response.choices[0].message.content is None
    assert response.choices[0].message.tool_calls == [
        ChatCompletionOutputToolCall(
            id="0",
            type="function",
            function=ChatCompletionOutputFunctionDefinition(
                description=None,
                name="get_current_weather",
                arguments='{"location":"Brooklyn, NY","format":"fahrenheit"}',
            ),
        )
    ]
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_openai(
    flash_llama_grammar_tools, response_snapshot
):
    # Same scenario through the official OpenAI client, streamed: the tool name
    # and JSON arguments are accumulated from the per-chunk deltas.
    client = OpenAI(api_key="xx", base_url=f"{flash_llama_grammar_tools.base_url}/v1")
    stream = client.chat.completions.create(
        model="tgi",
        max_tokens=100,
        seed=1,
        tools=tools,
        stream=True,
        temperature=0.0,
        messages=[
            {
                "role": "system",
                "content": "Youre a helpful assistant! Answer the users question best you can.",
            },
            {
                "role": "user",
                "content": "What is the weather like in Brooklyn, New York?",
            },
        ],
    )

    chunks = []
    tool = ""
    name = ""
    for chunk in stream:
        if chunk.choices[0].delta.tool_calls[0].function.name:
            name += chunk.choices[0].delta.tool_calls[0].function.name
        tool += chunk.choices[0].delta.tool_calls[0].function.arguments
        chunks.append(chunk)

    assert name == "get_current_weather"
    assert tool == '{ "location": "Brooklyn, NY", "format": "fahrenheit"}'
    assert chunks == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_auto_nostream(
    flash_llama_grammar_tools, response_snapshot
):
    # tool_choice="auto" with an answerable question still picks the weather tool.
    client = InferenceClient(base_url=f"{flash_llama_grammar_tools.base_url}/v1")
    response = client.chat_completion(
        max_tokens=100,
        seed=1,
        tools=tools,
        temperature=0.0,
        tool_choice="auto",
        messages=[
            {
                "role": "system",
                "content": "Youre a helpful assistant! Answer the users question best you can.",
            },
            {
                "role": "user",
                "content": "What is the weather like in Brooklyn, New York?",
            },
        ],
    )
    assert response.choices[0].message.content is None
    assert response.choices[0].message.tool_calls == [
        ChatCompletionOutputToolCall(
            id="0",
            type="function",
            function=ChatCompletionOutputFunctionDefinition(
                description=None,
                name="get_current_weather",
                arguments='{"location":"Brooklyn, NY","format":"fahrenheit"}',
            ),
        )
    ]
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_choice_nostream(
    flash_llama_grammar_tools, response_snapshot
):
    # Forcing a specific tool by name (string form of tool_choice).
    client = InferenceClient(base_url=f"{flash_llama_grammar_tools.base_url}/v1")
    response = client.chat_completion(
        max_tokens=100,
        seed=1,
        tools=tools,
        temperature=0.0,
        tool_choice="get_current_weather",
        messages=[
            {
                "role": "system",
                "content": "Youre a helpful assistant! Answer the users question best you can.",
            },
            {
                "role": "user",
                "content": "What is the weather like in Brooklyn, New York?",
            },
        ],
    )
    assert response.choices[0].message.content is None
    assert response.choices[0].message.tool_calls == [
        ChatCompletionOutputToolCall(
            id="0",
            type="function",
            function=ChatCompletionOutputFunctionDefinition(
                description=None,
                name="get_current_weather",
                arguments='{"location":"Brooklyn, NY","format":"fahrenheit"}',
            ),
        )
    ]
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_choice_stream(
    flash_llama_grammar_tools, response_snapshot
):
    # Streaming variant of the forced-tool case: every chunk must carry a tool
    # delta (no text content).
    client = InferenceClient(base_url=f"{flash_llama_grammar_tools.base_url}/v1")
    stream = client.chat_completion(
        max_tokens=100,
        seed=1,
        tools=tools,
        temperature=0.0,
        tool_choice="get_current_weather",
        messages=[
            {
                "role": "system",
                "content": "Youre a helpful assistant! Answer the users question best you can.",
            },
            {
                "role": "user",
                "content": "What is the weather like in Brooklyn, New York?",
            },
        ],
        stream=True,
    )

    arguments = ""
    chunks = []
    name = ""
    for chunk in stream:
        if chunk.choices[0].delta.tool_calls[0].function.name:
            name += chunk.choices[0].delta.tool_calls[0].function.name
        arguments += chunk.choices[0].delta.tool_calls[0].function.arguments
        assert chunk.choices[0].delta.content is None
        chunks.append(chunk)

    assert name == "get_current_weather"
    assert arguments == '{ "location": "Brooklyn, NY", "format": "fahrenheit"}'
    assert chunks == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_insufficient_information_nostream(
    flash_llama_grammar_tools, response_snapshot
):
    # With tool_choice="auto" and a question no tool can answer, the model must
    # fall back to plain text and emit no tool call.
    client = InferenceClient(base_url=f"{flash_llama_grammar_tools.base_url}/v1")
    response = client.chat_completion(
        max_tokens=20,
        seed=24,
        tools=tools,
        tool_choice="auto",
        messages=[
            {
                "role": "system",
                "content": "You're a helpful assistant! Answer the users question best you can.",
            },
            {
                "role": "user",
                "content": "Who are you?",
            },
        ],
        stream=False,
    )
    content_generated = response.choices[0].message.content
    assert response.choices[0].message.tool_calls is None
    assert (
        content_generated
        == "I'm an artificial intelligence model known as a large language model (LLM) or conversational AI"
    )
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_insufficient_information_stream(
    flash_llama_grammar_tools, response_snapshot
):
    # Streaming counterpart of the test above.
    client = InferenceClient(base_url=f"{flash_llama_grammar_tools.base_url}/v1")
    stream = client.chat_completion(
        max_tokens=20,
        seed=24,
        tools=tools,
        tool_choice="auto",
        messages=[
            {
                "role": "system",
                "content": "You're a helpful assistant! Answer the users question best you can.",
            },
            {
                "role": "user",
                "content": "Who are you?",
            },
        ],
        stream=True,
    )

    content_generated = ""
    chunks = []
    for chunk in stream:
        content_generated += chunk.choices[0].delta.content
        chunks.append(chunk)
        assert chunk.choices[0].delta.tool_calls is None

    ######## This is exactly the same as the non streaming case
    assert (
        content_generated
        == "I'm an artificial intelligence model known as a large language model (LLM) or conversational AI"
    )
    assert chunks == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_sea_creatures_stream_auto(
    flash_llama_grammar_tools, response_snapshot
):
    # tool_choice="auto" with an off-topic creative prompt: expect a plain
    # story, no tool calls.
    client = InferenceClient(base_url=f"{flash_llama_grammar_tools.base_url}/v1")
    stream = client.chat_completion(
        max_tokens=20,
        seed=24,
        tools=tools,
        tool_choice="auto",
        messages=[
            {
                "role": "system",
                "content": "You're a helpful assistant! Answer the users question best you can. If the question is not answerable by the tools, just generate a response.",
            },
            {
                "role": "user",
                "content": "Tell me a story about 3 sea creatures",
            },
        ],
        stream=True,
    )

    content_generated = ""
    chunks = []
    for chunk in stream:
        content_generated += chunk.choices[0].delta.content
        chunks.append(chunk)
        assert chunk.choices[0].delta.tool_calls is None

    assert (
        content_generated
        == "Once upon a time, in a vibrant ocean filled with coral reefs and schools of shimmering fish,"
    )
    assert chunks == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_sea_creatures_stream_required(
    flash_llama_grammar_tools, response_snapshot
):
    # tool_choice="required" forces a tool call even for an off-topic prompt.
    client = InferenceClient(base_url=f"{flash_llama_grammar_tools.base_url}/v1")
    stream = client.chat_completion(
        max_tokens=100,
        seed=24,
        tools=tools,
        tool_choice="required",
        messages=[
            {
                "role": "system",
                "content": "You're a helpful assistant! Answer the users question best you can. If the question is not answerable by the tools, just generate a response.",
            },
            {
                "role": "user",
                "content": "Tell me a story about 3 sea creatures",
            },
        ],
        stream=True,
    )

    tool_calls_generated = ""
    name = ""
    chunks = []
    # NOTE(review): `chunks` is never appended to in this loop, so the final
    # snapshot assertion compares an empty list — confirm this is intentional.
    for chunk in stream:
        assert chunk.choices[0].delta.content is None
        if chunk.choices[0].delta.tool_calls[0].function.name:
            name += chunk.choices[0].delta.tool_calls[0].function.name
        tool_calls_generated += chunk.choices[0].delta.tool_calls[0].function.arguments

    assert name == "get_n_day_weather_forecast"
    assert (
        tool_calls_generated
        == '{ "location": "San Francisco, CA", "format": "fahrenheit", "num_days":3}'
    )
    assert chunks == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_sea_creatures_stream_none(
    flash_llama_grammar_tools, response_snapshot
):
    # tool_choice="none" disables tools entirely: only text content is allowed.
    client = InferenceClient(base_url=f"{flash_llama_grammar_tools.base_url}/v1")
    stream = client.chat_completion(
        max_tokens=100,
        seed=24,
        tools=tools,
        tool_choice="none",
        messages=[
            {
                "role": "system",
                "content": "You're a helpful assistant! Answer the users question best you can. If the question is not answerable by the tools, just generate a response.",
            },
            {
                "role": "user",
                "content": "Tell me a story about 3 sea creatures",
            },
        ],
        stream=True,
    )

    content_generated = ""
    chunks = []
    for chunk in stream:
        chunks.append(chunk)
        content_generated += chunk.choices[0].delta.content
        assert chunk.choices[0].delta.tool_calls is None

    assert (
        content_generated
        == "Once upon a time, in a vibrant ocean filled with coral reefs and schools of shimmering fish, lived three dear friends: Luna the sea turtle, Finley the friendly fish, and Crusty the wise crab.\n\nLuna was the oldest of the three. She had traveled the world, exploring hidden caves and shipwrecks, and collecting sparkling shells and shiny pebbles. Her shell was a beautiful mosaic of blues and greens, and her gentle eyes twinkled with the secrets of the deep"
    )
    assert chunks == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_sea_creatures_stream_function_object(
    flash_llama_grammar_tools, response_snapshot
):
    # tool_choice given as an OpenAI-style function object selects that tool.
    client = InferenceClient(base_url=f"{flash_llama_grammar_tools.base_url}/v1")
    stream = client.chat_completion(
        messages=[
            {
                "role": "system",
                "content": "You're a helpful assistant! Answer the users question best you can. If the question is not answerable by the tools, just generate a response.",
            },
            {
                "role": "user",
                "content": "Tell me a story about 3 sea creatures",
            },
        ],
        tools=tools,
        tool_choice={
            "type": "function",
            "function": {"name": "get_n_day_weather_forecast"},
        },
        max_tokens=100,
        seed=24,
        stream=True,
    )

    chunks = []
    tool_calls_generated = ""
    name = ""
    # NOTE(review): as in the "required" test above, `chunks` stays empty here;
    # the snapshot assertion therefore checks an empty list — confirm intent.
    for chunk in stream:
        assert chunk.choices[0].delta.content is None
        if chunk.choices[0].delta.tool_calls[0].function.name:
            name += chunk.choices[0].delta.tool_calls[0].function.name
        tool_calls_generated += chunk.choices[0].delta.tool_calls[0].function.arguments

    assert name == "get_n_day_weather_forecast"
    assert (
        tool_calls_generated
        == '{ "location": "San Francisco, CA", "format": "celsius", "num_days": 3}'
    )
    assert chunks == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_tool_reply_response(
    flash_llama_grammar_tools, response_snapshot
):
    # Feeding a prior assistant tool call plus its tool result back in must
    # produce a plain-text summary, not another tool call.
    client = InferenceClient(base_url=f"{flash_llama_grammar_tools.base_url}/v1")
    response = client.chat_completion(
        max_tokens=100,
        seed=42,
        messages=[
            {"role": "user", "content": "What's the weather like in Paris today?"},
            {
                "role": "assistant",
                "tool_calls": [
                    {
                        "id": "0",
                        "function": {
                            "arguments": '{"longitude": 2.2945, "latitude": 48.8567}',
                            "name": "get_weather",
                            "description": None,
                        },
                        "type": "function",
                    }
                ],
            },
            {"role": "tool", "tool_call_id": "0", "content": "6.7"},
        ],
        stream=False,
    )

    assert response.choices[0].message.tool_calls is None
    assert (
        response.choices[0].message.content
        == "I can't access real-time data, but I can provide you with current conditions and forecast for Paris, France:\n\nThe current conditions in Paris are mostly cloudy with a temperature of 6.7°C (44.1°F). \n\nPlease note that the actual weather may differ from the provided information. For up-to-date information, I suggest checking a reliable weather website or app for the latest conditions and forecast."
    )
    assert response == response_snapshot
text-generation-inference/integration-tests/models/test_tools_llama.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_tools_llama.py", "repo_id": "text-generation-inference", "token_count": 8482 }
309
import { check } from 'k6'; import { scenario } from 'k6/execution'; import http from 'k6/http'; import { Trend, Counter } from 'k6/metrics'; const host = __ENV.HOST; const model_id = __ENV.MODEL_ID; const timePerToken = new Trend('time_per_token', true); const tokens = new Counter('tokens'); const new_tokens = new Counter('new_tokens'); const input_tokens = new Counter('input_tokens'); const max_new_tokens = 50; // const shareGPT = JSON.parse(open("ShareGPT_V3_unfiltered_cleaned_split.json")) const shareGPT = JSON.parse(open("small.json")) export function get_options() { return { thresholds: { http_req_failed: ['rate==0'], // time_per_token: [{ // threshold: `p(50)<${5 * reference_latency_ms}`, // abortOnFail: true, // delayAbortEval: '10s' // }], }, scenarios: { // single_user: { // executor: 'constant-arrival-rate', // duration: '60s', // preAllocatedVUs: 1, // rate: 20, // timeUnit: '1s', // }, // load_test: { // executor: 'constant-arrival-rate', // duration: '60s', // preAllocatedVUs: 100, // rate: 1, // timeUnit: '1s', // }, // breakpoint: { // executor: 'ramping-arrival-rate', //Assure load increase if the system slows // preAllocatedVUs: 300, // stages: [ // { duration: '60s', target: 100 }, // just slowly ramp-up to a HUGE load // ], // }, throughput: { executor: 'shared-iterations', vus: 100, iterations: 200, maxDuration: '40s', }, }, }; } function generate_payload(gpt, max_new_tokens) { const input = gpt["conversations"][0]["value"]; return { "messages": [{ "role": "user", "content": input }], "temperature": 0, "model": `${model_id}`, "max_tokens": max_new_tokens } } export const options = get_options(); export default function run() { const headers = { 'Content-Type': 'application/json' }; const query = shareGPT[scenario.iterationInTest % shareGPT.length]; const payload = JSON.stringify(generate_payload(query, max_new_tokens)); const res = http.post(`http://${host}/v1/chat/completions`, payload, { headers, }); if (res.status >= 400 && res.status < 500) 
{ return; } check(res, { 'Post status is 200': (res) => res.status === 200, }); const duration = res.timings.duration; if (res.status === 200) { const body = res.json(); const completion_tokens = body.usage.completion_tokens; const latency_ms_per_token = duration / completion_tokens; timePerToken.add(latency_ms_per_token); const prompt_tokens = body.usage.prompt_tokens; input_tokens.add(prompt_tokens); new_tokens.add(completion_tokens); tokens.add(completion_tokens + prompt_tokens); } }
text-generation-inference/load_tests/common.js/0
{ "file_path": "text-generation-inference/load_tests/common.js", "repo_id": "text-generation-inference", "token_count": 1530 }
310
[package] name = "text-generation-router" description = "Text Generation Webserver" build = "build.rs" version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true [dependencies] anyhow = "1" async-trait = "0.1.74" async-stream = "0.3.5" axum = { version = "0.7", features = ["json"] } axum-tracing-opentelemetry = "0.16" clap = { version = "4.4.5", features = ["derive", "env"] } futures = "0.3.28" hf-hub = { workspace = true } itertools = "0.10" jsonschema = { version = "0.28.0" } metrics = { workspace = true } metrics-exporter-prometheus = { workspace = true } nohash-hasher = "0.2.0" opentelemetry = { version = "0.20.0", features = ["rt-tokio"] } opentelemetry-otlp = "0.13.0" outlines-core = { git = "https://github.com/dottxt-ai/outlines-core.git", rev = "ba10c619fc9bf3c487e43f49bdecb95a24bb465c" } rand = "0.8.5" reqwest = { version = "0.11.20", features = ["blocking"] } serde = "1.0.188" serde_json = "1.0.107" thiserror = "1.0.48" tokenizers = { workspace = true } tokio = { version = "1.32.0", features = [ "rt", "rt-multi-thread", "parking_lot", "signal", "sync", ] } tokio-stream = "0.1.14" tower-http = { version = "0.5.1", features = ["cors"] } tracing = "0.1.40" tracing-opentelemetry = "0.21.0" tracing-subscriber = { version = "0.3.18", features = ["json", "env-filter"] } utoipa = { version = "4.2.0", features = ["axum_extras"] } utoipa-swagger-ui = { version = "6.0.0", features = ["axum"] } ngrok = { version = "0.13.1", features = ["axum"], optional = true } init-tracing-opentelemetry = { version = "0.14.1", features = [ "opentelemetry-otlp", ] } minijinja = { workspace = true, features = ["loop_controls"] } minijinja-contrib = { workspace = true } futures-util = "0.3.30" regex = "1.10.3" once_cell = "1.19.0" image = "0.25.1" base64 = { workspace = true } sysinfo = "0.30.13" uuid = { version = "1.9.1", default-features = false, features = [ "v4", "fast-rng", "macro-diagnostics", ] } csv = "1.3.0" ureq = "=2.9" pyo3 = { 
workspace = true } chrono = "0.4.39" [build-dependencies] vergen = { version = "8.2.5", features = ["build", "git", "gitcl"] } [features] default = ["ngrok"] ngrok = ["dep:ngrok"] google = [] kserve = []
text-generation-inference/router/Cargo.toml/0
{ "file_path": "text-generation-inference/router/Cargo.toml", "repo_id": "text-generation-inference", "token_count": 944 }
311
[toolchain] # Released on: 30 January, 2025 # https://releases.rs/docs/1.84.1/ channel = "1.85.1" components = ["rustfmt", "clippy"]
text-generation-inference/rust-toolchain.toml/0
{ "file_path": "text-generation-inference/rust-toolchain.toml", "repo_id": "text-generation-inference", "token_count": 54 }
312
from setuptools import setup from torch.utils.cpp_extension import BuildExtension, CUDAExtension extra_compile_args = ["-std=c++17"] setup( name="custom_kernels", ext_modules=[ CUDAExtension( name="custom_kernels.fused_bloom_attention_cuda", sources=["custom_kernels/fused_bloom_attention_cuda.cu"], extra_compile_args=extra_compile_args, ), CUDAExtension( name="custom_kernels.fused_attention_cuda", sources=["custom_kernels/fused_attention_cuda.cu"], extra_compile_args=extra_compile_args, ), ], cmdclass={"build_ext": BuildExtension}, )
text-generation-inference/server/custom_kernels/setup.py/0
{ "file_path": "text-generation-inference/server/custom_kernels/setup.py", "repo_id": "text-generation-inference", "token_count": 309 }
313
#ifndef _config_h #define _config_h #define MAX_Q_GEMM_ROWS 50 #define MAX_Q_GEMM_WEIGHTS 4 // must be <= MAX_Q_GEMM_ROWS #define QMODE_2BIT 1 #define QMODE_3BIT 1 #define QMODE_4BIT 1 #define QMODE_5BIT 1 #define QMODE_6BIT 0 #define QMODE_8BIT 0 #endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/config.h/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/config.h", "repo_id": "text-generation-inference", "token_count": 119 }
314
#ifndef _qdq_util_cuh #define _qdq_util_cuh union half2_uint32 { uint32_t as_uint32; half2 as_half2; __device__ half2_uint32(uint32_t val) : as_uint32(val) {} __device__ half2_uint32(half2 val) : as_half2(val) {} __device__ half2_uint32() : as_uint32(0) {} }; union half_uint16 { uint16_t as_uint16; half as_half; __device__ half_uint16(uint16_t val) : as_uint16(val) {} __device__ half_uint16(half val) : as_half(val) {} __device__ half_uint16() : as_uint16(0) {} }; // Max_scale premultiplied by 1/256 __forceinline__ __device__ half dq_scale(const int qs, const half max_scale) { int qs_i = qs + 1; half qs_h = __int2half_rn(qs_i * qs_i); qs_h = __hmul(qs_h, max_scale); return qs_h; } __forceinline__ __device__ half dq(const int q, const int qzero, const half scale) { return __hmul(__int2half_rn(q - qzero), scale); } __forceinline__ __device__ half dq_ns(const int q, const int qzero) { //return __hsub(__int2half_rn(q), __int2half_rn(qzero)); return __int2half_rn(q - qzero); } __forceinline__ __device__ int exb(const uint32_t q, const int shift, const int mask) { return (int)((q >> shift) & mask); } __forceinline__ __device__ int exb(const uint32_t q1, const uint32_t q0, const int shift, const int mask) { return (int)(__funnelshift_rc(q0, q1, shift) & mask); } #endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_util.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_util.cuh", "repo_id": "text-generation-inference", "token_count": 602 }
315
import pytest import torch from copy import copy from transformers import AutoTokenizer from text_generation_server.pb import generate_pb2 from text_generation_server.models.seq2seq_lm import Seq2SeqLM, Seq2SeqLMBatch @pytest.fixture(scope="session") def mt0_small_tokenizer(): tokenizer = AutoTokenizer.from_pretrained( "bigscience/mt0-small", padding_side="left" ) tokenizer.bos_token_id = 0 return tokenizer @pytest.fixture(scope="session") def default_seq2seq_lm(): return Seq2SeqLM.fallback("bigscience/mt0-small") @pytest.fixture def default_pb_request(default_pb_parameters, default_pb_stop_parameters): return generate_pb2.Request( id=0, inputs="Test", input_chunks=generate_pb2.Input(chunks=[generate_pb2.InputChunk(text="Test")]), prefill_logprobs=True, truncate=100, parameters=default_pb_parameters, stopping_parameters=default_pb_stop_parameters, ) @pytest.fixture def default_pb_batch(default_pb_request): return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1) @pytest.fixture def default_seq2seq_lm_batch(default_pb_batch, mt0_small_tokenizer): return Seq2SeqLMBatch.from_pb( default_pb_batch, mt0_small_tokenizer, torch.float32, torch.device("cpu") ) @pytest.fixture def default_multi_requests_seq2seq_lm_batch(default_pb_request, mt0_small_tokenizer): req_0 = copy(default_pb_request) req_0.id = 1 req_1 = default_pb_request req_1.id = 2 req_1.stopping_parameters.max_new_tokens = 5 batch_pb = generate_pb2.Batch(id=0, requests=[req_0, req_1], size=2) return Seq2SeqLMBatch.from_pb( batch_pb, mt0_small_tokenizer, torch.float32, torch.device("cpu") ) def test_batch_from_pb(default_pb_batch, default_seq2seq_lm_batch): batch = default_seq2seq_lm_batch sequence_length = len(default_seq2seq_lm_batch.input_ids[0]) assert batch.batch_id == default_pb_batch.id assert batch.requests == default_pb_batch.requests assert batch.input_ids.shape == (default_pb_batch.size, sequence_length) assert batch.input_ids[0][-2] == 4268 assert batch.input_ids[0][-1] == 1 assert 
torch.all(batch.input_ids[0][:-2] == 0) assert torch.all(batch.attention_mask[0][-2:] == 1) assert torch.all(batch.attention_mask[0][:-2] == 0) assert len(batch.decoder_input_ids) == default_pb_batch.size assert batch.decoder_attention_mask is None assert batch.encoder_last_hidden_state is None assert batch.past_key_values is None assert batch.input_lengths == [2] assert batch.decoder_input_lengths == [1] assert len(batch) == default_pb_batch.size assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch) assert batch.max_input_length == batch.input_lengths[0] assert batch.max_decoder_input_length == batch.decoder_input_lengths[0] def test_batch_concatenate_no_prefill(default_seq2seq_lm_batch): with pytest.raises(ValueError): Seq2SeqLMBatch.concatenate([default_seq2seq_lm_batch, default_seq2seq_lm_batch]) def test_seq2seq_lm_batch_type(default_seq2seq_lm): assert default_seq2seq_lm.batch_type == Seq2SeqLMBatch def test_seq2seq_lm_generate_token(default_seq2seq_lm, default_seq2seq_lm_batch): sequence_length = len(default_seq2seq_lm_batch.input_ids[0]) generations, next_batch, _ = default_seq2seq_lm.generate_token( default_seq2seq_lm_batch ) assert len(generations) == len(next_batch) assert isinstance(next_batch, Seq2SeqLMBatch) assert next_batch.input_ids is None assert torch.equal( next_batch.attention_mask, default_seq2seq_lm_batch.attention_mask ) assert next_batch.input_lengths == default_seq2seq_lm_batch.input_lengths assert next_batch.max_input_length == default_seq2seq_lm_batch.max_input_length assert ( next_batch.next_token_choosers == default_seq2seq_lm_batch.next_token_choosers ) assert next_batch.stopping_criterias == default_seq2seq_lm_batch.stopping_criterias assert len(next_batch.decoder_input_ids) == len(next_batch) assert next_batch.all_decoder_input_ids[0][0] == 0 assert next_batch.all_decoder_input_ids[0][1] == 259 assert next_batch.decoder_attention_mask is None assert next_batch.encoder_last_hidden_state.shape == (1, 
sequence_length, 512) assert next_batch.decoder_input_lengths == [2] assert next_batch.max_decoder_input_length == 2 assert next_batch.past_key_values is not None assert all( [p[0].shape == (len(next_batch), 6, 1, 64) for p in next_batch.past_key_values] ) assert all( [p[1].shape == (len(next_batch), 6, 1, 64) for p in next_batch.past_key_values] ) assert all( [ p[2].shape == (len(next_batch), 6, sequence_length, 64) for p in next_batch.past_key_values ] ) assert all( [ p[3].shape == (len(next_batch), 6, sequence_length, 64) for p in next_batch.past_key_values ] ) assert all([generation.generated_text is None for generation in generations]) assert all([len(generation.prefill_tokens) == 1 for generation in generations]) assert all( [ token_id.item() == 259 for generation in generations for token_id in generation.tokens.token_ids ] ) assert all( [ token_text == " " for generation in generations for token_text in generation.tokens.texts ] ) assert generations[0].request_id == 0 def test_seq2seq_lm_generate_token_completion( default_seq2seq_lm, default_seq2seq_lm_batch ): next_batch = default_seq2seq_lm_batch for _ in range(6): generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 assert generations[0].generated_text.text == "a few weeks" assert generations[0].request_id == default_seq2seq_lm_batch.requests[0].id assert generations[0].generated_text.generated_tokens == 7 def test_seq2seq_lm_generate_token_completion_multi( default_seq2seq_lm, default_multi_requests_seq2seq_lm_batch ): next_batch = default_multi_requests_seq2seq_lm_batch for i in range(4): generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert next_batch is not None 
assert len(generations) == 2 assert generations[1].generated_text.text == "a few " assert ( generations[1].request_id == default_multi_requests_seq2seq_lm_batch.requests[1].id ) assert generations[1].generated_text.generated_tokens == 5 next_batch = next_batch.filter([next_batch.requests[0].id]) generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 assert generations[0].generated_text.text == "a few weeks" assert ( generations[0].request_id == default_multi_requests_seq2seq_lm_batch.requests[0].id ) assert generations[0].generated_text.generated_tokens == 7 def test_batch_concatenate( default_seq2seq_lm, default_seq2seq_lm_batch, default_multi_requests_seq2seq_lm_batch, ): next_batch_0 = default_seq2seq_lm_batch _, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0) _, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0) next_batch_1 = default_multi_requests_seq2seq_lm_batch _, next_batch_1, _ = default_seq2seq_lm.generate_token(next_batch_1) # Copy hidden state because it is removed from the concatenated branches next_batch_0_encoder_last_hidden_state = next_batch_0.encoder_last_hidden_state next_batch_1_encoder_last_hidden_state = next_batch_1.encoder_last_hidden_state # Clone past_key_values before concatenating to compare after, # because they are removed from the concatenated batches next_batch_0_past_key_values = [ [t.clone() for t in layer] for layer in next_batch_0.past_key_values ] next_batch_1_past_key_values = [ [t.clone() for t in layer] for layer in next_batch_1.past_key_values ] next_batch = Seq2SeqLMBatch.concatenate([next_batch_0, next_batch_1]) assert next_batch.batch_id == 0 assert torch.equal( next_batch.decoder_input_ids[0], next_batch_0.decoder_input_ids[0] ) assert next_batch.all_decoder_input_ids[1][0] == 0 assert 
next_batch.all_decoder_input_ids[2][0] == 0 assert torch.equal( next_batch.decoder_input_ids[1:, -2:], next_batch_1.decoder_input_ids ) assert torch.all(next_batch.decoder_attention_mask[0, :3] == 1) assert torch.all(next_batch.decoder_attention_mask[0, 3:] == 0) assert torch.all(next_batch.decoder_attention_mask[1:, 0] == 0) assert torch.all(next_batch.decoder_attention_mask[1:, 1:3] == 1) assert torch.equal( next_batch.encoder_last_hidden_state[0], next_batch_0_encoder_last_hidden_state[0, -2:], ) assert torch.equal( next_batch.encoder_last_hidden_state[1:], next_batch_1_encoder_last_hidden_state[:, -2:], ) assert next_batch.input_lengths == [2, 2, 2] assert next_batch.decoder_input_lengths == [3, 2, 2] assert next_batch.max_input_length == 2 assert next_batch.max_decoder_input_length == 3 assert next_batch.requests[0] == next_batch_0.requests[0] assert next_batch.requests[1:] == list(next_batch_1.requests) assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0] assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0] assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias assert next_batch.past_key_values is not None assert all( [p[0].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values] ) assert all( [p[1].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values] ) assert all( [p[2].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values] ) assert all( [p[3].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values] ) for i, past in enumerate(next_batch.past_key_values): assert torch.equal(next_batch_0_past_key_values[i][0][0, :, -2:, :], past[0][0]) assert torch.equal( next_batch_1_past_key_values[i][0][:, :, -1:, :], past[0][1:, :, -1:, :] ) assert torch.equal(next_batch_0_past_key_values[i][1][0, :, -2:, :], past[1][0]) assert torch.equal( 
next_batch_1_past_key_values[i][1][:, :, -1:, :], past[1][1:, :, -1:, :] ) assert torch.equal(next_batch_0_past_key_values[i][2][0, :, -2:, :], past[2][0]) assert torch.equal( next_batch_1_past_key_values[i][2][:, :, -2:, :], past[2][1:] ) assert torch.equal(next_batch_0_past_key_values[i][3][0, :, -2:, :], past[3][0]) assert torch.equal( next_batch_1_past_key_values[i][3][:, :, -2:, :], past[3][1:] ) for _ in range(3): generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert next_batch is not None assert len(generations) == 3 assert generations[2].generated_text.text == "a few " assert ( generations[2].request_id == default_multi_requests_seq2seq_lm_batch.requests[1].id ) assert generations[2].generated_text.generated_tokens == 5 next_batch = next_batch.filter( [next_batch.requests[0].id, next_batch.requests[1].id] ) generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert next_batch is not None assert len(generations) == 2 assert generations[0].generated_text.text == "a few weeks" assert generations[0].request_id == default_seq2seq_lm_batch.requests[0].id assert generations[0].generated_text.generated_tokens == 7 next_batch = next_batch.filter([next_batch.requests[1].id]) generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 assert generations[0].generated_text.text == "a few weeks" assert ( generations[0].request_id == default_multi_requests_seq2seq_lm_batch.requests[0].id ) assert generations[0].generated_text.generated_tokens == 7
text-generation-inference/server/tests/models/test_seq2seq_lm.py/0
{ "file_path": "text-generation-inference/server/tests/models/test_seq2seq_lm.py", "repo_id": "text-generation-inference", "token_count": 5528 }
316
from typing import List, Optional, Union, TypeVar from dataclasses import dataclass from loguru import logger import torch from compressed_tensors.quantization import QuantizationArgs, QuantizationType from text_generation_server.layers.fp8 import _load_scalar_or_matrix_scale from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.utils.kernels import load_kernel from text_generation_server.utils.log import log_once from text_generation_server.utils.weights import Weight, Weights, WeightsLoader if SYSTEM == "cuda": quantization = load_kernel( module="quantization", repo_id="kernels-community/quantization" ) else: quantization = None class W8A8IntLoader(WeightsLoader): """ Loader for w8a8 integer compressed-tensors parameters. """ def __init__( self, *, input_args: Optional[QuantizationArgs], weight_args: QuantizationArgs, ): if weight_args.type != QuantizationType.INT and weight_args.num_bits != 8: raise ValueError( f"{type(self).__name__} only supports w8a8 int checkpoints" ) if not weight_args.symmetric: raise ValueError("Checkpoints with asymmetric weights are not supported") self.load_weight_scale = not weight_args.dynamic if input_args is not None: self.input_symmetric = input_args.symmetric if not input_args.dynamic: log_once( logger.warning, "Forcing dynamic input quantization for compressed_tensors w8a8 int checkpoint (for better accuracy).", ) else: self.input_symmetric = True def __str__(self) -> str: def scale_to_str(scale): return "static" if scale else "dynamic" def symmetric_to_str(symmetric): return "symmetric" if symmetric else "asymmetric" return f"{self.__class__.__name__} (w8a8 int, input: dynamic/{symmetric_to_str(self.input_symmetric)}, weight: {scale_to_str(self.load_weight_scale)}/symmetric))" def get_weights(self, weights: "Weights", prefix: str): w = weights.get_tensor(f"{prefix}.weight", to_dtype=False) weight_scale = None if self.load_weight_scale: weight_scale = weights.get_tensor( 
f"{prefix}.weight_scale", to_dtype=False ).reshape(-1) return Int8Weight( input_symmetric=self.input_symmetric, weight=w, weight_scale=weight_scale, ) def get_weights_col_packed( self, weights: Weights, prefix: str, block_sizes: Union[int, List[int]], ): w = weights.get_packed_sharded( f"{prefix}.weight", dim=0, block_sizes=block_sizes, to_dtype=False ) weight_scale = None if self.load_weight_scale: weight_scale = weights.get_tensor(f"{prefix}.weight_scale", to_dtype=False) if weight_scale.numel() > 1: weight_scale = weights.get_packed_sharded( f"{prefix}.weight_scale", dim=0, block_sizes=block_sizes, to_dtype=False, ) weight_scale = weight_scale.reshape(-1) return Int8Weight( input_symmetric=self.input_symmetric, weight=w, weight_scale=weight_scale, ) def get_multi_weights_col(self, weights: "Weights", prefixes: List[str], dim: int): w = [ weights.get_sharded(f"{p}.weight", dim=0, to_dtype=False) for p in prefixes ] shapes = [x.shape for x in w] w = torch.cat(w, dim=dim) weight_scale = None if self.load_weight_scale: weight_scale = [ _load_scalar_or_matrix_scale(weights, f"{p}.weight_scale", shape) for p, shape in zip(prefixes, shapes) ] weight_scale = torch.cat(weight_scale, dim=0).reshape(-1, 1) return Int8Weight( input_symmetric=self.input_symmetric, weight=w, weight_scale=weight_scale, ) def get_weights_row(self, weights: "Weights", prefix: str): w = weights.get_sharded(f"{prefix}.weight", dim=1, to_dtype=False) weight_scale = None if self.load_weight_scale: weight_scale = weights.get_tensor( f"{prefix}.weight_scale", to_dtype=False ).reshape(-1) return Int8Weight( input_symmetric=self.input_symmetric, weight=w, weight_scale=weight_scale, ) OtherT = TypeVar("OtherT") def _get_tensor_or_else( weights: Weights, prefix: str, other: OtherT ) -> Union[torch.Tensor, OtherT]: # Even if a checkpoint uses e.g. 
zero-points, they can be elided: # https://github.com/neuralmagic/compressed-tensors/blob/db6ccb25b265e8370813ecab5e95714a6728b5a6/src/compressed_tensors/compressors/quantized_compressors/base.py#L105 if weights.has_tensor(prefix): return weights.get_tensor(prefix, to_dtype=False) else: return other @dataclass class Int8Weight(Weight): input_symmetric: bool weight: torch.Tensor weight_scale: Optional[torch.Tensor] def get_linear(self, bias: torch.Tensor): if self.weight_scale is None: assert quantization is not None qweight, weight_scale, _ = quantization.scaled_int8_quant(self.weight) return W8A8IntLinear( bias=bias, input_symmetric=self.input_symmetric, weight=qweight, weight_scale=weight_scale, ) else: return W8A8IntLinear( bias=bias, input_symmetric=self.input_symmetric, weight=self.weight, weight_scale=self.weight_scale, ) class W8A8IntLinear(torch.nn.Module): def __init__( self, *, bias: Optional[torch.Tensor], input_symmetric: bool, weight: torch.Tensor, weight_scale: torch.Tensor, ): super().__init__() weight_scale = weight_scale.to(torch.float32) self.bias = bias self.input_symmetric = input_symmetric # cutlass kernels require transposed weights. 
self.weight = weight.t() self.weight_scale = weight_scale if input_symmetric: self.zero_point_adj = None else: # https://github.com/vllm-project/vllm/blob/8d59dbb00044a588cab96bcdc028006ed922eb06/csrc/quantization/cutlass_w8a8/Epilogues.md#scaledepilogueazp self.zero_point_adj = self.weight.sum( dim=0, keepdim=True, dtype=torch.int32 ) def forward(self, input: torch.Tensor) -> torch.Tensor: assert quantization is not None qinput, input_scale, input_zero_point = quantization.scaled_int8_quant( input=input, scale=None, azp=None, symmetric=self.input_symmetric, ) if self.input_symmetric: return quantization.cutlass_scaled_mm( a=qinput, b=self.weight, scale_a=input_scale, scale_b=self.weight_scale, out_dtype=input.dtype, bias=self.bias, ) else: assert ( self.zero_point_adj is not None and input_scale is not None and (self.input_symmetric or input_zero_point is not None) ) return quantization.cutlass_scaled_mm_azp( a=qinput, b=self.weight, scale_a=input_scale, scale_b=self.weight_scale, out_dtype=input.dtype, azp_adj=self.zero_point_adj, azp=input_zero_point, bias=self.bias, )
text-generation-inference/server/text_generation_server/layers/compressed_tensors/w8a8_int.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/compressed_tensors/w8a8_int.py", "repo_id": "text-generation-inference", "token_count": 3986 }
317
import torch from torch import nn from accelerate import init_empty_weights from text_generation_server.utils.import_utils import ( SYSTEM, ) # Monkey patching @classmethod def load_layer_norm(cls, prefix, weights, eps): weight = weights.get_tensor(f"{prefix}.weight") bias = weights.get_tensor(f"{prefix}.bias") with init_empty_weights(): ln = cls(weight.shape, eps=eps) ln.weight = torch.nn.Parameter(weight) ln.bias = torch.nn.Parameter(bias) return ln @classmethod def load_layer_norm_no_bias(cls, prefix, weights, eps): weight = weights.get_tensor(f"{prefix}.weight") with init_empty_weights(): ln = cls(weight.shape, eps=eps) ln.weight = torch.nn.Parameter(weight) ln.bias = None return ln torch.nn.LayerNorm.load = load_layer_norm torch.nn.LayerNorm.load_no_bias = load_layer_norm_no_bias if SYSTEM == "cuda": import dropout_layer_norm class FastLayerNorm(nn.LayerNorm): def forward(self, hidden_states, residual=None): if hidden_states.shape[-1] > 8192: if residual is not None: hidden_states += residual residual = hidden_states return super(FastLayerNorm, self).forward(hidden_states), residual else: ( normed_hidden_states, residual, *rest, ) = dropout_layer_norm.dropout_add_ln_fwd( hidden_states, residual, self.weight, self.bias, None, None, None, None, 0.0, self.eps, 1.0, 0, None, False, False, ) if residual is None: residual = hidden_states return normed_hidden_states, residual elif SYSTEM == "rocm": import vllm._custom_ops as ops class FastLayerNorm(nn.LayerNorm): def forward(self, hidden_states, residual=None): if residual is not None: hidden_states += residual residual = hidden_states return super().forward(hidden_states), residual elif SYSTEM == "ipex": import intel_extension_for_pytorch as ipex class FastLayerNorm(nn.LayerNorm): def forward(self, hidden_states, residual=None): out = ipex.llm.functional.add_layer_norm( residual, hidden_states, self.weight, self.bias, self.eps, residual is not None, ) return out, residual if residual is not None else hidden_states 
class FastRMSNorm(nn.Module):
    """RMSNorm with a backend-specific fused kernel, returning (output, residual).

    The `forward` contract mirrors `FastLayerNorm`: when `residual` is given it
    is added to `hidden_states` first, and the pre-normalization sum is
    returned as the new residual stream.
    """

    def __init__(self, weight: torch.Tensor, eps: float):
        super().__init__()

        self.weight = nn.Parameter(weight)
        self.variance_epsilon = eps

    @classmethod
    def load(cls, prefix, weights, eps=1e-6):
        """Build from the checkpoint scale tensor at `prefix` (RMSNorm has no bias)."""
        weight = weights.get_tensor(f"{prefix}.weight")
        return cls(weight, eps)

    def forward(self, hidden_states, residual=None):
        # NOTE: branch order matters — the >8192 eager fallback is checked
        # before the CUDA kernel branch because the fused kernel does not
        # support hidden sizes above 8192.
        if SYSTEM == "ipex":
            out = ipex.llm.functional.add_rms_norm(
                residual,
                hidden_states,
                self.weight,
                None,  # no bias in RMSNorm
                self.variance_epsilon,
                residual is not None,
            )
            return out, residual if residual is not None else hidden_states
        elif SYSTEM == "rocm":
            # We use VLLM RMSNorm kernel that can be compiled for RoCm, instead of Flash Attention ones that can not.
            if residual is not None:
                # In-place fused add + RMSNorm: `hidden_states` becomes the
                # normalized output and `residual` the pre-norm sum.
                ops.fused_add_rms_norm(
                    hidden_states,
                    residual,
                    self.weight.data,
                    self.variance_epsilon,
                )
                return hidden_states, residual
            residual = hidden_states
            out = torch.empty_like(hidden_states)
            ops.rms_norm(
                out,
                hidden_states,
                self.weight.data,
                self.variance_epsilon,
            )
            return out, residual
        elif hidden_states.shape[-1] > 8192:
            # Eager reference implementation (also used on CUDA for large
            # hidden sizes the fused kernel cannot handle).
            if residual is not None:
                hidden_states += residual
            residual = hidden_states

            # Compute variance in float32 for numerical stability.
            hidden_states = hidden_states.to(torch.float32)
            variance = hidden_states.pow(2).mean(-1, keepdim=True)
            hidden_states = hidden_states * torch.rsqrt(
                variance + self.variance_epsilon
            )

            # convert into half-precision if necessary
            if self.weight.dtype in [torch.float16, torch.bfloat16]:
                hidden_states = hidden_states.to(self.weight.dtype)

            return self.weight * hidden_states, residual
        elif SYSTEM == "cuda":
            # faster post attention rms norm
            (
                normed_hidden_states,
                res,
                *rest,
            ) = dropout_layer_norm.dropout_add_ln_fwd(
                hidden_states,
                residual,
                self.weight,
                None,  # RMSNorm has no bias
                None,
                None,
                None,
                None,
                0.0,  # dropout disabled at inference
                self.variance_epsilon,
                1.0,
                0,
                None,
                False,
                True,  # Activate RMSNorm
            )
            if res is None:
                # Kernel returns None when no residual was passed in; the
                # original input becomes the residual stream.
                res = hidden_states

            return normed_hidden_states, res
        else:
            raise ValueError(
                "Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction."
            )
text-generation-inference/server/text_generation_server/layers/layernorm.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/layernorm.py", "repo_id": "text-generation-inference", "token_count": 3189 }
318
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Flash-attention GPT-J implementation with tensor parallelism and a paged
# KV cache, for serving in text-generation-inference.

import torch
import torch.distributed

from torch import nn
from transformers.activations import ACT2FN
from typing import Optional, List, Tuple
from text_generation_server.layers.attention.kv_cache import get_kv_scales
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.layers.attention import (
    paged_attention,
    attention,
    Seqlen,
)
from text_generation_server.layers import (
    TensorParallelRowLinear,
    TensorParallelColumnLinear,
    TensorParallelEmbedding,
    SpeculativeHead,
    get_linear,
)
from text_generation_server.layers.rotary import (
    PositionRotaryEmbedding,
)
from text_generation_server.layers.layernorm import (
    FastLayerNorm,
)


def load_attention(config, prefix: str, weights):
    # Fuse q/k/v projections into a single column-parallel linear
    # (split along dim 0 across shards); GPT-J attention has no bias.
    return TensorParallelColumnLinear.load_multi(
        config,
        prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
        dim=0,
        weights=weights,
        bias=False,
    )


def load_row(config, prefix: str, weights, bias: bool):
    """Load a row-parallel linear; only rank 0 carries the bias so it is added once."""
    weight = weights.get_weights_row(prefix)

    if bias and weights.process_group.rank() == 0:
        # The bias is only applied on the first rank; other shards use None
        # so the bias is not added multiple times after the all-reduce.
        bias = weights.get_tensor(f"{prefix}.bias")
    else:
        bias = None

    linear = get_linear(weight, bias)
    return TensorParallelRowLinear(linear, process_group=weights.process_group)


class GPTJRotary(PositionRotaryEmbedding):
    # GPT-J interleaves rotary dimensions (even/odd pairs) rather than
    # splitting the head in halves, hence the custom forward.
    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        cos: torch.Tensor,
        sin: torch.Tensor,
    ):
        # Such controlflows may add some overhead.
        if SYSTEM == "cuda":
            from text_generation_server.utils.kernels import load_kernel

            rotary = load_kernel(module="rotary", repo_id="kernels-community/rotary")
            # In-place rotation of the interleaved even/odd slices.
            q1 = query[..., ::2]
            q2 = query[..., 1::2]

            rotary.apply_rotary(q1, q2, cos, sin, q1, q2, False)

            k1 = key[..., ::2]
            k2 = key[..., 1::2]

            rotary.apply_rotary(k1, k2, cos, sin, k1, k2, False)
        elif SYSTEM == "rocm":
            import vllm._custom_ops as ops

            # NOTE: On RoCm systems, we use a ROPE implementatation adapted from VLLM which launches a single kernel for both query/key, contrary to flash-attn implementation used on NVIDIA systems.
            # Compiling flash-attn rotary on RoCm, it appears hipcc is unable to unroll loops, resulting in an even slower inference compared to eager: https://github.com/pytorch/pytorch/issues/113773
            head_size = query.shape[-1]

            # Inplace operation, updating query and key.
            ops.rotary_embedding(query, key, head_size, cos, sin, False)
        elif SYSTEM == "ipex":
            import intel_extension_for_pytorch as ipex

            ipex.llm.functional.rotary_embedding(
                query, key, sin, cos, query.size(-1), False
            )
        else:
            raise ValueError(
                "Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction."
            )


class FlashGPTJAttention(torch.nn.Module):
    """GPT-J self-attention using flash attention (prefill) / paged attention (decode)."""

    def __init__(
        self,
        prefix: str,
        config,
        weights,
    ):
        super().__init__()
        self.num_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size

        self.head_size = self.hidden_size // self.num_heads
        self.softmax_scale = self.head_size**-0.5
        # GPT-J applies rotary embeddings only to the first `rotary_dim`
        # dimensions of each head.
        self.rotary_dim = config.rotary_dim

        if self.num_heads % weights.process_group.size() != 0:
            raise ValueError(
                f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
                f"and `num_shards`: {weights.process_group.size()}"
            )
        # Heads are sharded evenly across tensor-parallel ranks.
        self.num_heads = self.num_heads // weights.process_group.size()

        self.query_key_value = load_attention(
            config,
            prefix=prefix,
            weights=weights,
        )

        self.kv_scales = get_kv_scales(weights, f"{prefix}")

        self.o_proj = load_row(
            config,
            prefix=f"{prefix}.out_proj",
            weights=weights,
            bias=False,
        )

        # GPT-J has no grouped-query attention: identity head mapping.
        self.kv_head_mapping = torch.arange(
            0, self.num_heads, dtype=torch.int32, device=weights.device
        )
        self.rotary_emb = GPTJRotary.static(
            config=config,
            dim=self.rotary_dim,
            base=10000,
            device=weights.device,
        )

    def forward(
        self,
        hidden_states,
        cos,
        sin,
        cu_seqlen_prefill,
        kv_cache,
        block_tables,
        slots,
        seqlen,
        max_s,
    ):
        # Split the fused projection into equal q/k/v chunks.
        query, key, value = self.query_key_value(hidden_states).split(
            self.head_size * self.num_heads, dim=1
        )
        query = query.view(-1, self.num_heads, self.head_size)
        key = key.view(-1, self.num_heads, self.head_size)
        value = value.view(-1, self.num_heads, self.head_size)

        # Compute rotary embeddings on rotary_ndims
        if self.rotary_dim is not None:
            # Rotate only the leading rotary_dim slice of each head (in place).
            self.rotary_emb(
                query[..., : self.rotary_dim], key[..., : self.rotary_dim], cos, sin
            )
        else:
            self.rotary_emb(query, key, cos, sin)

        # Write this step's keys/values into the paged KV cache.
        kv_cache.store(
            key=key,
            value=value,
            slots=slots,
            kv_scales=self.kv_scales,
        )

        # Prefill
        if cu_seqlen_prefill is not None:
            # flash attention
            attn_output = attention(
                query=query,
                key=key,
                value=value,
                kv_cache=kv_cache,
                kv_scales=self.kv_scales,
                seqlen=seqlen,
                block_tables=block_tables,
                softmax_scale=self.softmax_scale,
            )
        # Decode
        else:
            attn_output = paged_attention(
                query,
                kv_cache,
                self.kv_head_mapping,
                self.softmax_scale,
                block_tables,
                seqlen,
                max_s,
                kv_scales=self.kv_scales,
            )

        return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size))


class GPTJMLP(nn.Module):
    """GPT-J feed-forward block (fc_in -> activation -> fc_out)."""

    def __init__(self, prefix: str, config, weights):
        super().__init__()
        act = config.activation_function
        # Use torch's fused gelu for the gelu variants (tanh approximation
        # for the "fast"/"pytorch_tanh" flavors), else look up ACT2FN.
        self.act = (
            ACT2FN[act]
            if "gelu" not in act
            else lambda x: torch.nn.functional.gelu(
                x,
                approximate=(
                    "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
                ),
            )
        )

        self.fc_in = TensorParallelColumnLinear.load(
            config, prefix=f"{prefix}.fc_in", weights=weights, bias=True
        )

        self.fc_out = load_row(
            config,
            prefix=f"{prefix}.fc_out",
            weights=weights,
            bias=True,
        )

    def forward(self, hidden_states):
        hidden_states = self.fc_in(hidden_states)
        hidden_states = self.act(hidden_states)
        return self.fc_out(hidden_states)


class FlashGPTJLayer(nn.Module):
    """One transformer layer; GPT-J runs attention and MLP in parallel off one LayerNorm."""

    def __init__(self, prefix: str, config, weights):
        super().__init__()
        self.self_attn = FlashGPTJAttention(
            prefix=f"{prefix}.attn", config=config, weights=weights
        )
        self.mlp = GPTJMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)

        self.input_layernorm = FastLayerNorm.load(
            prefix=f"{prefix}.ln_1", weights=weights, eps=config.layer_norm_epsilon
        )

    def forward(
        self,
        hidden_states,
        residual,
        cos,
        sin,
        cu_seqlen_prefill,
        kv_cache,
        block_tables,
        slots,
        seqlen,
        max_s,
    ):
        hidden_states, residual = self.input_layernorm(hidden_states, residual)
        # Self Attention
        attn_output = self.self_attn(
            hidden_states,
            cos,
            sin,
            cu_seqlen_prefill,
            kv_cache,
            block_tables,
            slots,
            seqlen,
            max_s,
        )

        # Parallel MLP branch shares the same normalized input as attention;
        # their outputs are summed (GPT-J parallel-block architecture).
        feed_forward_hidden_states = self.mlp(hidden_states)

        return attn_output + feed_forward_hidden_states, residual


class FlashGPTJModel(torch.nn.Module):
    """GPT-J transformer trunk: embedding, layer stack, and final LayerNorm."""

    def __init__(self, prefix: str, config, weights):
        super().__init__()
        self.config = config

        self.wte = TensorParallelEmbedding(prefix=f"{prefix}.wte", weights=weights)
        self.layers = nn.ModuleList(
            [
                FlashGPTJLayer(
                    prefix=(
                        f"h.{layer_id}" if not prefix else f"{prefix}.h.{layer_id}"
                    ),
                    config=config,
                    weights=weights,
                )
                for layer_id in range(config.num_hidden_layers)
            ]
        )

        self.ln_f = FastLayerNorm.load(
            prefix="ln_f" if not prefix else f"{prefix}.ln_f",
            weights=weights,
            eps=config.layer_norm_epsilon,
        )

        self.gradient_checkpointing = False

        self.head_size = self.layers[0].self_attn.head_size
        self.num_heads = self.layers[0].self_attn.num_heads

    def forward(
        self,
        input_ids: Optional[torch.LongTensor],
        position_ids: torch.Tensor,
        cu_seqlen_prefill: Optional[torch.Tensor],
        kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
        block_tables: torch.Tensor,
        slots: torch.Tensor,
        seqlen: Seqlen,
        max_s: int,
        prefill_cache_indices: Optional[torch.Tensor],
    ) -> torch.Tensor:
        hidden_states = self.wte(input_ids)

        # Get rotary cos and sin for this forward
        # Avoid to index in each layer
        cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
            position_ids, max_s, hidden_states.dtype
        )

        residual = None
        for i, layer in enumerate(self.layers):
            hidden_states, residual = layer(
                hidden_states,
                residual,
                cos,
                sin,
                cu_seqlen_prefill,
                kv_cache[i],
                block_tables,
                slots,
                seqlen,
                max_s,
            )

        hidden_states, _ = self.ln_f(hidden_states, residual)

        return hidden_states


class FlashGPTJForCausalLM(torch.nn.Module):
    """Causal LM head on top of FlashGPTJModel, with optional speculative logits."""

    def __init__(self, prefix: str, config, weights):
        super().__init__()
        if not prefix:
            prefix = "transformer"
        else:
            prefix = f"{prefix}.transformer"
        self.model = FlashGPTJModel(prefix, config, weights)
        self.lm_head = SpeculativeHead.load(
            config,
            prefix="lm_head",
            weights=weights,
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        cu_seqlen_prefill: Optional[torch.Tensor],
        kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
        block_tables: torch.Tensor,
        slots: torch.Tensor,
        seqlen: Seqlen,
        max_s: int,
        prefill_cache_indices: Optional[torch.Tensor] = None,
        lm_head_indices: Optional[torch.Tensor] = None,
        adapter_data: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        hidden_states = self.model(
            input_ids,
            position_ids,
            cu_seqlen_prefill,
            kv_cache,
            block_tables,
            slots,
            seqlen,
            max_s,
            prefill_cache_indices=prefill_cache_indices,
        )
        if lm_head_indices is not None:
            # Only compute logits for the positions that will actually be
            # sampled (e.g. the last token of each sequence).
            hidden_states = hidden_states[lm_head_indices]
        logits, speculative_logits = self.lm_head(hidden_states)
        return logits, speculative_logits
text-generation-inference/server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_gptj_modeling.py", "repo_id": "text-generation-inference", "token_count": 6420 }
319
# Helpers for multimodal (vision-language) batching: per-model-type image
# token expansion, image preprocessing, and feature-count computation.

from dataclasses import dataclass
import torch
from PIL import Image
from io import BytesIO

from opentelemetry import trace
from typing import Iterable, Optional, Tuple, List, Type, Dict

from transformers import PreTrainedTokenizerBase
from transformers.image_processing_utils import select_best_resolution
from text_generation_server.pb import generate_pb2
from text_generation_server.models.flash_causal_lm import (
    FlashCausalLMBatch,
    FlashCausalLM,
)
from text_generation_server.models.globals import PREFIX_CACHING, ATTENTION, MEM_POOL
from loguru import logger
from text_generation_server.utils.log import log_master
from transformers import AutoProcessor
from text_generation_server.layers.attention import Seqlen
from text_generation_server.models.metadata_kernels import block_tables_to_ragged

tracer = trace.get_tracer(__name__)

# Special tokens used when expanding an image placeholder into the exact
# number of image tokens each model expects.
IDEFICS2_FAKE_TOKEN = "<fake_token_around_image>"
IDEFICS2_IMAGE_TOKEN = "<image>"

IDEFICS3_IMAGE_TOKEN = "<image>"
IDEFICS3_FAKE_IMAGE_TOKEN = "<fake_token_around_image>"
IDEFICS3_GLOBAL_IMG_TOKEN = "<global-img>"


def prompt_split_image_llama4(aspect_ratio, num_patches_per_chunk):
    """
    Create a structured string representation of image tokens

    Args:
        num_patches: Number of patches in the image

    Returns:
        String with appropriate image tokens
    """
    img_string = "<|image_start|>"
    ratio_h, ratio_w = aspect_ratio
    if ratio_h * ratio_w > 1:
        # One patch run per tile, with x-separators between columns and a
        # y-separator at the end of each row.
        for yy in range(ratio_h):
            for xx in range(ratio_w):
                img_string += "<|patch|>" * num_patches_per_chunk
                if xx < ratio_w - 1:
                    img_string += "<|tile_x_separator|>"

            img_string += "<|tile_y_separator|>"
    # Global (whole-image) chunk is always appended after the tiles.
    img_string += "<|image|>"
    img_string += "<|patch|>" * num_patches_per_chunk
    img_string += "<|image_end|>"

    return img_string


# copied from: https://github.com/huggingface/transformers/blob/02ed609285c2448b3b54c31e362f2c389fa952ab/src/transformers/models/idefics3/processing_idefics3.py#L44-L60
def _prompt_split_image(
    *,
    image_seq_len: int,
    image_rows: int,
    image_cols: int,
    fake_token_around_image: str,
    image_token: str,
    global_img_token: str,
):
    """Prompt with expanded image tokens for when the image is split into patches."""
    text_split_images = ""
    for n_h in range(image_rows):
        for n_w in range(image_cols):
            # Each patch is tagged with its (row, col) position, 1-indexed.
            text_split_images += (
                f"{fake_token_around_image}"
                + f"<row_{n_h + 1}_col_{n_w + 1}>"
                + f"{image_token}" * image_seq_len
            )
        text_split_images += "\n"

    # Trailing global-image block after all the row/col patches.
    text_split_images += (
        f"\n{fake_token_around_image}"
        + f"{global_img_token}"
        + f"{image_token}" * image_seq_len
        + f"{fake_token_around_image}"
    )
    return text_split_images


def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
    """
    Calculate the shape of the image patch grid after the preprocessing for images of any resolution.

    Args:
        image_size (`tuple`):
            The size of the input image in the format (height, width).
        grid_pinpoints (`List`):
            A list containing possible resolutions. Each item in the list should be a tuple or list
            of the form `(height, width)`.
        patch_size (`int`):
            The size of each image patch.

    Returns:
        tuple: The shape of the image patch grid in the format (height, width).
    """
    if not isinstance(grid_pinpoints, list):
        raise ValueError("grid_pinpoints should be a list of tuples or lists")

    height, width = select_best_resolution(image_size, grid_pinpoints)
    return height // patch_size, width // patch_size


def image_text_replacement(processor, image_input, config) -> str:
    """Return (expanded_image_string, image_start_token) for the model type.

    The expanded string contains exactly the placeholder tokens the model's
    vision tower will fill in; the second element is the token marking the
    start of an image region in the prompt.
    """
    if config.model_type == "idefics2":
        image_seq_len = 64
        image_str = f"{IDEFICS2_FAKE_TOKEN}{IDEFICS2_IMAGE_TOKEN * image_seq_len}{IDEFICS2_FAKE_TOKEN}"
        if processor.image_processor.do_image_splitting:
            # Splitting produces 4 sub-images + 1 global image.
            image_str *= 5
        return image_str, IDEFICS2_FAKE_TOKEN
    if config.model_type == "idefics3":
        # TODO: implement this in a more general way
        n_rows = image_input["rows"][0][0]
        n_cols = image_input["cols"][0][0]
        image_seq_len = int(
            ((config.vision_config.image_size // config.vision_config.patch_size) ** 2)
            / (config.scale_factor**2)
        )
        image_str = _prompt_split_image(
            image_seq_len=image_seq_len,
            image_rows=n_rows,
            image_cols=n_cols,
            fake_token_around_image=IDEFICS3_FAKE_IMAGE_TOKEN,
            image_token=IDEFICS3_IMAGE_TOKEN,
            global_img_token=IDEFICS3_GLOBAL_IMG_TOKEN,
        )
        return image_str, IDEFICS3_FAKE_IMAGE_TOKEN
    elif config.model_type == "llava_next":
        height, width = image_input["image_sizes"][0]
        num_features = get_number_of_features(height, width, config)

        log_master(
            logger.info,
            f"Found {num_features} features in image of resolution {height}x{width}",
        )
        return "<image>" * num_features, "<image>"

    elif config.model_type == "paligemma":
        return "<image>" * config.text_config.num_image_tokens, "<image>"
    elif config.model_type == "qwen2_vl":
        grid_t, grid_h, grid_w = image_input["image_grid_thw"][0]
        # NOTE(review): the // 4 presumably reflects the 2x2 spatial merge of
        # vision patches into one token — confirm against the Qwen2-VL processor.
        num_pads = grid_t * grid_h * grid_w // 4
        padding = "<|image_pad|>" * num_pads
        return f"<|vision_start|>{padding}<|vision_end|>", "<|vision_start|>"
    elif config.model_type == "qwen2_5_vl":
        grid_t, grid_h, grid_w = image_input["image_grid_thw"][0]
        num_pads = grid_t * grid_h * grid_w // 4
        padding = "<|image_pad|>" * num_pads
        return f"<|vision_start|>{padding}<|vision_end|>", "<|vision_start|>"
    elif config.model_type == "gemma3":
        # TODO: get correct number of features via reviewing the Gemma3 architecture
        # and calculating the number of image tokens
        num_pads = 256
        padding = "<image_soft_token>" * num_pads
        return f"\n\n<start_of_image>{padding}<end_of_image>\n\n", "<start_of_image>"
    elif config.model_type == "llama4":
        patch_size = config.vision_config.patch_size
        pixel_shuffle_ratio = config.vision_config.pixel_shuffle_ratio
        downsample_ratio = int(round(1.0 / (pixel_shuffle_ratio**2)))
        aspect_ratios = image_input["aspect_ratios"][0]
        image_height, image_width = image_input["pixel_values"][0].shape[-2:]

        num_patches_per_chunk = int(
            (image_height // patch_size)
            * (image_width // patch_size)
            // downsample_ratio
        )
        tokens_for_this_image = prompt_split_image_llama4(
            aspect_ratios, num_patches_per_chunk
        )

        return tokens_for_this_image, "<|image_start|>"
    else:
        raise RuntimeError(f"Unknown config {config.model_type} for multimodal")


def image_text_replacement_fixup(config, text: str) -> str:
    """Post-process the assembled prompt for model-specific quirks."""
    if config.model_type == "idefics2":
        # Collapse doubled fake tokens produced by back-to-back images.
        return text.replace(
            f"{IDEFICS2_FAKE_TOKEN}{IDEFICS2_FAKE_TOKEN}", IDEFICS2_FAKE_TOKEN
        )
    return text


def preprocess_text(config, text: str) -> str:
    # PaliGemma expects an explicit BOS and a trailing newline in the prompt.
    if config.model_type == "paligemma":
        return "<bos>" + text + "\n"
    return text


def preprocess_image(config, img):
    """Model-specific image normalization before the image processor runs."""
    model_type = config.model_type

    if model_type in {"qwen2_vl", "qwen2_5_vl"} and img.width <= 20:
        # Upscale tiny images so the patch grid is non-degenerate.
        img = img.resize((img.width * 2, img.height * 2))
    if model_type == "paligemma":
        img = img.convert("RGB")

    if model_type not in {"llava_next", "gemma3", "llama4"}:
        # TODO: check if this is needed
        img = [img]

    return img


def get_unpadded_features(
    original_height: int,
    original_width: int,
    npatches: int,
    num_patch_height: int,
    num_patch_width: int,
) -> Tuple[int, int]:
    """Return (unpadded_features, newline_features) for a LLaVA-NeXT image.

    Mirrors the unpadding done by the image processor: the side that was
    letterboxed to preserve aspect ratio is shrunk back before counting
    patch features; one newline feature is added per remaining row.
    """
    current_height = npatches * num_patch_height
    current_width = npatches * num_patch_width

    aspect_ratio: float = original_width / original_height
    current_aspect_ratio: float = current_width / current_height

    if aspect_ratio > current_aspect_ratio:
        # Image is wider than the grid: vertical padding was added.
        new_height = (original_height * current_width) // original_width
        padding = (current_height - new_height) // 2
        current_height = current_height - (2 * padding)
    else:
        # Image is taller than the grid: horizontal padding was added.
        new_width = (original_width * current_height) // original_height
        padding = (current_width - new_width) // 2
        current_width = current_width - (2 * padding)

    unpadded_features = current_height * current_width
    newline_features = current_height
    return (unpadded_features, newline_features)


def get_number_of_features(height: int, width: int, config) -> int:
    """Total image-token count for a LLaVA-NeXT image of the given size."""
    # From config
    # Hardcoded for CLIP for now
    # image_grid_pinpoints = [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
    image_grid_pinpoints = config.image_grid_pinpoints
    image_size = config.vision_config.image_size
    patch_size = config.vision_config.patch_size

    assert image_size % patch_size == 0

    npatches = image_size // patch_size

    # Dimensions are intentionally swapped to be bug-compatible with
    # upstream: https://github.com/LLaVA-VL/LLaVA-NeXT/issues/59
    num_patch_width, num_patch_height = get_anyres_image_grid_shape(
        [height, width],
        image_grid_pinpoints,
        image_size,
    )
    unpadded_features, newline_features = get_unpadded_features(
        height, width, npatches, num_patch_height, num_patch_width
    )
    # The base patch covers the entire image
    base_features = npatches**2
    return unpadded_features + newline_features + base_features


def scatter_image_embeds(
    embeds: torch.Tensor, is_embed: Optional[torch.Tensor]
) -> torch.Tensor:
    """Spread `embeds` into a full-length tensor at positions where `is_embed`
    is True; other positions are filled with NaN. No-op when `is_embed` is None."""
    if is_embed is None:
        return embeds
    placeholders = embeds.new_full(
        (is_embed.shape[0], embeds.shape[-1]),
        fill_value=torch.nan,
    )
    placeholders[is_embed] = embeds
    return placeholders


def gather_image_embeds(
    embeds: torch.Tensor, is_embed: Optional[torch.Tensor]
) -> Optional[torch.Tensor]:
    """Inverse of scatter_image_embeds: select rows where `is_embed` is True.
    Returns None when the selection is empty."""
    if is_embed is None:
        return embeds
    sel = embeds[is_embed]
    return sel if sel.numel() else None


@dataclass
class ImagePositions:
    # offset: index of the image's first placeholder token in the prompt
    offset: int
    # length: number of placeholder tokens the image occupies
    length: int
    # id: per-request image index
    id: int
    # num_placeholder_tokens: placeholder tokens that receive embeddings
    num_placeholder_tokens: int
    # is_embed: optional boolean mask over the placeholder span marking which
    # positions are real embedding slots (None = all of them)
    is_embed: Optional[torch.Tensor] = None
class VlmCausalLMBatch(FlashCausalLMBatch): image_inputs: Optional[List[List[Dict[str, torch.Tensor]]]] image_positions: Optional[List[List[ImagePositions]]] encoder_cache: Optional[List[Dict[int, torch.Tensor]]] pixel_values: Optional[List[torch.Tensor]] pixel_attention_mask: Optional[List[torch.Tensor]] image_sizes: Optional[List[Tuple[int, int]]] image_grid_thw: Optional[torch.Tensor] cache_entries_to_free: List[Tuple[int, int]] has_image_inputs: bool = False inputs_embeds: Optional[torch.Tensor] = None @classmethod @tracer.start_as_current_span("concatenate") def concatenate(cls, batches): batch = super(VlmCausalLMBatch, cls).concatenate(batches) batch.image_inputs = [] batch.image_positions = [] batch.encoder_cache = [] for b in batches: if b.image_inputs is not None: batch.image_inputs.extend(b.image_inputs) else: batch.image_inputs.append(None) if b.image_positions is not None: batch.image_positions.extend(b.image_positions) else: batch.image_positions.append(None) if b.encoder_cache is not None: batch.encoder_cache.extend(b.encoder_cache) else: batch.encoder_cache.append(None) batch.pixel_values = None batch.pixel_attention_mask = None batch.image_sizes = None batch.image_grid_thw = None batch.inputs_embeds = None # To be filled in prepare_for_prefill batch.has_image_inputs = False batch.cache_entries_to_free = [] return batch @tracer.start_as_current_span("filter") def filter(self, request_ids: List[int]): if len(request_ids) == 0: raise ValueError("Batch must have at least one request") image_inputs = [] image_positions = [] encoder_cache = [] for request_id in request_ids: idx = self.requests_idx_mapping[request_id] image_inputs.append(self.image_inputs[idx]) image_positions.append(self.image_positions[idx]) encoder_cache.append(self.encoder_cache[idx]) batch = super().filter(request_ids) batch.pixel_values = None batch.pixel_attention_mask = None batch.image_sizes = None batch.image_grid_thw = None batch.inputs_embeds = None batch.image_inputs = 
image_inputs batch.image_positions = image_positions batch.encoder_cache = encoder_cache # To be filled in prepare_for_prefill batch.has_image_inputs = False batch.cache_entries_to_free = [] return batch @classmethod def batch_tokenized_inputs( cls, requests: Iterable[generate_pb2.Request], tokenizer, processor, config ): kwargs = {} if ( hasattr(processor, "image_processor_class") and processor.image_processor_class == "Idefics3ImageProcessor" ): kwargs["return_row_col_info"] = True max_length = 0 vocab = tokenizer.get_vocab() if not hasattr(config, "image_token_index"): config.image_token_index = config.image_token_id batch_tokenized_inputs: List[List[int]] = [] batch_image_inputs: List[Optional[List[dict]]] = [] batch_image_positions: List[Optional[List[ImagePositions]]] = [] for r in requests: text_parts = [] image_inputs = [] image_texts = [] image_id = 0 for chunk in r.input_chunks.chunks: chunk_type = chunk.WhichOneof("chunk") if chunk_type == "text": text = preprocess_text(config, chunk.text) text_parts.append(text) elif chunk_type == "image": img = Image.open(BytesIO(chunk.image.data)) img = preprocess_image(config, img) image_input = processor.image_processor( [img], return_tensors="pt", **kwargs ) image_inputs.append(image_input) img_text, img_start_token_str = image_text_replacement( processor, image_input, config ) text_parts.append(img_text) image_texts.append([image_id, img_start_token_str, img_text]) image_id += 1 else: raise RuntimeError(f"Invalid chunk type {chunk_type}") full_text = image_text_replacement_fixup(config, "".join(text_parts)) input_ids = tokenizer( full_text, truncation=True, max_length=r.truncate, add_special_tokens=( r.add_special_tokens if config.model_type != "paligemma" else False ), )["input_ids"] max_length = max(max_length, len(input_ids)) if len(image_inputs) > 0: img_start_token = vocab[image_texts[0][1]] image_positions = cls.get_image_positions( input_ids, image_texts, img_start_token, config, tokenizer ) else: 
image_inputs = None image_positions = None batch_tokenized_inputs.append(input_ids) batch_image_inputs.append(image_inputs) batch_image_positions.append(image_positions) return batch_tokenized_inputs, batch_image_inputs, batch_image_positions @classmethod def get_image_positions( cls, input_ids: List[int], image_texts: List[Tuple[int, str, str]], img_start_token: int, config, tokenizer: PreTrainedTokenizerBase, ) -> List[ImagePositions]: image_positions = [] num_images = len(image_texts) input_ids_t = torch.as_tensor(input_ids) img_start_token_pos = torch.where(input_ids_t.eq(img_start_token))[0] num_tokens = input_ids_t.numel() last_pos = 0 for i in range(num_images): image_id, img_start_token_str, img_text = image_texts[i] img_text = image_text_replacement_fixup(config, img_text) if config.model_type == "gemma3": img_text = img_text.replace("\n\n", "") tokens = tokenizer(img_text, add_special_tokens=False, return_tensors="pt")[ "input_ids" ][0] length = tokens.numel() assert ( length <= num_tokens ), f"{length} > {num_tokens} Image is truncated, try increasing --max-batch-prefill-tokens" pos = torch.searchsorted(img_start_token_pos, last_pos, right=False) index = img_start_token_pos[pos] assert torch.equal( input_ids_t[index : index + length], tokens ), "Image tokens not found in input_ids" is_embed = tokens == config.image_token_index num_placeholder_tokens = int(is_embed.sum()) if num_placeholder_tokens == length: is_embed = None pos = ImagePositions( offset=index, length=length, id=image_id, num_placeholder_tokens=num_placeholder_tokens, is_embed=is_embed, ) image_positions.append(pos) last_pos = index + length if ( config.model_type == "idefics2" and i + 1 != num_images and input_ids[last_pos] == config.image_token_index ): fake_token = last_pos - 1 fake_token_index = torch.searchsorted( img_start_token_pos, fake_token, right=False ) img_start_token_pos[fake_token_index] = last_pos image_texts[i + 1][2] = image_texts[i + 1][2][ len(img_start_token_str) : ] 
return image_positions @classmethod def from_pb_processor( cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, processor, config, dtype: torch.dtype, device: torch.device, ) -> "VlmCausalLMBatch": batch_tokenized_inputs, image_inputs, image_positions = ( cls.batch_tokenized_inputs(pb.requests, tokenizer, processor, config) ) batch = cls.from_tokenized(pb, tokenizer, batch_tokenized_inputs, dtype, device) batch.image_inputs = image_inputs batch.image_positions = image_positions batch.encoder_cache = [{} for _ in range(len(pb.requests))] if len(image_inputs): batch.pixel_values = None batch.pixel_attention_mask = None batch.image_sizes = None batch.image_grid_thw = None return batch def prepare_for_prefill(self): super().prepare_for_prefill() self.has_image_inputs = False self.cache_entries_to_free = [] self.pixel_values = [] assert ( len(self.cache_lengths) == len(self.input_lengths) == len(self.prefilling_mask) ), "Mismatch in lengths of cache_lengths, input_lengths, and prefilling_mask" for i, ( cache_length, input_length, request_prefilling, ) in enumerate( zip( self.cache_lengths, self.input_lengths, self.prefilling_mask, ) ): if not request_prefilling or self.image_positions[i] is None: continue for image_position in self.image_positions[i]: if image_position is None: continue start_pos = image_position.offset length = image_position.length if start_pos >= cache_length + input_length: # No encoder input required at this step break if start_pos + length <= cache_length: # The encode input is already processed continue self.has_image_inputs = True if image_position.id not in self.encoder_cache[i]: image_inputs = self.image_inputs[i][image_position.id] self.pixel_values.append((i, image_position.id, image_inputs)) # Remove the image from the image_inputs self.image_inputs[i][image_position.id] = None if not self.has_image_inputs: self.pixel_values = None self.pixel_attention_mask = None self.image_sizes = None self.image_grid_thw = None else: 
image_grid_thw_list = [ x[2]["image_grid_thw"] for x in self.pixel_values if "image_grid_thw" in x[2] ] if image_grid_thw_list: self.image_grid_thw = torch.cat(image_grid_thw_list, dim=0).to( self.input_ids.device ) else: self.image_grid_thw = None def update_encoder_cache(self, encoder_outputs, request_id, img_pos): self.encoder_cache[request_id][img_pos.id] = scatter_image_embeds( encoder_outputs, img_pos.is_embed ) def gather_vision_embeds(self): device = self.input_ids.device chunks = [] for ( i, cache_length, input_length, request_prefilling, ) in zip( range(len(self.requests)), self.cache_lengths, self.input_lengths, self.prefilling_mask, ): if not request_prefilling or self.image_positions[i] is None: continue for image_position in self.image_positions[i]: if image_position is None: continue start_pos = image_position.offset length = image_position.length if start_pos >= cache_length + input_length: # No encoder input required at this step break if start_pos + length <= cache_length: # The encode input is already processed continue start_idx = max(cache_length - start_pos, 0) end_idx = min(cache_length - start_pos + input_length, length) assert ( image_position.id in self.encoder_cache[i] ), f"image_id {image_position.id} not in encoder_cache {self.encoder_cache[i]}" encoder_output = self.encoder_cache[i][image_position.id] is_embed = image_position.is_embed if is_embed is not None: is_embed = is_embed[start_idx:end_idx] from loguru import logger logger.info( f"image_id {image_position.id} start_idx {start_idx} end_idx {end_idx}, length {length}" ) embeds = gather_image_embeds( encoder_output[start_idx:end_idx], is_embed=is_embed, ) if embeds is not None: chunks.append(embeds) if end_idx == length: self.cache_entries_to_free.append((i, image_position.id)) self.image_positions[i][image_position.id] = None if len(chunks) == 0: return None return torch.cat(chunks, dim=0).to(device) def free_encoder_cache(self): for i, image_id in self.cache_entries_to_free: 
self.encoder_cache[i].pop(image_id, None) self.cache_entries_to_free = [] # release any freed GPU memory immediately? class VlmCausalLM(FlashCausalLM): def __init__( self, model_id: str, *, processor_class=AutoProcessor, processor_kwargs=None, batch_class=VlmCausalLMBatch, revision, trust_remote_code: bool, support_chunking: bool = True, **kwargs, ): if PREFIX_CACHING: raise NotImplementedError("Vlm do not work with prefix caching yet") if processor_kwargs is None: processor_kwargs = {} self.processor = processor_class.from_pretrained( model_id, revision=revision, trust_remote_code=trust_remote_code, **processor_kwargs, ) self.batch_class = batch_class super().__init__( model_id=model_id, revision=revision, trust_remote_code=trust_remote_code, support_chunking=support_chunking, **kwargs, ) @property def batch_type(self) -> Type[VlmCausalLMBatch]: return self.batch_class def cuda_graph_warmup(self, bs: int, max_s: int, max_bt: int): max_bs = max(self.cuda_graphs.keys()) if self.cuda_graphs else None input_lengths = [max_s] * bs cache_lengths = [0] * bs config = getattr(self.model.config, "text_config", self.model.config) if max_bs is None: inputs_embeds = torch.zeros( (bs, config.hidden_size), device=self.device, dtype=self.dtype, ) position_ids = torch.zeros(bs, dtype=torch.int32, device=self.device) config = getattr(self.model, "config", None) rope_scaling = getattr(config, "rope_scaling", None) if config else None if ( # mrope have position_ids per section, if so repeat n times isinstance(rope_scaling, dict) and rope_scaling["rope_type"] == "mrope" ): n_sections = len(self.model.config.rope_scaling["mrope_section"]) position_ids = position_ids.unsqueeze(1).repeat(1, n_sections) slots = torch.arange(bs, dtype=torch.int64, device=self.device) input_lengths_tensor = ( torch.ones(bs, dtype=torch.int32, device=self.device) * max_s ) cache_lengths_tensor = torch.zeros( bs, dtype=torch.int32, device=self.device ) block_tables = torch.arange( max_bt, dtype=torch.int32, 
device=self.device ).repeat(bs) block_tables = block_tables.reshape((bs, max_bt)) if ATTENTION == "flashinfer": block_tables = block_tables_to_ragged( block_tables=block_tables, input_lengths=input_lengths, cache_lengths=cache_lengths, input_lengths_tensor=input_lengths_tensor, cache_lengths_tensor=cache_lengths_tensor, max_current_length=max_s, ) else: if bs > max_bs: raise RuntimeError( "Cuda graphs should be generated in decreasing order size to reduce VRAM usage" ) inputs_embeds = self.cuda_graphs[max_bs]["inputs_embeds"][:bs] position_ids = self.cuda_graphs[max_bs]["position_ids"][:bs] if ATTENTION == "flashinfer": block_tables = self.cuda_graphs[max_bs]["block_tables"][: bs * max_bt] else: block_tables = self.cuda_graphs[max_bs]["block_tables"][:bs] slots = self.cuda_graphs[max_bs]["slots"][:bs] input_lengths_tensor = self.cuda_graphs[max_bs]["input_lengths"][:bs] cache_lengths_tensor = self.cuda_graphs[max_bs]["cache_lengths"][:bs] if ATTENTION == "flashinfer": from text_generation_server.layers.attention.flashinfer import ( create_decode_state_cuda_graphs, ) block_tables_ptr = torch.zeros( bs + 1, dtype=torch.int32, device=self.device ) last_page_len = torch.ones(bs, dtype=torch.int32, device=self.device) state = create_decode_state_cuda_graphs( device=inputs_embeds.device, block_tables=block_tables, block_tables_ptr=block_tables_ptr, last_page_len=last_page_len, num_heads=self.num_heads, num_kv_heads=self.num_kv_heads, ) else: state = None graph = torch.cuda.CUDAGraph() self.cuda_graphs[bs] = { "inputs_embeds": inputs_embeds, "position_ids": position_ids, "kv_cache": self.kv_cache, "block_tables": block_tables, "slots": slots, "input_lengths": input_lengths_tensor, "cache_lengths": cache_lengths_tensor, "state": state, "graph": graph, } torch.cuda.synchronize() # Run once outside to warmup with self._forward_context( block_tables=block_tables, cu_seqlen_prefill=None, input_lengths_tensor=input_lengths_tensor, state=state, 
cache_lengths_tensor=cache_lengths_tensor, ): seqlen = Seqlen( input_lengths=input_lengths_tensor, cache_lengths=cache_lengths_tensor, cu_seqlen_q=None, max_q=1, max_k=max_s, ) self.model.forward( inputs_embeds=inputs_embeds, position_ids=position_ids, cu_seqlen_prefill=None, kv_cache=self.kv_cache, block_tables=block_tables, slots=slots, seqlen=seqlen, max_s=max_s, prefill_cache_indices=None, lm_head_indices=None, ) del seqlen torch.cuda.synchronize() with torch.cuda.graph(graph, pool=MEM_POOL): seqlen = Seqlen( input_lengths=input_lengths_tensor, cache_lengths=cache_lengths_tensor, cu_seqlen_q=None, max_q=1, max_k=max_s, ) logits, speculative_logits = self.model.forward( inputs_embeds=inputs_embeds, position_ids=position_ids, cu_seqlen_prefill=None, kv_cache=self.kv_cache, block_tables=block_tables, slots=slots, seqlen=seqlen, max_s=max_s, prefill_cache_indices=None, lm_head_indices=None, ) self.cuda_graphs[bs]["logits"] = logits self.cuda_graphs[bs]["speculative_logits"] = speculative_logits torch.cuda.synchronize() def get_vision_embeds( self, pixel_values: torch.Tensor, pixel_attention_mask: torch.Tensor, image_sizes: torch.Tensor, image_grid_thw: torch.Tensor, ): embeds = self.model.get_vision_embeds( pixel_values=pixel_values, pixel_attention_mask=pixel_attention_mask, image_sizes=image_sizes, image_grid_thw=image_grid_thw, ) return embeds def get_inputs_embeds( self, input_ids: torch.Tensor, vision_embeds: Optional[torch.Tensor] = None, ): return self.model.get_inputs_embeds( input_ids=input_ids, vision_embeds=vision_embeds, ) def encode_images(self, batch): if batch.pixel_values is not None: device = batch.input_ids.device for request_id, image_id, image_input in batch.pixel_values: pixel_values = image_input["pixel_values"].to(device) if "pixel_attention_mask" in image_input: pixel_attention_mask = image_input["pixel_attention_mask"].to( device ) else: pixel_attention_mask = None if "image_sizes" in image_input: image_sizes = 
image_input["image_sizes"].to(device) else: image_sizes = None if "image_grid_thw" in image_input: image_grid_thw = image_input["image_grid_thw"].to(device) else: image_grid_thw = None encoder_outputs = self.get_vision_embeds( pixel_values=pixel_values, pixel_attention_mask=pixel_attention_mask, image_sizes=image_sizes, image_grid_thw=image_grid_thw, ) batch.update_encoder_cache( encoder_outputs, request_id, batch.image_positions[request_id][image_id], ) batch.pixel_values = None batch.pixel_attention_mask = None batch.image_sizes = None def set_inputs_embeds(self, batch): if batch.has_image_inputs: self.encode_images(batch) vision_embeds = batch.gather_vision_embeds() batch.has_image_inputs = False else: vision_embeds = None inputs_embeds = self.get_inputs_embeds( batch.input_ids, vision_embeds=vision_embeds ) batch.inputs_embeds = inputs_embeds def forward( self, batch: VlmCausalLMBatch, adapter_data: Optional[Dict[str, torch.Tensor]] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: # Model Forward if batch.speculative_ids is not None: input_ids = batch.input_ids position_ids = batch.position_ids cu_seqlen_prefill = batch.cu_seqlen_prefill kv_cache = self.kv_cache block_tables = batch.block_tables_tensor slots = batch.slots[batch.slot_indices] input_lengths = batch.input_lengths_tensor max_s = batch.max_current_length lm_head_indices = batch.prefill_head_indices speculative_ids = batch.speculative_ids B, speculative_length = speculative_ids.shape new_length = speculative_length + 1 new_input_ids = torch.cat( [input_ids.unsqueeze(-1), speculative_ids], dim=1 ).reshape(-1) arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0) arange_int = arange.to(dtype=torch.int32) new_position_ids = ( position_ids.unsqueeze(-1).expand(B, new_length) + arange ).view(-1) slots = (slots.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1) input_lengths = ( input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int ).view(-1) 
cache_lengths_tensor = ( batch.cache_lengths_tensor.unsqueeze(-1).expand(B, new_length) ).reshape(-1) # Add Copy the block tables for all members block_tables = ( block_tables.unsqueeze(1) .expand(B, new_length, -1) .reshape(B * new_length, -1) .contiguous() ) max_s = max_s + speculative_length input_ids = new_input_ids position_ids = new_position_ids else: input_ids = batch.input_ids inputs_embeds = batch.inputs_embeds position_ids = batch.position_ids cu_seqlen_prefill = batch.cu_seqlen_prefill kv_cache = self.kv_cache block_tables = batch.block_tables_tensor slots = batch.slots[batch.slot_indices] input_lengths = batch.input_lengths_tensor cache_lengths_tensor = batch.cache_lengths_tensor max_s = batch.max_current_length lm_head_indices = batch.prefill_head_indices if self.model.config.model_type in {"qwen2_vl", "qwen2_5_vl"}: if position_ids.dim() == 1 and batch.prefilling: position_ids = self.model.get_position_ids( input_ids, batch.image_grid_thw ) batch.position_ids = position_ids attention_mask = None attention_mask_forward = None if self.model.config.model_type == "gemma3" and cu_seqlen_prefill is not None: attention_mask = self.model.get_attention_mask( input_ids, cu_seqlen_prefill, self.dtype, bool_mask=True ) min_dtype = torch.finfo(self.dtype).min attention_mask_forward = torch.where(attention_mask, 0, min_dtype).to( input_ids.device ) attention_mask = attention_mask.reshape(-1) # Try to find an associated cuda graph bs = input_ids.shape[0] sorted_padded_bs = sorted([k for k in self.cuda_graphs.keys() if k >= bs]) if sorted_padded_bs: # Get associated cuda graph cuda_graph = self.cuda_graphs[sorted_padded_bs[0]] else: cuda_graph = None if cu_seqlen_prefill is not None or cuda_graph is None: if ATTENTION == "flashinfer": block_tables = block_tables_to_ragged( block_tables=block_tables, input_lengths=batch.input_lengths, cache_lengths=batch.cache_lengths, input_lengths_tensor=batch.input_lengths_tensor, cache_lengths_tensor=batch.cache_lengths_tensor, 
max_current_length=batch.max_current_length, ) with self._forward_context( block_tables=block_tables, cu_seqlen_prefill=cu_seqlen_prefill, input_lengths_tensor=input_lengths, cache_lengths_tensor=cache_lengths_tensor, attention_mask=attention_mask, ): seqlen = Seqlen( input_lengths=input_lengths, cache_lengths=cache_lengths_tensor, cu_seqlen_q=cu_seqlen_prefill, max_q=batch.max_input_length, max_k=batch.max_current_length, ) logits, speculative_logits = self.model.forward( inputs_embeds=inputs_embeds, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, kv_cache=kv_cache, block_tables=block_tables, slots=slots, seqlen=seqlen, max_s=max_s, prefill_cache_indices=batch.prefill_cache_indices, lm_head_indices=lm_head_indices, attention_mask=attention_mask_forward, ) if batch.prefill_cache_indices is not None: batch.prefill_cache_indices = None batch.image_grid_thw = None batch.free_encoder_cache() return logits, speculative_logits # Copy inputs to the static inputs of the cuda graph # Static inputs are potentially padded cuda_graph["inputs_embeds"][: inputs_embeds.shape[0]] = inputs_embeds cuda_graph["position_ids"][: position_ids.shape[0]] = position_ids if ATTENTION == "flashinfer": block_tables = block_tables_to_ragged( block_tables=block_tables, input_lengths=batch.input_lengths, cache_lengths=batch.cache_lengths, input_lengths_tensor=batch.input_lengths_tensor, cache_lengths_tensor=batch.cache_lengths_tensor, max_current_length=batch.max_current_length, ) cuda_graph["block_tables"][: block_tables.shape[0]] = block_tables else: cuda_graph["block_tables"][ : block_tables.shape[0], : block_tables.shape[1] ] = block_tables # XXX: This is working only because block 0 is reserved for the healthcheck # so it doesn't matter if we override it with bogus values. 
cuda_graph["slots"].fill_(0) cuda_graph["slots"][: slots.shape[0]] = slots cuda_graph["input_lengths"].zero_() cuda_graph["input_lengths"][: input_lengths.shape[0]] = input_lengths cuda_graph["cache_lengths"].zero_() cuda_graph["cache_lengths"][ : cache_lengths_tensor.shape[0] ] = cache_lengths_tensor with self._forward_context( block_tables=cuda_graph["block_tables"], cu_seqlen_prefill=None, input_lengths_tensor=cuda_graph["input_lengths"], cache_lengths_tensor=cuda_graph["cache_lengths"], state=cuda_graph["state"], ): # Replay the graph cuda_graph["graph"].replay() # Slice output to the correct shape speculative_logits = ( cuda_graph["speculative_logits"][:bs] if cuda_graph["speculative_logits"] is not None else None ) logits = cuda_graph["logits"][:bs] batch.free_encoder_cache() return logits, speculative_logits
text-generation-inference/server/text_generation_server/models/vlm_causal_lm.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/vlm_causal_lm.py", "repo_id": "text-generation-inference", "token_count": 21444 }
320
import os from typing import Union from loguru import logger import torch from transformers import AutoTokenizer from peft import AutoPeftModelForCausalLM, AutoPeftModelForSeq2SeqLM def download_and_unload_peft(model_id, revision, trust_remote_code): torch_dtype = torch.float16 logger.info("Trying to load a Peft model. It might take a while without feedback") try: model = AutoPeftModelForCausalLM.from_pretrained( model_id, revision=revision, torch_dtype=torch_dtype, trust_remote_code=trust_remote_code, low_cpu_mem_usage=True, ) except Exception: model = AutoPeftModelForSeq2SeqLM.from_pretrained( model_id, revision=revision, torch_dtype=torch_dtype, trust_remote_code=trust_remote_code, low_cpu_mem_usage=True, ) logger.info("Peft model detected.") logger.info("Merging the lora weights.") base_model_id = model.peft_config["default"].base_model_name_or_path model = model.merge_and_unload() os.makedirs(model_id, exist_ok=True) cache_dir = model_id logger.info(f"Saving the newly created merged model to {cache_dir}") tokenizer = AutoTokenizer.from_pretrained( base_model_id, trust_remote_code=trust_remote_code ) model.save_pretrained(cache_dir, safe_serialization=True) model.config.save_pretrained(cache_dir) tokenizer.save_pretrained(cache_dir) def download_peft( model_id: Union[str, os.PathLike], revision: str, trust_remote_code: bool ): torch_dtype = torch.float16 try: _model = AutoPeftModelForCausalLM.from_pretrained( model_id, revision=revision, torch_dtype=torch_dtype, trust_remote_code=trust_remote_code, low_cpu_mem_usage=True, ) except Exception: _model = AutoPeftModelForSeq2SeqLM.from_pretrained( model_id, revision=revision, torch_dtype=torch_dtype, trust_remote_code=trust_remote_code, low_cpu_mem_usage=True, ) logger.info("Peft model downloaded.")
text-generation-inference/server/text_generation_server/utils/peft.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/peft.py", "repo_id": "text-generation-inference", "token_count": 981 }
321
# EditorConfig helps developers define and maintain consistent # coding styles between different editors or IDEs # http://editorconfig.org root = true [*] indent_style = space indent_size = 2 end_of_line = lf charset = utf-8 trim_trailing_whitespace = true insert_final_newline = true [*.md] trim_trailing_whitespace = false
tokenizers/bindings/node/.editorconfig/0
{ "file_path": "tokenizers/bindings/node/.editorconfig", "repo_id": "tokenizers", "token_count": 108 }
322
/* tslint:disable */ /* eslint-disable */ /* prettier-ignore */ /* auto-generated by NAPI-RS */ const { existsSync, readFileSync } = require('fs') const { join } = require('path') const { platform, arch } = process let nativeBinding = null let localFileExisted = false let loadError = null function isMusl() { // For Node 10 if (!process.report || typeof process.report.getReport !== 'function') { try { const lddPath = require('child_process').execSync('which ldd').toString().trim() return readFileSync(lddPath, 'utf8').includes('musl') } catch (e) { return true } } else { const { glibcVersionRuntime } = process.report.getReport().header return !glibcVersionRuntime } } switch (platform) { case 'android': switch (arch) { case 'arm64': localFileExisted = existsSync(join(__dirname, 'tokenizers.android-arm64.node')) try { if (localFileExisted) { nativeBinding = require('./tokenizers.android-arm64.node') } else { nativeBinding = require('tokenizers-android-arm64') } } catch (e) { loadError = e } break case 'arm': localFileExisted = existsSync(join(__dirname, 'tokenizers.android-arm-eabi.node')) try { if (localFileExisted) { nativeBinding = require('./tokenizers.android-arm-eabi.node') } else { nativeBinding = require('tokenizers-android-arm-eabi') } } catch (e) { loadError = e } break default: throw new Error(`Unsupported architecture on Android ${arch}`) } break case 'win32': switch (arch) { case 'x64': localFileExisted = existsSync(join(__dirname, 'tokenizers.win32-x64-msvc.node')) try { if (localFileExisted) { nativeBinding = require('./tokenizers.win32-x64-msvc.node') } else { nativeBinding = require('tokenizers-win32-x64-msvc') } } catch (e) { loadError = e } break case 'ia32': localFileExisted = existsSync(join(__dirname, 'tokenizers.win32-ia32-msvc.node')) try { if (localFileExisted) { nativeBinding = require('./tokenizers.win32-ia32-msvc.node') } else { nativeBinding = require('tokenizers-win32-ia32-msvc') } } catch (e) { loadError = e } break case 'arm64': 
localFileExisted = existsSync(join(__dirname, 'tokenizers.win32-arm64-msvc.node')) try { if (localFileExisted) { nativeBinding = require('./tokenizers.win32-arm64-msvc.node') } else { nativeBinding = require('tokenizers-win32-arm64-msvc') } } catch (e) { loadError = e } break default: throw new Error(`Unsupported architecture on Windows: ${arch}`) } break case 'darwin': localFileExisted = existsSync(join(__dirname, 'tokenizers.darwin-universal.node')) try { if (localFileExisted) { nativeBinding = require('./tokenizers.darwin-universal.node') } else { nativeBinding = require('tokenizers-darwin-universal') } break } catch {} switch (arch) { case 'x64': localFileExisted = existsSync(join(__dirname, 'tokenizers.darwin-x64.node')) try { if (localFileExisted) { nativeBinding = require('./tokenizers.darwin-x64.node') } else { nativeBinding = require('tokenizers-darwin-x64') } } catch (e) { loadError = e } break case 'arm64': localFileExisted = existsSync(join(__dirname, 'tokenizers.darwin-arm64.node')) try { if (localFileExisted) { nativeBinding = require('./tokenizers.darwin-arm64.node') } else { nativeBinding = require('tokenizers-darwin-arm64') } } catch (e) { loadError = e } break default: throw new Error(`Unsupported architecture on macOS: ${arch}`) } break case 'freebsd': if (arch !== 'x64') { throw new Error(`Unsupported architecture on FreeBSD: ${arch}`) } localFileExisted = existsSync(join(__dirname, 'tokenizers.freebsd-x64.node')) try { if (localFileExisted) { nativeBinding = require('./tokenizers.freebsd-x64.node') } else { nativeBinding = require('tokenizers-freebsd-x64') } } catch (e) { loadError = e } break case 'linux': switch (arch) { case 'x64': if (isMusl()) { localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-x64-musl.node')) try { if (localFileExisted) { nativeBinding = require('./tokenizers.linux-x64-musl.node') } else { nativeBinding = require('tokenizers-linux-x64-musl') } } catch (e) { loadError = e } } else { localFileExisted = 
existsSync(join(__dirname, 'tokenizers.linux-x64-gnu.node')) try { if (localFileExisted) { nativeBinding = require('./tokenizers.linux-x64-gnu.node') } else { nativeBinding = require('tokenizers-linux-x64-gnu') } } catch (e) { loadError = e } } break case 'arm64': if (isMusl()) { localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-arm64-musl.node')) try { if (localFileExisted) { nativeBinding = require('./tokenizers.linux-arm64-musl.node') } else { nativeBinding = require('tokenizers-linux-arm64-musl') } } catch (e) { loadError = e } } else { localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-arm64-gnu.node')) try { if (localFileExisted) { nativeBinding = require('./tokenizers.linux-arm64-gnu.node') } else { nativeBinding = require('tokenizers-linux-arm64-gnu') } } catch (e) { loadError = e } } break case 'arm': localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-arm-gnueabihf.node')) try { if (localFileExisted) { nativeBinding = require('./tokenizers.linux-arm-gnueabihf.node') } else { nativeBinding = require('tokenizers-linux-arm-gnueabihf') } } catch (e) { loadError = e } break case 'riscv64': if (isMusl()) { localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-riscv64-musl.node')) try { if (localFileExisted) { nativeBinding = require('./tokenizers.linux-riscv64-musl.node') } else { nativeBinding = require('tokenizers-linux-riscv64-musl') } } catch (e) { loadError = e } } else { localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-riscv64-gnu.node')) try { if (localFileExisted) { nativeBinding = require('./tokenizers.linux-riscv64-gnu.node') } else { nativeBinding = require('tokenizers-linux-riscv64-gnu') } } catch (e) { loadError = e } } break case 's390x': localFileExisted = existsSync(join(__dirname, 'tokenizers.linux-s390x-gnu.node')) try { if (localFileExisted) { nativeBinding = require('./tokenizers.linux-s390x-gnu.node') } else { nativeBinding = require('tokenizers-linux-s390x-gnu') } } catch (e) 
{ loadError = e } break default: throw new Error(`Unsupported architecture on Linux: ${arch}`) } break default: throw new Error(`Unsupported OS: ${platform}, architecture: ${arch}`) } if (!nativeBinding) { if (loadError) { throw loadError } throw new Error(`Failed to load native binding`) } const { Decoder, bpeDecoder, byteFallbackDecoder, ctcDecoder, fuseDecoder, metaspaceDecoder, replaceDecoder, sequenceDecoder, stripDecoder, wordPieceDecoder, Encoding, TruncationDirection, TruncationStrategy, Model, BPE, WordPiece, WordLevel, Unigram, Normalizer, prependNormalizer, stripAccentsNormalizer, bertNormalizer, nfdNormalizer, nfkdNormalizer, nfcNormalizer, nfkcNormalizer, stripNormalizer, sequenceNormalizer, lowercase, replace, nmt, precompiled, JsSplitDelimiterBehavior, PreTokenizer, byteLevelPreTokenizer, byteLevelAlphabet, whitespacePreTokenizer, whitespaceSplitPreTokenizer, bertPreTokenizer, metaspacePreTokenizer, splitPreTokenizer, punctuationPreTokenizer, sequencePreTokenizer, charDelimiterSplit, digitsPreTokenizer, Processor, bertProcessing, robertaProcessing, byteLevelProcessing, templateProcessing, sequenceProcessing, PaddingDirection, AddedToken, Tokenizer, Trainer, slice, mergeEncodings, } = nativeBinding module.exports.Decoder = Decoder module.exports.bpeDecoder = bpeDecoder module.exports.byteFallbackDecoder = byteFallbackDecoder module.exports.ctcDecoder = ctcDecoder module.exports.fuseDecoder = fuseDecoder module.exports.metaspaceDecoder = metaspaceDecoder module.exports.replaceDecoder = replaceDecoder module.exports.sequenceDecoder = sequenceDecoder module.exports.stripDecoder = stripDecoder module.exports.wordPieceDecoder = wordPieceDecoder module.exports.Encoding = Encoding module.exports.TruncationDirection = TruncationDirection module.exports.TruncationStrategy = TruncationStrategy module.exports.Model = Model module.exports.BPE = BPE module.exports.WordPiece = WordPiece module.exports.WordLevel = WordLevel module.exports.Unigram = Unigram 
module.exports.Normalizer = Normalizer module.exports.prependNormalizer = prependNormalizer module.exports.stripAccentsNormalizer = stripAccentsNormalizer module.exports.bertNormalizer = bertNormalizer module.exports.nfdNormalizer = nfdNormalizer module.exports.nfkdNormalizer = nfkdNormalizer module.exports.nfcNormalizer = nfcNormalizer module.exports.nfkcNormalizer = nfkcNormalizer module.exports.stripNormalizer = stripNormalizer module.exports.sequenceNormalizer = sequenceNormalizer module.exports.lowercase = lowercase module.exports.replace = replace module.exports.nmt = nmt module.exports.precompiled = precompiled module.exports.JsSplitDelimiterBehavior = JsSplitDelimiterBehavior module.exports.PreTokenizer = PreTokenizer module.exports.byteLevelPreTokenizer = byteLevelPreTokenizer module.exports.byteLevelAlphabet = byteLevelAlphabet module.exports.whitespacePreTokenizer = whitespacePreTokenizer module.exports.whitespaceSplitPreTokenizer = whitespaceSplitPreTokenizer module.exports.bertPreTokenizer = bertPreTokenizer module.exports.metaspacePreTokenizer = metaspacePreTokenizer module.exports.splitPreTokenizer = splitPreTokenizer module.exports.punctuationPreTokenizer = punctuationPreTokenizer module.exports.sequencePreTokenizer = sequencePreTokenizer module.exports.charDelimiterSplit = charDelimiterSplit module.exports.digitsPreTokenizer = digitsPreTokenizer module.exports.Processor = Processor module.exports.bertProcessing = bertProcessing module.exports.robertaProcessing = robertaProcessing module.exports.byteLevelProcessing = byteLevelProcessing module.exports.templateProcessing = templateProcessing module.exports.sequenceProcessing = sequenceProcessing module.exports.PaddingDirection = PaddingDirection module.exports.AddedToken = AddedToken module.exports.Tokenizer = Tokenizer module.exports.Trainer = Trainer module.exports.slice = slice module.exports.mergeEncodings = mergeEncodings
tokenizers/bindings/node/index.js/0
{ "file_path": "tokenizers/bindings/node/index.js", "repo_id": "tokenizers", "token_count": 5374 }
323
{ "name": "tokenizers-linux-x64-musl", "version": "0.13.4-rc1", "os": [ "linux" ], "cpu": [ "x64" ], "main": "tokenizers.linux-x64-musl.node", "files": [ "tokenizers.linux-x64-musl.node" ], "description": "Tokenizers platform specific bindings", "keywords": [ "napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api" ], "license": "MIT", "engines": { "node": ">= 10" }, "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" }, "repository": "tokenizers", "libc": [ "musl" ] }
tokenizers/bindings/node/npm/linux-x64-musl/package.json/0
{ "file_path": "tokenizers/bindings/node/npm/linux-x64-musl/package.json", "repo_id": "tokenizers", "token_count": 291 }
324
use crate::arc_rwlock_serde; use serde::{Deserialize, Serialize}; extern crate tokenizers as tk; use napi::bindgen_prelude::*; use napi_derive::napi; use std::sync::{Arc, RwLock}; use tk::processors::PostProcessorWrapper; use tk::Encoding; #[derive(Clone, Serialize, Deserialize)] #[napi] pub struct Processor { #[serde(flatten, with = "arc_rwlock_serde")] processor: Option<Arc<RwLock<PostProcessorWrapper>>>, } impl tk::PostProcessor for Processor { fn added_tokens(&self, is_pair: bool) -> usize { self .processor .as_ref() .expect("Uninitialized PostProcessor") .read() .unwrap() .added_tokens(is_pair) } fn process_encodings( &self, encodings: Vec<Encoding>, add_special_tokens: bool, ) -> tk::Result<Vec<Encoding>> { self .processor .as_ref() .ok_or("Uninitialized PostProcessor")? .read() .unwrap() .process_encodings(encodings, add_special_tokens) } } #[napi] pub fn bert_processing(sep: (String, u32), cls: (String, u32)) -> Result<Processor> { Ok(Processor { processor: Some(Arc::new(RwLock::new( tk::processors::bert::BertProcessing::new(sep, cls).into(), ))), }) } #[napi] pub fn roberta_processing( sep: (String, u32), cls: (String, u32), trim_offsets: Option<bool>, add_prefix_space: Option<bool>, ) -> Result<Processor> { let trim_offsets = trim_offsets.unwrap_or(true); let add_prefix_space = add_prefix_space.unwrap_or(true); let mut processor = tk::processors::roberta::RobertaProcessing::new(sep, cls); processor = processor.trim_offsets(trim_offsets); processor = processor.add_prefix_space(add_prefix_space); Ok(Processor { processor: Some(Arc::new(RwLock::new(processor.into()))), }) } #[napi] pub fn byte_level_processing(trim_offsets: Option<bool>) -> Result<Processor> { let mut byte_level = tk::processors::byte_level::ByteLevel::default(); if let Some(trim_offsets) = trim_offsets { byte_level = byte_level.trim_offsets(trim_offsets); } Ok(Processor { processor: Some(Arc::new(RwLock::new(byte_level.into()))), }) } #[napi] pub fn template_processing( single: String, 
pair: Option<String>, special_tokens: Option<Vec<(String, u32)>>, ) -> Result<Processor> { let special_tokens = special_tokens.unwrap_or_default(); let mut builder = tk::processors::template::TemplateProcessing::builder(); builder.try_single(single).map_err(Error::from_reason)?; builder.special_tokens(special_tokens); if let Some(pair) = pair { builder.try_pair(pair).map_err(Error::from_reason)?; } let processor = builder .build() .map_err(|e| Error::from_reason(e.to_string()))?; Ok(Processor { processor: Some(Arc::new(RwLock::new(processor.into()))), }) } #[napi] pub fn sequence_processing(processors: Vec<&Processor>) -> Processor { let sequence: Vec<tk::PostProcessorWrapper> = processors .into_iter() .filter_map(|processor| { processor .processor .as_ref() .map(|processor| (**processor).read().unwrap().clone()) }) .clone() .collect(); Processor { processor: Some(Arc::new(RwLock::new(PostProcessorWrapper::Sequence( tk::processors::sequence::Sequence::new(sequence), )))), } }
tokenizers/bindings/node/src/processors.rs/0
{ "file_path": "tokenizers/bindings/node/src/processors.rs", "repo_id": "tokenizers", "token_count": 1336 }
325
<p align="center"> <br> <img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/> <br> <p> <p align="center"> <a href="https://badge.fury.io/py/tokenizers"> <img alt="Build" src="https://badge.fury.io/py/tokenizers.svg"> </a> <a href="https://github.com/huggingface/tokenizers/blob/master/LICENSE"> <img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue"> </a> </p> <br> # Tokenizers Provides an implementation of today's most used tokenizers, with a focus on performance and versatility. Bindings over the [Rust](https://github.com/huggingface/tokenizers/tree/master/tokenizers) implementation. If you are interested in the High-level design, you can go check it there. Otherwise, let's dive in! ## Main features: - Train new vocabularies and tokenize using 4 pre-made tokenizers (Bert WordPiece and the 3 most common BPE versions). - Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes less than 20 seconds to tokenize a GB of text on a server's CPU. - Easy to use, but also extremely versatile. - Designed for research and production. - Normalization comes with alignments tracking. It's always possible to get the part of the original sentence that corresponds to a given token. - Does all the pre-processing: Truncate, Pad, add the special tokens your model needs. ### Installation #### With pip: ```bash pip install tokenizers ``` #### From sources: To use this method, you need to have the Rust installed: ```bash # Install with: curl https://sh.rustup.rs -sSf | sh -s -- -y export PATH="$HOME/.cargo/bin:$PATH" ``` Once Rust is installed, you can compile doing the following ```bash git clone https://github.com/huggingface/tokenizers cd tokenizers/bindings/python # Create a virtual env (you can use yours as well) python -m venv .env source .env/bin/activate # Install `tokenizers` in the current virtual env pip install -e . 
``` ### Load a pretrained tokenizer from the Hub ```python from tokenizers import Tokenizer tokenizer = Tokenizer.from_pretrained("bert-base-cased") ``` ### Using the provided Tokenizers We provide some pre-built tokenizers to cover the most common cases. You can easily load one of these using some `vocab.json` and `merges.txt` files: ```python from tokenizers import CharBPETokenizer # Initialize a tokenizer vocab = "./path/to/vocab.json" merges = "./path/to/merges.txt" tokenizer = CharBPETokenizer(vocab, merges) # And then encode: encoded = tokenizer.encode("I can feel the magic, can you?") print(encoded.ids) print(encoded.tokens) ``` And you can train them just as simply: ```python from tokenizers import CharBPETokenizer # Initialize a tokenizer tokenizer = CharBPETokenizer() # Then train it! tokenizer.train([ "./path/to/files/1.txt", "./path/to/files/2.txt" ]) # Now, let's use it: encoded = tokenizer.encode("I can feel the magic, can you?") # And finally save it somewhere tokenizer.save("./path/to/directory/my-bpe.tokenizer.json") ``` #### Provided Tokenizers - `CharBPETokenizer`: The original BPE - `ByteLevelBPETokenizer`: The byte level version of the BPE - `SentencePieceBPETokenizer`: A BPE implementation compatible with the one used by SentencePiece - `BertWordPieceTokenizer`: The famous Bert tokenizer, using WordPiece All of these can be used and trained as explained above! ### Build your own Whenever these provided tokenizers don't give you enough freedom, you can build your own tokenizer, by putting all the different parts you need together. You can check how we implemented the [provided tokenizers](https://github.com/huggingface/tokenizers/tree/master/bindings/python/py_src/tokenizers/implementations) and adapt them easily to your own needs.
#### Building a byte-level BPE Here is an example showing how to build your own byte-level BPE by putting all the different pieces together, and then saving it to a single file: ```python from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers, processors # Initialize a tokenizer tokenizer = Tokenizer(models.BPE()) # Customize pre-tokenization and decoding tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=True) tokenizer.decoder = decoders.ByteLevel() tokenizer.post_processor = processors.ByteLevel(trim_offsets=True) # And then train trainer = trainers.BpeTrainer( vocab_size=20000, min_frequency=2, initial_alphabet=pre_tokenizers.ByteLevel.alphabet() ) tokenizer.train([ "./path/to/dataset/1.txt", "./path/to/dataset/2.txt", "./path/to/dataset/3.txt" ], trainer=trainer) # And Save it tokenizer.save("byte-level-bpe.tokenizer.json", pretty=True) ``` Now, when you want to use this tokenizer, this is as simple as: ```python from tokenizers import Tokenizer tokenizer = Tokenizer.from_file("byte-level-bpe.tokenizer.json") encoded = tokenizer.encode("I can feel the magic, can you?") ```
tokenizers/bindings/python/README.md/0
{ "file_path": "tokenizers/bindings/python/README.md", "repo_id": "tokenizers", "token_count": 1621 }
326
from typing import Dict, Iterator, List, Optional, Tuple, Union

from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers
from tokenizers.models import BPE
from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str

from .base_tokenizer import BaseTokenizer


class ByteLevelBPETokenizer(BaseTokenizer):
    """ByteLevelBPETokenizer

    Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model

    Args:
        vocab: Path to a ``vocab.json`` file, or a ``token -> id`` mapping.
        merges: Path to a ``merges.txt`` file, or a list of merge pairs.
            ``vocab`` and ``merges`` must be provided together; otherwise an
            empty, trainable BPE model is created.
        add_prefix_space: Whether the byte-level pre-tokenizer adds a leading
            space to the first word.
        lowercase: Whether to lowercase the input during normalization.
        dropout: Dropout to use for BPE merges, if any.
        unicode_normalizer: Optional unicode normalizer name (resolved via
            ``unicode_normalizer_from_str``).
        continuing_subword_prefix: Prefix used for subwords that do not start
            a word.
        end_of_word_suffix: Suffix used for subwords that end a word.
        trim_offsets: Whether the byte-level post-processor trims offsets.
    """

    def __init__(
        self,
        vocab: Optional[Union[str, Dict[str, int]]] = None,
        merges: Optional[Union[str, List[Tuple[str, str]]]] = None,
        add_prefix_space: bool = False,
        lowercase: bool = False,
        dropout: Optional[float] = None,
        unicode_normalizer: Optional[str] = None,
        continuing_subword_prefix: Optional[str] = None,
        end_of_word_suffix: Optional[str] = None,
        trim_offsets: bool = False,
    ):
        if vocab is not None and merges is not None:
            tokenizer = Tokenizer(
                BPE(
                    vocab,
                    merges,
                    dropout=dropout,
                    continuing_subword_prefix=continuing_subword_prefix or "",
                    end_of_word_suffix=end_of_word_suffix or "",
                )
            )
        else:
            # No pretrained vocab/merges: start from an empty model that can
            # be trained with `train` / `train_from_iterator`.
            tokenizer = Tokenizer(BPE())

        # Check for Unicode normalization first (before everything else)
        normalizers = []

        if unicode_normalizer:
            normalizers += [unicode_normalizer_from_str(unicode_normalizer)]

        if lowercase:
            normalizers += [Lowercase()]

        # Create the normalizer structure: a single normalizer is attached
        # directly, multiple ones are chained in a Sequence.
        if len(normalizers) > 0:
            if len(normalizers) > 1:
                tokenizer.normalizer = Sequence(normalizers)
            else:
                tokenizer.normalizer = normalizers[0]

        tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
        tokenizer.decoder = decoders.ByteLevel()
        tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets)

        parameters = {
            "model": "ByteLevelBPE",
            "add_prefix_space": add_prefix_space,
            "lowercase": lowercase,
            "dropout": dropout,
            "unicode_normalizer": unicode_normalizer,
            "continuing_subword_prefix": continuing_subword_prefix,
            "end_of_word_suffix": end_of_word_suffix,
            "trim_offsets": trim_offsets,
        }

        super().__init__(tokenizer, parameters)

    @staticmethod
    def from_file(vocab_filename: str, merges_filename: str, **kwargs):
        """Instantiate a tokenizer from ``vocab.json`` / ``merges.txt`` files."""
        vocab, merges = BPE.read_file(vocab_filename, merges_filename)
        return ByteLevelBPETokenizer(vocab, merges, **kwargs)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        show_progress: bool = True,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
    ):
        """Train the model using the given files.

        Args:
            files: A path, or list of paths, to raw text files.
            vocab_size: Target size of the final vocabulary.
            min_frequency: Minimum pair frequency required to merge.
            show_progress: Whether to display progress bars while training.
            special_tokens: Special tokens the model should know of.
        """
        # fix: `None` sentinel instead of a mutable `[]` default, so a single
        # list object is not shared across calls.
        special_tokens = [] if special_tokens is None else special_tokens
        trainer = trainers.BpeTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            show_progress=show_progress,
            special_tokens=special_tokens,
            initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        show_progress: bool = True,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
        length: Optional[int] = None,
    ):
        """Train the model using the given iterator.

        Args:
            iterator: An iterator of raw texts (or of pre-tokenized texts).
            vocab_size: Target size of the final vocabulary.
            min_frequency: Minimum pair frequency required to merge.
            show_progress: Whether to display progress bars while training.
            special_tokens: Special tokens the model should know of.
            length: Total number of sequences in the iterator, used for the
                progress bar when available.
        """
        # fix: `None` sentinel instead of a mutable `[]` default (see `train`).
        special_tokens = [] if special_tokens is None else special_tokens
        trainer = trainers.BpeTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            show_progress=show_progress,
            special_tokens=special_tokens,
            initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
        )
        self._tokenizer.train_from_iterator(
            iterator,
            trainer=trainer,
            length=length,
        )
tokenizers/bindings/python/py_src/tokenizers/implementations/byte_level_bpe.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/byte_level_bpe.py", "repo_id": "tokenizers", "token_count": 1970 }
327
# Generated content DO NOT EDIT
# NOTE(review): docstring typo fixes below ("WordLevel", "an end-of-word")
# should also be applied in the generator's source doc comments, since this
# file is regenerated — verify where these stubs are produced from.
class Trainer:
    """
    Base class for all trainers

    This class is not supposed to be instantiated directly. Instead, any implementation of a
    Trainer will return an instance of this class when instantiated.
    """

class BpeTrainer(Trainer):
    """
    Trainer capable of training a BPE model

    Args:
        vocab_size (:obj:`int`, `optional`):
            The size of the final vocabulary, including all tokens and alphabet.

        min_frequency (:obj:`int`, `optional`):
            The minimum frequency a pair should have in order to be merged.

        show_progress (:obj:`bool`, `optional`):
            Whether to show progress bars while training.

        special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
            A list of special tokens the model should know of.

        limit_alphabet (:obj:`int`, `optional`):
            The maximum different characters to keep in the alphabet.

        initial_alphabet (:obj:`List[str]`, `optional`):
            A list of characters to include in the initial alphabet, even
            if not seen in the training dataset.
            If the strings contain more than one character, only the first one
            is kept.

        continuing_subword_prefix (:obj:`str`, `optional`):
            A prefix to be used for every subword that is not a beginning-of-word.

        end_of_word_suffix (:obj:`str`, `optional`):
            A suffix to be used for every subword that is an end-of-word.

        max_token_length (:obj:`int`, `optional`):
            Prevents creating tokens longer than the specified size.
            This can help with reducing polluting your vocabulary with
            highly repetitive tokens like `======` for wikipedia
    """

class UnigramTrainer(Trainer):
    """
    Trainer capable of training a Unigram model

    Args:
        vocab_size (:obj:`int`):
            The size of the final vocabulary, including all tokens and alphabet.

        show_progress (:obj:`bool`):
            Whether to show progress bars while training.

        special_tokens (:obj:`List[Union[str, AddedToken]]`):
            A list of special tokens the model should know of.

        initial_alphabet (:obj:`List[str]`):
            A list of characters to include in the initial alphabet, even
            if not seen in the training dataset.
            If the strings contain more than one character, only the first one
            is kept.

        shrinking_factor (:obj:`float`):
            The shrinking factor used at each step of the training to prune the vocabulary.

        unk_token (:obj:`str`):
            The token used for out-of-vocabulary tokens.

        max_piece_length (:obj:`int`):
            The maximum length of a given token.

        n_sub_iterations (:obj:`int`):
            The number of iterations of the EM algorithm to perform before
            pruning the vocabulary.
    """
    def __init__(
        self,
        vocab_size=8000,
        show_progress=True,
        special_tokens=[],
        shrinking_factor=0.75,
        unk_token=None,
        max_piece_length=16,
        n_sub_iterations=2,
    ):
        pass

class WordLevelTrainer(Trainer):
    """
    Trainer capable of training a WordLevel model

    Args:
        vocab_size (:obj:`int`, `optional`):
            The size of the final vocabulary, including all tokens and alphabet.

        min_frequency (:obj:`int`, `optional`):
            The minimum frequency a pair should have in order to be merged.

        show_progress (:obj:`bool`, `optional`):
            Whether to show progress bars while training.

        special_tokens (:obj:`List[Union[str, AddedToken]]`):
            A list of special tokens the model should know of.
    """

class WordPieceTrainer(Trainer):
    """
    Trainer capable of training a WordPiece model

    Args:
        vocab_size (:obj:`int`, `optional`):
            The size of the final vocabulary, including all tokens and alphabet.

        min_frequency (:obj:`int`, `optional`):
            The minimum frequency a pair should have in order to be merged.

        show_progress (:obj:`bool`, `optional`):
            Whether to show progress bars while training.

        special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
            A list of special tokens the model should know of.

        limit_alphabet (:obj:`int`, `optional`):
            The maximum different characters to keep in the alphabet.

        initial_alphabet (:obj:`List[str]`, `optional`):
            A list of characters to include in the initial alphabet, even
            if not seen in the training dataset.
            If the strings contain more than one character, only the first one
            is kept.

        continuing_subword_prefix (:obj:`str`, `optional`):
            A prefix to be used for every subword that is not a beginning-of-word.

        end_of_word_suffix (:obj:`str`, `optional`):
            A suffix to be used for every subword that is an end-of-word.
    """
    def __init__(
        self,
        vocab_size=30000,
        min_frequency=0,
        show_progress=True,
        special_tokens=[],
        limit_alphabet=None,
        initial_alphabet=[],
        continuing_subword_prefix="##",
        end_of_word_suffix=None,
    ):
        pass
tokenizers/bindings/python/py_src/tokenizers/trainers/__init__.pyi/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/trainers/__init__.pyi", "repo_id": "tokenizers", "token_count": 2178 }
328
use serde::Serialize; use std::collections::{hash_map::DefaultHasher, HashMap}; use std::hash::{Hash, Hasher}; use numpy::{npyffi, PyArray1, PyArrayMethods}; use pyo3::class::basic::CompareOp; use pyo3::exceptions; use pyo3::intern; use pyo3::prelude::*; use pyo3::types::*; use tk::models::bpe::BPE; use tk::tokenizer::{ Model, PaddingDirection, PaddingParams, PaddingStrategy, PostProcessor, TokenizerImpl, TruncationDirection, TruncationParams, TruncationStrategy, }; use tk::utils::iter::ResultShunt; use tokenizers as tk; use super::decoders::PyDecoder; use super::encoding::PyEncoding; use super::error::{PyError, ToPyResult}; use super::models::PyModel; use super::normalizers::PyNormalizer; use super::pre_tokenizers::PyPreTokenizer; use super::trainers::PyTrainer; use crate::processors::PyPostProcessor; use crate::utils::{MaybeSizedIterator, PyBufferedIterator}; use std::collections::BTreeMap; /// Represents a token that can be be added to a :class:`~tokenizers.Tokenizer`. /// It can have special options that defines the way it should behave. /// /// Args: /// content (:obj:`str`): The content of the token /// /// single_word (:obj:`bool`, defaults to :obj:`False`): /// Defines whether this token should only match single words. If :obj:`True`, this /// token will never match inside of a word. For example the token ``ing`` would match /// on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`. /// The notion of "`inside of a word`" is defined by the word boundaries pattern in /// regular expressions (ie. the token should start and end with word boundaries). /// /// lstrip (:obj:`bool`, defaults to :obj:`False`): /// Defines whether this token should strip all potential whitespaces on its left side. /// If :obj:`True`, this token will greedily match any whitespace on its left. For /// example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text /// ``"I saw a [MASK]"``, we would match on ``" [MASK]"``. 
(Note the space on the left). /// /// rstrip (:obj:`bool`, defaults to :obj:`False`): /// Defines whether this token should strip all potential whitespaces on its right /// side. If :obj:`True`, this token will greedily match any whitespace on its right. /// It works just like :obj:`lstrip` but on the right. /// /// normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`): /// Defines whether this token should match against the normalized version of the input /// text. For example, with the added token ``"yesterday"``, and a normalizer in charge of /// lowercasing the text, the token could be extract from the input ``"I saw a lion /// Yesterday"``. /// special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`): /// Defines whether this token should be skipped when decoding. /// #[pyclass(dict, module = "tokenizers", name = "AddedToken")] pub struct PyAddedToken { pub content: String, pub special: bool, pub single_word: Option<bool>, pub lstrip: Option<bool>, pub rstrip: Option<bool>, pub normalized: Option<bool>, } impl PyAddedToken { pub fn from<S: Into<String>>(content: S, special: Option<bool>) -> Self { Self { content: content.into(), special: special.unwrap_or(false), single_word: None, lstrip: None, rstrip: None, normalized: None, } } pub fn get_token(&self) -> tk::tokenizer::AddedToken { let mut token = tk::AddedToken::from(&self.content, self.special); if let Some(sw) = self.single_word { token = token.single_word(sw); } if let Some(ls) = self.lstrip { token = token.lstrip(ls); } if let Some(rs) = self.rstrip { token = token.rstrip(rs); } if let Some(n) = self.normalized { token = token.normalized(n); } token } pub fn as_pydict<'py>(&self, py: Python<'py>) -> PyResult<Bound<'py, PyDict>> { let dict = PyDict::new(py); let token = 
self.get_token(); dict.set_item("content", token.content)?; dict.set_item("single_word", token.single_word)?; dict.set_item("lstrip", token.lstrip)?; dict.set_item("rstrip", token.rstrip)?; dict.set_item("normalized", token.normalized)?; dict.set_item("special", token.special)?; Ok(dict) } } impl From<tk::AddedToken> for PyAddedToken { fn from(token: tk::AddedToken) -> Self { Self { content: token.content, single_word: Some(token.single_word), lstrip: Some(token.lstrip), rstrip: Some(token.rstrip), normalized: Some(token.normalized), special: token.special, } } } #[pymethods] impl PyAddedToken { #[new] #[pyo3(signature = (content=None, **kwargs), text_signature = "(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False)")] fn __new__(content: Option<&str>, kwargs: Option<&Bound<'_, PyDict>>) -> PyResult<Self> { let mut token = PyAddedToken::from(content.unwrap_or(""), None); if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: String = key.extract()?; match key.as_ref() { "single_word" => token.single_word = Some(value.extract()?), "lstrip" => token.lstrip = Some(value.extract()?), "rstrip" => token.rstrip = Some(value.extract()?), "normalized" => token.normalized = Some(value.extract()?), "special" => token.special = value.extract()?, _ => println!("Ignored unknown kwarg option {key}"), } } } Ok(token) } fn __getstate__<'py>(&self, py: Python<'py>) -> PyResult<Bound<'py, PyDict>> { self.as_pydict(py) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.downcast_bound::<PyDict>(py) { Ok(state) => { for (key, value) in state { let key: String = key.extract()?; match key.as_ref() { "content" => self.content = value.extract()?, "single_word" => self.single_word = Some(value.extract()?), "lstrip" => self.lstrip = Some(value.extract()?), "rstrip" => self.rstrip = Some(value.extract()?), "normalized" => self.normalized = Some(value.extract()?), "special" => self.special = 
value.extract()?, _ => {} } } Ok(()) } Err(e) => Err(e.into()), } } /// Get the content of this :obj:`AddedToken` #[getter] fn get_content(&self) -> &str { &self.content } /// Set the content of this :obj:`AddedToken` #[setter] fn set_content(&mut self, content: String) { self.content = content; } /// Get the value of the :obj:`rstrip` option #[getter] fn get_rstrip(&self) -> bool { self.get_token().rstrip } /// Get the value of the :obj:`lstrip` option #[getter] fn get_lstrip(&self) -> bool { self.get_token().lstrip } /// Get the value of the :obj:`single_word` option #[getter] fn get_single_word(&self) -> bool { self.get_token().single_word } /// Get the value of the :obj:`normalized` option #[getter] fn get_normalized(&self) -> bool { self.get_token().normalized } /// Get the value of the :obj:`special` option #[getter] fn get_special(&self) -> bool { self.get_token().special } /// Set the value of the :obj:`special` option #[setter] fn set_special(&mut self, special: bool) { self.special = special; } fn __str__(&self) -> PyResult<&str> { Ok(&self.content) } fn __repr__(&self) -> PyResult<String> { let bool_to_python = |p| match p { true => "True", false => "False", }; let token = self.get_token(); Ok(format!( "AddedToken(\"{}\", rstrip={}, lstrip={}, single_word={}, normalized={}, special={})", self.content, bool_to_python(token.rstrip), bool_to_python(token.lstrip), bool_to_python(token.single_word), bool_to_python(token.normalized), bool_to_python(token.special) )) } fn __richcmp__(&self, other: Py<PyAddedToken>, op: CompareOp) -> bool { use CompareOp::*; Python::with_gil(|py| match op { Lt | Le | Gt | Ge => false, Eq => self.get_token() == other.borrow(py).get_token(), Ne => self.get_token() != other.borrow(py).get_token(), }) } fn __hash__(&self) -> u64 { let mut hasher = DefaultHasher::new(); self.get_token().hash(&mut hasher); hasher.finish() } } struct TextInputSequence<'s>(tk::InputSequence<'s>); impl<'s> FromPyObject<'s> for TextInputSequence<'s> { fn 
extract_bound(ob: &Bound<'s, PyAny>) -> PyResult<Self> { let err = exceptions::PyTypeError::new_err("TextInputSequence must be str"); if let Ok(s) = ob.extract::<String>() { Ok(Self(s.into())) } else { Err(err) } } } impl<'s> From<TextInputSequence<'s>> for tk::InputSequence<'s> { fn from(s: TextInputSequence<'s>) -> Self { s.0 } } struct PyArrayUnicode(Vec<String>); impl FromPyObject<'_> for PyArrayUnicode { fn extract_bound(ob: &Bound<'_, PyAny>) -> PyResult<Self> { // SAFETY Making sure the pointer is a valid numpy array requires calling numpy C code if unsafe { npyffi::PyArray_Check(ob.py(), ob.as_ptr()) } == 0 { return Err(exceptions::PyTypeError::new_err("Expected an np.array")); } let arr = ob.as_ptr() as *mut npyffi::PyArrayObject; // SAFETY Getting all the metadata about the numpy array to check its sanity let (type_num, elsize, _alignment, data, nd, flags) = unsafe { let desc = (*arr).descr; ( (*desc).type_num, npyffi::PyDataType_ELSIZE(ob.py(), desc) as usize, npyffi::PyDataType_ALIGNMENT(ob.py(), desc) as usize, (*arr).data, (*arr).nd, (*arr).flags, ) }; if nd != 1 { return Err(exceptions::PyTypeError::new_err( "Expected a 1 dimensional np.array", )); } if flags & (npyffi::NPY_ARRAY_C_CONTIGUOUS | npyffi::NPY_ARRAY_F_CONTIGUOUS) == 0 { return Err(exceptions::PyTypeError::new_err( "Expected a contiguous np.array", )); } if type_num != npyffi::types::NPY_TYPES::NPY_UNICODE as i32 { return Err(exceptions::PyTypeError::new_err( "Expected a np.array[dtype='U']", )); } // SAFETY Looking at the raw numpy data to create new owned Rust strings via copies (so it's safe afterwards). 
unsafe { let n_elem = *(*arr).dimensions as usize; let all_bytes = std::slice::from_raw_parts(data as *const u8, elsize * n_elem); let seq = (0..n_elem) .map(|i| { let bytes = &all_bytes[i * elsize..(i + 1) * elsize]; Ok(std::str::from_utf8(bytes)?.to_owned()) // let unicode = pyo3::ffi::PyUnicode_FromKindAndData( // pyo3::ffi::PyUnicode_4BYTE_KIND as _, // bytes.as_ptr() as *const _, // elsize as isize / alignment as isize, // ); // let py = ob.py(); // let obj = PyObject::from_owned_ptr(py, unicode); // let s = obj.downcast_bound::<PyString>(py)?; // Ok(s.to_string_lossy().trim_matches(char::from(0)).to_owned()) }) .collect::<PyResult<Vec<_>>>()?; Ok(Self(seq)) } } } impl From<PyArrayUnicode> for tk::InputSequence<'_> { fn from(s: PyArrayUnicode) -> Self { s.0.into() } } struct PyArrayStr(Vec<String>); impl FromPyObject<'_> for PyArrayStr { fn extract_bound(ob: &Bound<'_, PyAny>) -> PyResult<Self> { let array = ob.downcast::<PyArray1<PyObject>>()?; let seq = array .readonly() .as_array() .iter() .map(|obj| { let s = obj.downcast_bound::<PyString>(ob.py())?; Ok(s.to_string_lossy().into_owned()) }) .collect::<PyResult<Vec<_>>>()?; Ok(Self(seq)) } } impl From<PyArrayStr> for tk::InputSequence<'_> { fn from(s: PyArrayStr) -> Self { s.0.into() } } struct PreTokenizedInputSequence<'s>(tk::InputSequence<'s>); impl<'s> FromPyObject<'s> for PreTokenizedInputSequence<'s> { fn extract_bound(ob: &Bound<'s, PyAny>) -> PyResult<Self> { if let Ok(seq) = ob.extract::<PyArrayUnicode>() { return Ok(Self(seq.into())); } if let Ok(seq) = ob.extract::<PyArrayStr>() { return Ok(Self(seq.into())); } if let Ok(s) = ob.downcast::<PyList>() { if let Ok(seq) = s.extract::<Vec<String>>() { return Ok(Self(seq.into())); } } if let Ok(s) = ob.downcast::<PyTuple>() { if let Ok(seq) = s.extract::<Vec<String>>() { return Ok(Self(seq.into())); } } Err(exceptions::PyTypeError::new_err( "PreTokenizedInputSequence must be Union[List[str], Tuple[str]]", )) } } impl<'s> 
From<PreTokenizedInputSequence<'s>> for tk::InputSequence<'s> { fn from(s: PreTokenizedInputSequence<'s>) -> Self { s.0 } } struct TextEncodeInput<'s>(tk::EncodeInput<'s>); impl<'s> FromPyObject<'s> for TextEncodeInput<'s> { fn extract_bound(ob: &Bound<'s, PyAny>) -> PyResult<Self> { if let Ok(i) = ob.extract::<TextInputSequence>() { return Ok(Self(i.into())); } if let Ok((i1, i2)) = ob.extract::<(TextInputSequence, TextInputSequence)>() { return Ok(Self((i1, i2).into())); } if let Ok(arr) = ob.extract::<Vec<Bound<PyAny>>>() { if arr.len() == 2 { let first = arr[0].extract::<TextInputSequence>()?; let second = arr[1].extract::<TextInputSequence>()?; return Ok(Self((first, second).into())); } } Err(exceptions::PyTypeError::new_err( "TextEncodeInput must be Union[TextInputSequence, Tuple[InputSequence, InputSequence]]", )) } } impl<'s> From<TextEncodeInput<'s>> for tk::tokenizer::EncodeInput<'s> { fn from(i: TextEncodeInput<'s>) -> Self { i.0 } } struct PreTokenizedEncodeInput<'s>(tk::EncodeInput<'s>); impl<'s> FromPyObject<'s> for PreTokenizedEncodeInput<'s> { fn extract_bound(ob: &Bound<'s, PyAny>) -> PyResult<Self> { if let Ok(i) = ob.extract::<PreTokenizedInputSequence>() { return Ok(Self(i.into())); } if let Ok((i1, i2)) = ob.extract::<(PreTokenizedInputSequence, PreTokenizedInputSequence)>() { return Ok(Self((i1, i2).into())); } if let Ok(arr) = ob.extract::<Vec<Bound<PyAny>>>() { if arr.len() == 2 { let first = arr[0].extract::<PreTokenizedInputSequence>()?; let second = arr[1].extract::<PreTokenizedInputSequence>()?; return Ok(Self((first, second).into())); } } Err(exceptions::PyTypeError::new_err( "PreTokenizedEncodeInput must be Union[PreTokenizedInputSequence, \ Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence]]", )) } } impl<'s> From<PreTokenizedEncodeInput<'s>> for tk::tokenizer::EncodeInput<'s> { fn from(i: PreTokenizedEncodeInput<'s>) -> Self { i.0 } } type Tokenizer = TokenizerImpl<PyModel, PyNormalizer, PyPreTokenizer, PyPostProcessor, 
PyDecoder>; /// A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input /// and outputs an :class:`~tokenizers.Encoding`. /// /// Args: /// model (:class:`~tokenizers.models.Model`): /// The core algorithm that this :obj:`Tokenizer` should be using. /// #[pyclass(dict, module = "tokenizers", name = "Tokenizer")] #[derive(Clone, Serialize)] #[serde(transparent)] pub struct PyTokenizer { pub(crate) tokenizer: Tokenizer, } impl PyTokenizer { fn new(tokenizer: Tokenizer) -> Self { PyTokenizer { tokenizer } } fn from_model(model: PyModel) -> Self { PyTokenizer::new(TokenizerImpl::new(model)) } } #[pymethods] impl PyTokenizer { #[new] #[pyo3(text_signature = "(self, model)")] fn __new__(model: PyRef<PyModel>) -> Self { PyTokenizer::from_model(model.clone()) } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.tokenizer).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Tokenizer: {e}" )) })?; Ok(PyBytes::new(py, data.as_bytes()).into()) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&[u8]>(py) { Ok(s) => { self.tokenizer = serde_json::from_slice(s).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Tokenizer: {e}" )) })?; Ok(()) } Err(e) => Err(e), } } fn __getnewargs__<'p>(&self, py: Python<'p>) -> PyResult<Bound<'p, PyTuple>> { let model: PyObject = PyModel::from(BPE::default()) .into_pyobject(py)? .into_any() .into(); PyTuple::new(py, vec![model]) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string. 
/// /// Args: /// json (:obj:`str`): /// A valid JSON string representing a previously serialized /// :class:`~tokenizers.Tokenizer` /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(text_signature = "(json)")] fn from_str(json: &str) -> PyResult<Self> { let tokenizer: PyResult<_> = ToPyResult(json.parse()).into(); Ok(Self::new(tokenizer?)) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path. /// /// Args: /// path (:obj:`str`): /// A path to a local JSON file representing a previously serialized /// :class:`~tokenizers.Tokenizer` /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(text_signature = "(path)")] fn from_file(path: &str) -> PyResult<Self> { let tokenizer: PyResult<_> = ToPyResult(Tokenizer::from_file(path)).into(); Ok(Self::new(tokenizer?)) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer. /// /// Args: /// buffer (:obj:`bytes`): /// A buffer containing a previously serialized :class:`~tokenizers.Tokenizer` /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(text_signature = "(buffer)")] fn from_buffer(buffer: &Bound<'_, PyBytes>) -> PyResult<Self> { let tokenizer = serde_json::from_slice(buffer.as_bytes()).map_err(|e| { exceptions::PyValueError::new_err(format!( "Cannot instantiate Tokenizer from buffer: {e}" )) })?; Ok(Self { tokenizer }) } /// Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the /// Hugging Face Hub. 
/// /// Args: /// identifier (:obj:`str`): /// The identifier of a Model on the Hugging Face Hub, that contains /// a tokenizer.json file /// revision (:obj:`str`, defaults to `main`): /// A branch or commit id /// token (:obj:`str`, `optional`, defaults to `None`): /// An optional auth token used to access private repositories on the /// Hugging Face Hub /// /// Returns: /// :class:`~tokenizers.Tokenizer`: The new tokenizer #[staticmethod] #[pyo3(signature = (identifier, revision = String::from("main"), token = None))] #[pyo3(text_signature = "(identifier, revision=\"main\", token=None)")] fn from_pretrained( identifier: &str, revision: String, token: Option<String>, ) -> PyResult<Self> { let path = Python::with_gil(|py| -> PyResult<String> { let huggingface_hub = PyModule::import(py, intern!(py, "huggingface_hub"))?; let hf_hub_download = huggingface_hub.getattr(intern!(py, "hf_hub_download"))?; let kwargs = [ (intern!(py, "repo_id"), identifier), (intern!(py, "filename"), "tokenizer.json"), (intern!(py, "revision"), &revision), ] .into_py_dict(py)?; if let Some(token) = token { kwargs.set_item(intern!(py, "token"), token)?; } let path: String = hf_hub_download.call((), Some(&kwargs))?.extract()?; Ok(path) })?; let tokenizer: PyResult<_> = ToPyResult(Tokenizer::from_file(path)).into(); Ok(Self::new(tokenizer?)) } /// Gets a serialized string representing this :class:`~tokenizers.Tokenizer`. /// /// Args: /// pretty (:obj:`bool`, defaults to :obj:`False`): /// Whether the JSON string should be pretty formatted. /// /// Returns: /// :obj:`str`: A string representing the serialized Tokenizer #[pyo3(signature = (pretty = false))] #[pyo3(text_signature = "(self, pretty=False)")] fn to_str(&self, pretty: bool) -> PyResult<String> { ToPyResult(self.tokenizer.to_string(pretty)).into() } /// Save the :class:`~tokenizers.Tokenizer` to the file at the given path. /// /// Args: /// path (:obj:`str`): /// A path to a file in which to save the serialized tokenizer. 
/// /// pretty (:obj:`bool`, defaults to :obj:`True`): /// Whether the JSON file should be pretty formatted. #[pyo3(signature = (path, pretty = true))] #[pyo3(text_signature = "(self, path, pretty=True)")] fn save(&self, path: &str, pretty: bool) -> PyResult<()> { ToPyResult(self.tokenizer.save(path, pretty)).into() } fn __repr__(&self) -> PyResult<String> { crate::utils::serde_pyo3::repr(self) .map_err(|e| exceptions::PyException::new_err(e.to_string())) } fn __str__(&self) -> PyResult<String> { crate::utils::serde_pyo3::to_string(self) .map_err(|e| exceptions::PyException::new_err(e.to_string())) } /// Return the number of special tokens that would be added for single/pair sentences. /// :param is_pair: Boolean indicating if the input would be a single sentence or a pair /// :return: #[pyo3(text_signature = "(self, is_pair)")] fn num_special_tokens_to_add(&self, is_pair: bool) -> usize { self.tokenizer .get_post_processor() .map_or(0, |p| p.added_tokens(is_pair)) } /// Get the underlying vocabulary /// /// Args: /// with_added_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to include the added tokens /// /// Returns: /// :obj:`Dict[str, int]`: The vocabulary #[pyo3(signature = (with_added_tokens = true))] #[pyo3(text_signature = "(self, with_added_tokens=True)")] fn get_vocab(&self, with_added_tokens: bool) -> HashMap<String, u32> { self.tokenizer.get_vocab(with_added_tokens) } /// Get the underlying vocabulary /// /// Returns: /// :obj:`Dict[int, AddedToken]`: The vocabulary #[pyo3(signature = ())] #[pyo3(text_signature = "(self)")] fn get_added_tokens_decoder(&self) -> BTreeMap<u32, PyAddedToken> { let mut sorted_map = BTreeMap::new(); for (key, value) in self.tokenizer.get_added_tokens_decoder() { sorted_map.insert(key, value.into()); } sorted_map } /// Get the size of the underlying vocabulary /// /// Args: /// with_added_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to include the added tokens /// /// Returns: /// :obj:`int`: The 
size of the vocabulary #[pyo3(signature = (with_added_tokens = true))] #[pyo3(text_signature = "(self, with_added_tokens=True)")] fn get_vocab_size(&self, with_added_tokens: bool) -> usize { self.tokenizer.get_vocab_size(with_added_tokens) } /// Enable truncation /// /// Args: /// max_length (:obj:`int`): /// The max length at which to truncate /// /// stride (:obj:`int`, `optional`): /// The length of the previous first sequence to be included in the overflowing /// sequence /// /// strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`): /// The strategy used to truncation. Can be one of ``longest_first``, ``only_first`` or /// ``only_second``. /// /// direction (:obj:`str`, defaults to :obj:`right`): /// Truncate direction #[pyo3(signature = (max_length, **kwargs))] #[pyo3( text_signature = "(self, max_length, stride=0, strategy='longest_first', direction='right')" )] fn enable_truncation( &mut self, max_length: usize, kwargs: Option<&Bound<'_, PyDict>>, ) -> PyResult<()> { let mut params = TruncationParams { max_length, ..Default::default() }; if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: String = key.extract()?; match key.as_ref() { "stride" => params.stride = value.extract()?, "strategy" => { let value: String = value.extract()?; params.strategy = match value.as_ref() { "longest_first" => Ok(TruncationStrategy::LongestFirst), "only_first" => Ok(TruncationStrategy::OnlyFirst), "only_second" => Ok(TruncationStrategy::OnlySecond), _ => Err(PyError(format!( "Unknown `strategy`: `{value}`. Use \ one of `longest_first`, `only_first`, or `only_second`" )) .into_pyerr::<exceptions::PyValueError>()), }? } "direction" => { let value: String = value.extract()?; params.direction = match value.as_ref() { "left" => Ok(TruncationDirection::Left), "right" => Ok(TruncationDirection::Right), _ => Err(PyError(format!( "Unknown `direction`: `{value}`. Use \ one of `left` or `right`." )) .into_pyerr::<exceptions::PyValueError>()), }? 
} _ => println!("Ignored unknown kwarg option {key}"), } } } if let Err(error_message) = self.tokenizer.with_truncation(Some(params)) { return Err(PyError(error_message.to_string()).into_pyerr::<exceptions::PyValueError>()); } Ok(()) } /// Disable truncation #[pyo3(text_signature = "(self)")] fn no_truncation(&mut self) { self.tokenizer .with_truncation(None) .expect("Failed to set truncation to `None`! This should never happen"); } /// Get the currently set truncation parameters /// /// `Cannot set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead` /// /// Returns: /// (:obj:`dict`, `optional`): /// A dict with the current truncation parameters if truncation is enabled #[getter] fn get_truncation<'py>(&self, py: Python<'py>) -> PyResult<Option<Bound<'py, PyDict>>> { self.tokenizer.get_truncation().map_or(Ok(None), |params| { let dict = PyDict::new(py); dict.set_item("max_length", params.max_length)?; dict.set_item("stride", params.stride)?; dict.set_item("strategy", params.strategy.as_ref())?; dict.set_item("direction", params.direction.as_ref())?; Ok(Some(dict)) }) } /// Enable the padding /// /// Args: /// direction (:obj:`str`, `optional`, defaults to :obj:`right`): /// The direction in which to pad. Can be either ``right`` or ``left`` /// /// pad_to_multiple_of (:obj:`int`, `optional`): /// If specified, the padding length should always snap to the next multiple of the /// given value. For example if we were going to pad witha length of 250 but /// ``pad_to_multiple_of=8`` then we will pad to 256. /// /// pad_id (:obj:`int`, defaults to 0): /// The id to be used when padding /// /// pad_type_id (:obj:`int`, defaults to 0): /// The type id to be used when padding /// /// pad_token (:obj:`str`, defaults to :obj:`[PAD]`): /// The pad token to be used when padding /// /// length (:obj:`int`, `optional`): /// If specified, the length at which to pad. If not specified we pad using the size of /// the longest sequence in a batch. 
#[pyo3(signature = (**kwargs))] #[pyo3( text_signature = "(self, direction='right', pad_id=0, pad_type_id=0, pad_token='[PAD]', length=None, pad_to_multiple_of=None)" )] fn enable_padding(&mut self, kwargs: Option<&Bound<'_, PyDict>>) -> PyResult<()> { let mut params = PaddingParams::default(); if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: String = key.extract()?; match key.as_ref() { "direction" => { let value: String = value.extract()?; params.direction = match value.as_ref() { "left" => Ok(PaddingDirection::Left), "right" => Ok(PaddingDirection::Right), other => Err(PyError(format!( "Unknown `direction`: `{other}`. Use \ one of `left` or `right`" )) .into_pyerr::<exceptions::PyValueError>()), }?; } "pad_to_multiple_of" => { if let Some(multiple) = value.extract()? { params.pad_to_multiple_of = multiple; } } "pad_id" => params.pad_id = value.extract()?, "pad_type_id" => params.pad_type_id = value.extract()?, "pad_token" => params.pad_token = value.extract()?, "max_length" => { println!( "enable_padding(max_length=X) is deprecated, \ use enable_padding(length=X) instead" ); if let Some(l) = value.extract()? { params.strategy = PaddingStrategy::Fixed(l); } else { params.strategy = PaddingStrategy::BatchLongest; } } "length" => { if let Some(l) = value.extract()? 
{ params.strategy = PaddingStrategy::Fixed(l); } else { params.strategy = PaddingStrategy::BatchLongest; } } _ => println!("Ignored unknown kwarg option {key}"), } } } self.tokenizer.with_padding(Some(params)); Ok(()) } /// Disable padding #[pyo3(text_signature = "(self)")] fn no_padding(&mut self) { self.tokenizer.with_padding(None); } /// Get the current padding parameters /// /// `Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead` /// /// Returns: /// (:obj:`dict`, `optional`): /// A dict with the current padding parameters if padding is enabled #[getter] fn get_padding<'py>(&self, py: Python<'py>) -> PyResult<Option<Bound<'py, PyDict>>> { self.tokenizer.get_padding().map_or(Ok(None), |params| { let dict = PyDict::new(py); dict.set_item( "length", match params.strategy { tk::PaddingStrategy::BatchLongest => None, tk::PaddingStrategy::Fixed(size) => Some(size), }, )?; dict.set_item("pad_to_multiple_of", params.pad_to_multiple_of)?; dict.set_item("pad_id", params.pad_id)?; dict.set_item("pad_token", &params.pad_token)?; dict.set_item("pad_type_id", params.pad_type_id)?; dict.set_item("direction", params.direction.as_ref())?; Ok(Some(dict)) }) } /// Encode the given sequence and pair. This method can process raw text sequences /// as well as already pre-tokenized sequences. /// /// Example: /// Here are some examples of the inputs that are accepted:: /// /// encode("A single sequence")` /// encode("A sequence", "And its pair")` /// encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)` /// encode( /// [ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ], /// is_pretokenized=True /// ) /// /// Args: /// sequence (:obj:`~tokenizers.InputSequence`): /// The main input sequence we want to encode. 
This sequence can be either raw /// text or pre-tokenized, according to the ``is_pretokenized`` argument: /// /// - If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence` /// - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence` /// /// pair (:obj:`~tokenizers.InputSequence`, `optional`): /// An optional input sequence. The expected format is the same that for ``sequence``. /// /// is_pretokenized (:obj:`bool`, defaults to :obj:`False`): /// Whether the input is already pre-tokenized /// /// add_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to add the special tokens /// /// Returns: /// :class:`~tokenizers.Encoding`: The encoded result /// #[pyo3(signature = (sequence, pair = None, is_pretokenized = false, add_special_tokens = true))] #[pyo3( text_signature = "(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True)" )] fn encode( &self, sequence: &Bound<'_, PyAny>, pair: Option<&Bound<'_, PyAny>>, is_pretokenized: bool, add_special_tokens: bool, ) -> PyResult<PyEncoding> { let sequence: tk::InputSequence = if is_pretokenized { sequence.extract::<PreTokenizedInputSequence>()?.into() } else { sequence.extract::<TextInputSequence>()?.into() }; let input = match pair { Some(pair) => { let pair: tk::InputSequence = if is_pretokenized { pair.extract::<PreTokenizedInputSequence>()?.into() } else { pair.extract::<TextInputSequence>()?.into() }; tk::EncodeInput::Dual(sequence, pair) } None => tk::EncodeInput::Single(sequence), }; ToPyResult( self.tokenizer .encode_char_offsets(input, add_special_tokens) .map(|e| e.into()), ) .into() } /// Encode the given batch of inputs. This method accept both raw text sequences /// as well as already pre-tokenized sequences. The reason we use `PySequence` is /// because it allows type checking with zero-cost (according to PyO3) as we don't /// have to convert to check. 
/// /// Example: /// Here are some examples of the inputs that are accepted:: /// /// encode_batch([ /// "A single sequence", /// ("A tuple with a sequence", "And its pair"), /// [ "A", "pre", "tokenized", "sequence" ], /// ([ "A", "pre", "tokenized", "sequence" ], "And its pair") /// ]) /// /// Args: /// input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`): /// A list of single sequences or pair sequences to encode. Each sequence /// can be either raw text or pre-tokenized, according to the ``is_pretokenized`` /// argument: /// /// - If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput` /// - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput` /// /// is_pretokenized (:obj:`bool`, defaults to :obj:`False`): /// Whether the input is already pre-tokenized /// /// add_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to add the special tokens /// /// Returns: /// A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch /// #[pyo3(signature = (input, is_pretokenized = false, add_special_tokens = true))] #[pyo3(text_signature = "(self, input, is_pretokenized=False, add_special_tokens=True)")] fn encode_batch( &self, py: Python<'_>, input: Vec<Bound<'_, PyAny>>, is_pretokenized: bool, add_special_tokens: bool, ) -> PyResult<Vec<PyEncoding>> { let mut items = Vec::<tk::EncodeInput>::with_capacity(input.len()); for item in &input { let item: tk::EncodeInput = if is_pretokenized { item.extract::<PreTokenizedEncodeInput>()?.into() } else { item.extract::<TextEncodeInput>()?.into() }; items.push(item); } py.allow_threads(|| { ToPyResult( self.tokenizer .encode_batch_char_offsets(items, add_special_tokens) .map(|encodings| encodings.into_iter().map(|e| e.into()).collect()), ) .into() }) } /// Encode the given batch of inputs. This method is faster than `encode_batch` /// because it doesn't keep track of offsets, they will be all zeros. 
/// /// Example: /// Here are some examples of the inputs that are accepted:: /// /// encode_batch_fast([ /// "A single sequence", /// ("A tuple with a sequence", "And its pair"), /// [ "A", "pre", "tokenized", "sequence" ], /// ([ "A", "pre", "tokenized", "sequence" ], "And its pair") /// ]) /// /// Args: /// input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`): /// A list of single sequences or pair sequences to encode. Each sequence /// can be either raw text or pre-tokenized, according to the ``is_pretokenized`` /// argument: /// /// - If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput` /// - If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput` /// /// is_pretokenized (:obj:`bool`, defaults to :obj:`False`): /// Whether the input is already pre-tokenized /// /// add_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether to add the special tokens /// /// Returns: /// A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch /// #[pyo3(signature = (input, is_pretokenized = false, add_special_tokens = true))] #[pyo3(text_signature = "(self, input, is_pretokenized=False, add_special_tokens=True)")] fn encode_batch_fast( &self, py: Python<'_>, input: Vec<Bound<'_, PyAny>>, is_pretokenized: bool, add_special_tokens: bool, ) -> PyResult<Vec<PyEncoding>> { let mut items = Vec::<tk::EncodeInput>::with_capacity(input.len()); for item in &input { let item: tk::EncodeInput = if is_pretokenized { item.extract::<PreTokenizedEncodeInput>()?.into() } else { item.extract::<TextEncodeInput>()?.into() }; items.push(item); } py.allow_threads(|| { ToPyResult( self.tokenizer .encode_batch_fast(items, add_special_tokens) .map(|encodings| encodings.into_iter().map(|e| e.into()).collect()), ) .into() }) } /// Decode the given list of ids back to a string /// /// This is used to decode anything coming back from a Language Model /// /// Args: /// ids (A :obj:`List/Tuple` of :obj:`int`): /// The list of ids that we 
want to decode /// /// skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether the special tokens should be removed from the decoded string /// /// Returns: /// :obj:`str`: The decoded string #[pyo3(signature = (ids, skip_special_tokens = true))] #[pyo3(text_signature = "(self, ids, skip_special_tokens=True)")] fn decode(&self, ids: Vec<u32>, skip_special_tokens: bool) -> PyResult<String> { ToPyResult(self.tokenizer.decode(&ids, skip_special_tokens)).into() } /// Decode a batch of ids back to their corresponding string /// /// Args: /// sequences (:obj:`List` of :obj:`List[int]`): /// The batch of sequences we want to decode /// /// skip_special_tokens (:obj:`bool`, defaults to :obj:`True`): /// Whether the special tokens should be removed from the decoded strings /// /// Returns: /// :obj:`List[str]`: A list of decoded strings #[pyo3(signature = (sequences, skip_special_tokens = true))] #[pyo3(text_signature = "(self, sequences, skip_special_tokens=True)")] fn decode_batch( &self, py: Python<'_>, sequences: Vec<Vec<u32>>, skip_special_tokens: bool, ) -> PyResult<Vec<String>> { py.allow_threads(|| { let slices = sequences.iter().map(|v| &v[..]).collect::<Vec<&[u32]>>(); ToPyResult(self.tokenizer.decode_batch(&slices, skip_special_tokens)).into() }) } /// Convert the given token to its corresponding id if it exists /// /// Args: /// token (:obj:`str`): /// The token to convert /// /// Returns: /// :obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary #[pyo3(text_signature = "(self, token)")] fn token_to_id(&self, token: &str) -> Option<u32> { self.tokenizer.token_to_id(token) } /// Convert the given id to its corresponding token if it exists /// /// Args: /// id (:obj:`int`): /// The id to convert /// /// Returns: /// :obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary #[pyo3(text_signature = "(self, id)")] fn id_to_token(&self, id: u32) -> Option<String> { self.tokenizer.id_to_token(id) } /// Modifies the 
tokenizer in order to use or not the special tokens /// during encoding. /// /// Args: /// value (:obj:`bool`): /// Whether to use the special tokens or not /// #[setter] fn set_encode_special_tokens(&mut self, value: bool) { self.tokenizer.set_encode_special_tokens(value); } /// Get the value of the `encode_special_tokens` attribute /// /// Returns: /// :obj:`bool`: the tokenizer's encode_special_tokens attribute #[getter] fn get_encode_special_tokens(&self) -> bool { self.tokenizer.get_encode_special_tokens() } /// Add the given tokens to the vocabulary /// /// The given tokens are added only if they don't already exist in the vocabulary. /// Each token then gets a new attributed id. /// /// Args: /// tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): /// The list of tokens we want to add to the vocabulary. Each token can be either a /// string or an instance of :class:`~tokenizers.AddedToken` for more customization. /// /// Returns: /// :obj:`int`: The number of tokens that were created in the vocabulary #[pyo3(text_signature = "(self, tokens)")] fn add_tokens(&mut self, tokens: &Bound<'_, PyList>) -> PyResult<usize> { let tokens = tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(PyAddedToken::from(content, Some(false)).get_token()) } else if let Ok(token) = token.extract::<PyRefMut<PyAddedToken>>() { Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Input must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?; Ok(self.tokenizer.add_tokens(&tokens)) } /// Add the given special tokens to the Tokenizer. /// /// If these tokens are already part of the vocabulary, it just let the Tokenizer know about /// them. If they don't exist, the Tokenizer creates them, giving them a new id. /// /// These special tokens will never be processed by the model (ie won't be split into /// multiple tokens), and they can be removed from the output when decoding. 
/// /// Args: /// tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`): /// The list of special tokens we want to add to the vocabulary. Each token can either /// be a string or an instance of :class:`~tokenizers.AddedToken` for more /// customization. /// /// Returns: /// :obj:`int`: The number of tokens that were created in the vocabulary #[pyo3(text_signature = "(self, tokens)")] fn add_special_tokens(&mut self, tokens: &Bound<'_, PyList>) -> PyResult<usize> { let tokens = tokens .into_iter() .map(|token| { if let Ok(content) = token.extract::<String>() { Ok(tk::tokenizer::AddedToken::from(content, true)) } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() { token.special = true; Ok(token.get_token()) } else { Err(exceptions::PyTypeError::new_err( "Input must be a List[Union[str, AddedToken]]", )) } }) .collect::<PyResult<Vec<_>>>()?; Ok(self.tokenizer.add_special_tokens(&tokens)) } /// Train the Tokenizer using the given files. /// /// Reads the files line by line, while keeping all the whitespace, even new lines. /// If you want to train from data store in-memory, you can check /// :meth:`~tokenizers.Tokenizer.train_from_iterator` /// /// Args: /// files (:obj:`List[str]`): /// A list of path to the files that we should use for training /// /// trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): /// An optional trainer that should be used to train our Model #[pyo3(signature = (files, trainer = None))] #[pyo3(text_signature = "(self, files, trainer = None)")] fn train(&mut self, files: Vec<String>, trainer: Option<&mut PyTrainer>) -> PyResult<()> { let mut trainer = trainer.map_or_else(|| self.tokenizer.get_model().get_trainer(), |t| t.clone()); Python::with_gil(|py| { py.allow_threads(|| { ToPyResult( self.tokenizer .train_from_files(&mut trainer, files) .map(|_| {}), ) .into() }) }) } /// Train the Tokenizer using the provided iterator. 
/// /// You can provide anything that is a Python Iterator /// /// * A list of sequences :obj:`List[str]` /// * A generator that yields :obj:`str` or :obj:`List[str]` /// * A Numpy array of strings /// * ... /// /// Args: /// iterator (:obj:`Iterator`): /// Any iterator over strings or list of strings /// /// trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`): /// An optional trainer that should be used to train our Model /// /// length (:obj:`int`, `optional`): /// The total number of sequences in the iterator. This is used to /// provide meaningful progress tracking #[pyo3(signature = (iterator, trainer = None, length = None))] #[pyo3(text_signature = "(self, iterator, trainer=None, length=None)")] fn train_from_iterator( &mut self, py: Python, iterator: &Bound<'_, PyAny>, trainer: Option<&mut PyTrainer>, length: Option<usize>, ) -> PyResult<()> { let mut trainer = trainer.map_or_else(|| self.tokenizer.get_model().get_trainer(), |t| t.clone()); let buffered_iter = PyBufferedIterator::new( iterator, |element| { // Each element of the iterator can either be: // - An iterator, to allow batching // - A string if let Ok(s) = element.downcast::<PyString>() { itertools::Either::Right(std::iter::once(s.to_cow().map(|s| s.into_owned()))) } else { match element.try_iter() { Ok(iter) => itertools::Either::Left( iter.map(|i| i?.extract::<String>()) .collect::<Vec<_>>() .into_iter(), ), Err(e) => itertools::Either::Right(std::iter::once(Err(e))), } } }, 256, )?; py.allow_threads(|| { ResultShunt::process(buffered_iter, |iter| { self.tokenizer .train(&mut trainer, MaybeSizedIterator::new(iter, length)) .map(|_| {}) .map_err(|e| exceptions::PyException::new_err(e.to_string())) })? }) } /// Apply all the post-processing steps to the given encodings. /// /// The various steps are: /// /// 1. Truncate according to the set truncation params (provided with /// :meth:`~tokenizers.Tokenizer.enable_truncation`) /// 2. 
Apply the :class:`~tokenizers.processors.PostProcessor` /// 3. Pad according to the set padding params (provided with /// :meth:`~tokenizers.Tokenizer.enable_padding`) /// /// Args: /// encoding (:class:`~tokenizers.Encoding`): /// The :class:`~tokenizers.Encoding` corresponding to the main sequence. /// /// pair (:class:`~tokenizers.Encoding`, `optional`): /// An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence. /// /// add_special_tokens (:obj:`bool`): /// Whether to add the special tokens /// /// Returns: /// :class:`~tokenizers.Encoding`: The final post-processed encoding #[pyo3(signature = (encoding, pair = None, add_special_tokens = true))] #[pyo3(text_signature = "(self, encoding, pair=None, add_special_tokens=True)")] fn post_process( &self, encoding: &PyEncoding, pair: Option<&PyEncoding>, add_special_tokens: bool, ) -> PyResult<PyEncoding> { ToPyResult( self.tokenizer .post_process( encoding.encoding.clone(), pair.map(|p| p.encoding.clone()), add_special_tokens, ) .map(|e| e.into()), ) .into() } /// The :class:`~tokenizers.models.Model` in use by the Tokenizer #[getter] fn get_model(&self, py: Python<'_>) -> PyResult<PyObject> { self.tokenizer.get_model().get_as_subtype(py) } /// Set the :class:`~tokenizers.models.Model` #[setter] fn set_model(&mut self, model: PyRef<PyModel>) { self.tokenizer.with_model(model.clone()); } /// The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer #[getter] fn get_normalizer(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(n) = self.tokenizer.get_normalizer() { n.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.normalizers.Normalizer` #[setter] fn set_normalizer(&mut self, normalizer: Option<PyRef<PyNormalizer>>) { let normalizer_option = normalizer.map(|norm| norm.clone()); self.tokenizer.with_normalizer(normalizer_option); } /// The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer #[getter] fn 
get_pre_tokenizer(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(pt) = self.tokenizer.get_pre_tokenizer() { pt.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.normalizers.Normalizer` #[setter] fn set_pre_tokenizer(&mut self, pretok: Option<PyRef<PyPreTokenizer>>) { self.tokenizer .with_pre_tokenizer(pretok.map(|pre| pre.clone())); } /// The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer #[getter] fn get_post_processor(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(n) = self.tokenizer.get_post_processor() { n.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.processors.PostProcessor` #[setter] fn set_post_processor(&mut self, processor: Option<PyRef<PyPostProcessor>>) { self.tokenizer .with_post_processor(processor.map(|p| p.clone())); } /// The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer #[getter] fn get_decoder(&self, py: Python<'_>) -> PyResult<PyObject> { if let Some(dec) = self.tokenizer.get_decoder() { dec.get_as_subtype(py) } else { Ok(py.None()) } } /// Set the :class:`~tokenizers.decoders.Decoder` #[setter] fn set_decoder(&mut self, decoder: Option<PyRef<PyDecoder>>) { self.tokenizer.with_decoder(decoder.map(|d| d.clone())); } } #[cfg(test)] mod test { use super::*; use crate::models::PyModel; use crate::normalizers::{PyNormalizer, PyNormalizerTypeWrapper}; use std::sync::{Arc, RwLock}; use tempfile::NamedTempFile; use tk::normalizers::{Lowercase, NFKC}; #[test] fn serialize() { let mut tokenizer = Tokenizer::new(PyModel::from(BPE::default())); tokenizer.with_normalizer(Some(PyNormalizer::new(PyNormalizerTypeWrapper::Sequence( vec![ Arc::new(RwLock::new(NFKC.into())), Arc::new(RwLock::new(Lowercase.into())), ], )))); let tmp = NamedTempFile::new().unwrap().into_temp_path(); tokenizer.save(&tmp, false).unwrap(); Tokenizer::from_file(&tmp).unwrap(); } #[test] fn serde_pyo3() { let mut tokenizer = 
Tokenizer::new(PyModel::from(BPE::default())); tokenizer.with_normalizer(Some(PyNormalizer::new(PyNormalizerTypeWrapper::Sequence( vec![ Arc::new(RwLock::new(NFKC.into())), Arc::new(RwLock::new(Lowercase.into())), ], )))); let output = crate::utils::serde_pyo3::to_string(&tokenizer).unwrap(); assert_eq!(output, "Tokenizer(version=\"1.0\", truncation=None, padding=None, added_tokens=[], normalizer=Sequence(normalizers=[NFKC(), Lowercase()]), pre_tokenizer=None, post_processor=None, decoder=None, model=BPE(dropout=None, unk_token=None, continuing_subword_prefix=None, end_of_word_suffix=None, fuse_unk=False, byte_fallback=False, ignore_merges=False, vocab={}, merges=[]))"); } }
tokenizers/bindings/python/src/tokenizer.rs/0
{ "file_path": "tokenizers/bindings/python/src/tokenizer.rs", "repo_id": "tokenizers", "token_count": 27922 }
329
import json import pickle import pytest from tokenizers.pre_tokenizers import ( BertPreTokenizer, ByteLevel, CharDelimiterSplit, Digits, FixedLength, Metaspace, PreTokenizer, Punctuation, Sequence, Split, UnicodeScripts, Whitespace, WhitespaceSplit, ) class TestByteLevel: def test_instantiate(self): assert ByteLevel() is not None assert ByteLevel(add_prefix_space=True) is not None assert ByteLevel(add_prefix_space=False) is not None assert isinstance(ByteLevel(), PreTokenizer) assert isinstance(ByteLevel(), ByteLevel) assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel) def test_has_alphabet(self): assert isinstance(ByteLevel.alphabet(), list) assert len(ByteLevel.alphabet()) == 256 def test_can_modify(self): pretok = ByteLevel(add_prefix_space=False) assert pretok.add_prefix_space == False # Modify these pretok.add_prefix_space = True assert pretok.add_prefix_space == True def test_manual_reload(self): byte_level = ByteLevel() state = json.loads(byte_level.__getstate__()) reloaded = ByteLevel(**state) assert isinstance(reloaded, ByteLevel) class TestSplit: def test_instantiate(self): pre_tokenizer = Split(pattern=" ", behavior="removed") assert pre_tokenizer is not None assert isinstance(pre_tokenizer, PreTokenizer) assert isinstance(pre_tokenizer, Split) assert isinstance(pickle.loads(pickle.dumps(Split(" ", "removed"))), Split) # test with invert=True pre_tokenizer_with_invert = Split(pattern=" ", behavior="isolated", invert=True) assert pre_tokenizer_with_invert is not None assert isinstance(pre_tokenizer_with_invert, PreTokenizer) assert isinstance(pre_tokenizer_with_invert, Split) assert isinstance(pickle.loads(pickle.dumps(Split(" ", "removed", True))), Split) class TestWhitespace: def test_instantiate(self): assert Whitespace() is not None assert isinstance(Whitespace(), PreTokenizer) assert isinstance(Whitespace(), Whitespace) assert isinstance(pickle.loads(pickle.dumps(Whitespace())), Whitespace) class TestWhitespaceSplit: def 
test_instantiate(self): assert WhitespaceSplit() is not None assert isinstance(WhitespaceSplit(), PreTokenizer) assert isinstance(WhitespaceSplit(), WhitespaceSplit) assert isinstance(pickle.loads(pickle.dumps(WhitespaceSplit())), WhitespaceSplit) class TestBertPreTokenizer: def test_instantiate(self): assert BertPreTokenizer() is not None assert isinstance(BertPreTokenizer(), PreTokenizer) assert isinstance(BertPreTokenizer(), BertPreTokenizer) assert isinstance(pickle.loads(pickle.dumps(BertPreTokenizer())), BertPreTokenizer) class TestMetaspace: def test_instantiate(self): assert Metaspace() is not None assert Metaspace(replacement="-") is not None with pytest.raises(ValueError, match="expected a string of length 1"): Metaspace(replacement="") assert Metaspace(prepend_scheme="always") is not None assert isinstance(Metaspace(), PreTokenizer) assert isinstance(Metaspace(), Metaspace) assert isinstance(pickle.loads(pickle.dumps(Metaspace())), Metaspace) def test_can_modify(self): pretok = Metaspace(replacement="$", prepend_scheme="never") assert pretok.replacement == "$" assert pretok.prepend_scheme == "never" assert pretok.split == True # Modify these pretok.replacement = "%" assert pretok.replacement == "%" pretok.prepend_scheme = "first" assert pretok.prepend_scheme == "first" pretok.split = True assert pretok.split == True class TestCharDelimiterSplit: def test_instantiate(self): assert CharDelimiterSplit("-") is not None with pytest.raises(ValueError, match="expected a string of length 1"): CharDelimiterSplit("") assert isinstance(CharDelimiterSplit(" "), PreTokenizer) assert isinstance(CharDelimiterSplit(" "), CharDelimiterSplit) assert isinstance(pickle.loads(pickle.dumps(CharDelimiterSplit("-"))), CharDelimiterSplit) def test_can_modify(self): pretok = CharDelimiterSplit("@") assert pretok.delimiter == "@" # Modify these pretok.delimiter = "!" assert pretok.delimiter == "!" 
class TestPunctuation: def test_instantiate(self): assert Punctuation() is not None assert Punctuation("removed") is not None assert isinstance(Punctuation(), PreTokenizer) assert isinstance(Punctuation(), Punctuation) assert isinstance(pickle.loads(pickle.dumps(Punctuation())), Punctuation) class TestSequence: def test_instantiate(self): assert Sequence([]) is not None assert isinstance(Sequence([]), PreTokenizer) assert isinstance(Sequence([]), Sequence) dumped = pickle.dumps(Sequence([])) assert isinstance(pickle.loads(dumped), Sequence) def test_bert_like(self): pre_tokenizer = Sequence([WhitespaceSplit(), Punctuation()]) assert isinstance(Sequence([]), PreTokenizer) assert isinstance(Sequence([]), Sequence) assert isinstance(pickle.loads(pickle.dumps(pre_tokenizer)), Sequence) result = pre_tokenizer.pre_tokenize_str("Hey friend! How are you?!?") assert result == [ ("Hey", (0, 3)), ("friend", (4, 10)), ("!", (10, 11)), ("How", (16, 19)), ("are", (20, 23)), ("you", (24, 27)), ("?", (27, 28)), ("!", (28, 29)), ("?", (29, 30)), ] def test_set_item(self): pre_tokenizers = Sequence( [ ByteLevel(), Split(pattern="/test/", behavior="removed"), ] ) assert pre_tokenizers[0].__class__ == ByteLevel assert pre_tokenizers[1].__class__ == Split pre_tokenizers[1] = Metaspace() assert pre_tokenizers[1].__class__ == Metaspace with pytest.raises(IndexError): print(pre_tokenizers[2]) def test_item_getters_and_setters(self): pre_tokenizers = Sequence( [ ByteLevel(add_prefix_space=True, trim_offsets=True, use_regex=True), Split(pattern="/test/", behavior="removed", invert=False), Metaspace("a", "never", split=False), CharDelimiterSplit(delimiter=" "), Punctuation(behavior="removed"), Digits(individual_digits=True), ] ) assert pre_tokenizers[0].__class__ == ByteLevel pre_tokenizers[0].add_prefix_space = False pre_tokenizers[0].trim_offsets = False pre_tokenizers[0].use_regex = False assert not pre_tokenizers[0].add_prefix_space assert not pre_tokenizers[0].trim_offsets assert not 
pre_tokenizers[0].use_regex assert pre_tokenizers[1].__class__ == Split with pytest.raises(Exception): pre_tokenizers[1].pattern = "/pattern/" pre_tokenizers[1].behavior = "isolated" pre_tokenizers[1].invert = True with pytest.raises(Exception): pre_tokenizers[1].pattern assert pre_tokenizers[1].behavior == "isolated" assert pre_tokenizers[1].invert assert pre_tokenizers[2].__class__ == Metaspace pre_tokenizers[2].replacement = " " pre_tokenizers[2].prepend_scheme = "always" pre_tokenizers[2].split = True assert pre_tokenizers[2].replacement == " " assert pre_tokenizers[2].prepend_scheme == "always" assert pre_tokenizers[2].split assert pre_tokenizers[3].__class__ == CharDelimiterSplit pre_tokenizers[3].delimiter = "_" assert pre_tokenizers[3].delimiter == "_" assert pre_tokenizers[4].__class__ == Punctuation pre_tokenizers[4].behavior = "isolated" assert pre_tokenizers[4].behavior == "isolated" assert pre_tokenizers[5].__class__ == Digits pre_tokenizers[5].individual_digits = False assert not pre_tokenizers[5].individual_digits class TestDigits: def test_instantiate(self): assert Digits() is not None assert isinstance(Digits(), PreTokenizer) assert isinstance(Digits(), Digits) assert isinstance(Digits(True), Digits) assert isinstance(Digits(False), Digits) assert isinstance(pickle.loads(pickle.dumps(Digits())), Digits) def test_can_modify(self): pretok = Digits(individual_digits=False) assert pretok.individual_digits == False # Modify these pretok.individual_digits = True assert pretok.individual_digits == True class TestFixedLength: def test_instantiate(self): assert FixedLength() is not None assert isinstance(FixedLength(), PreTokenizer) assert isinstance(FixedLength(), FixedLength) assert isinstance(pickle.loads(pickle.dumps(FixedLength())), FixedLength) def test_pre_tokenize_str(self): pretok = FixedLength(length=5) assert pretok.length == 5 assert pretok.pre_tokenize_str("ATCCTGGTACTG") == [ ("ATCCT", (0, 5)), ("GGTAC", (5, 10)), ("TG", (10, 12)), ] 
pretok.length = 10 assert pretok.length == 10 assert pretok.pre_tokenize_str("ATCCTGGTACTG") == [ ("ATCCTGGTAC", (0, 10)), ("TG", (10, 12)), ] class TestUnicodeScripts: def test_instantiate(self): assert UnicodeScripts() is not None assert isinstance(UnicodeScripts(), PreTokenizer) assert isinstance(UnicodeScripts(), UnicodeScripts) assert isinstance(pickle.loads(pickle.dumps(UnicodeScripts())), UnicodeScripts) class TestCustomPreTokenizer: class BadCustomPretok: def pre_tokenize(self, pretok, wrong): # This method does not have the right signature: it takes one too many arg pass class GoodCustomPretok: def split(self, n, normalized): # Here we just test that we can return a List[NormalizedString], it # does not really make sense to return twice the same otherwise return [normalized, normalized] def pre_tokenize(self, pretok): pretok.split(self.split) def test_instantiate(self): bad = PreTokenizer.custom(TestCustomPreTokenizer.BadCustomPretok()) good = PreTokenizer.custom(TestCustomPreTokenizer.GoodCustomPretok()) assert isinstance(bad, PreTokenizer) assert isinstance(good, PreTokenizer) with pytest.raises(Exception, match="TypeError:.*pre_tokenize()"): bad.pre_tokenize_str("Hey there!") assert good.pre_tokenize_str("Hey there!") == [ ("Hey there!", (0, 10)), ("Hey there!", (0, 10)), ] def test_camel_case(self): class CamelCasePretok: def get_state(self, c): if c.islower(): return "lower" elif c.isupper(): return "upper" elif c.isdigit(): return "digit" else: return "rest" def split(self, n, normalized): i = 0 # states = {"any", "lower", "upper", "digit", "rest"} state = "any" pieces = [] for j, c in enumerate(normalized.normalized): c_state = self.get_state(c) if state == "any": state = c_state if state != "rest" and state == c_state: pass elif state == "upper" and c_state == "lower": pass else: pieces.append(normalized[i:j]) i = j state = c_state pieces.append(normalized[i:]) return pieces def pre_tokenize(self, pretok): pretok.split(self.split) camel = 
PreTokenizer.custom(CamelCasePretok()) assert camel.pre_tokenize_str("HeyThere!?-ThisIsLife") == [ ("Hey", (0, 3)), ("There", (3, 8)), ("!", (8, 9)), ("?", (9, 10)), ("-", (10, 11)), ("This", (11, 15)), ("Is", (15, 17)), ("Life", (17, 21)), ]
tokenizers/bindings/python/tests/bindings/test_pre_tokenizers.py/0
{ "file_path": "tokenizers/bindings/python/tests/bindings/test_pre_tokenizers.py", "repo_id": "tokenizers", "token_count": 5762 }
330
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for those with `?=`
SPHINXOPTS   ?=
SPHINXBUILD  ?= sphinx-build
BUILDDIR     ?= build
SOURCEDIR    = source

# Put it first so that "make" without argument is like "make html_all".
# Builds the HTML docs once per language binding: each run writes to its own
# output directory and passes a Sphinx tag (-t) so the shared doc sources can
# emit binding-specific content.
html_all:
	@echo "Generating doc for Rust"
	@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/rust" $(SPHINXOPTS) $(O) -t rust
	@echo "Generating doc for Python"
	@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/python" $(SPHINXOPTS) $(O) -t python
	@echo "Generating doc for Node.js"
	@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/node" $(SPHINXOPTS) $(O) -t node

.PHONY: html_all Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
tokenizers/docs/Makefile/0
{ "file_path": "tokenizers/docs/Makefile", "repo_id": "tokenizers", "token_count": 393 }
331
<!-- DISABLE-FRONTMATTER-SECTIONS --> # Tokenizers Fast State-of-the-art tokenizers, optimized for both research and production [🤗 Tokenizers](https://github.com/huggingface/tokenizers) provides an implementation of today's most used tokenizers, with a focus on performance and versatility. These tokenizers are also used in [🤗 Transformers](https://github.com/huggingface/transformers). # Main features: - Train new vocabularies and tokenize, using today's most used tokenizers. - Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes less than 20 seconds to tokenize a GB of text on a server's CPU. - Easy to use, but also extremely versatile. - Designed for both research and production. - Full alignment tracking. Even with destructive normalization, it's always possible to get the part of the original sentence that corresponds to any token. - Does all the pre-processing: Truncation, Padding, and adding the special tokens your model needs.
tokenizers/docs/source-doc-builder/index.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/index.mdx", "repo_id": "tokenizers", "token_count": 250 }
332
Input sequences ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ These types represent all the different kinds of sequence that can be used as input of a Tokenizer. Globally, any sequence can be either a string or a list of strings, according to the operating mode of the tokenizer: ``raw text`` vs ``pre-tokenized``. .. autodata:: tokenizers.TextInputSequence .. autodata:: tokenizers.PreTokenizedInputSequence .. autodata:: tokenizers.InputSequence Encode inputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ These types represent all the different kinds of input that a :class:`~tokenizers.Tokenizer` accepts when using :meth:`~tokenizers.Tokenizer.encode_batch`. .. autodata:: tokenizers.TextEncodeInput .. autodata:: tokenizers.PreTokenizedEncodeInput .. autodata:: tokenizers.EncodeInput Tokenizer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: tokenizers.Tokenizer :members: Encoding ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: tokenizers.Encoding :members: Added Tokens ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: tokenizers.AddedToken :members: Models ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: tokenizers.models :members: Normalizers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: tokenizers.normalizers :members: Pre-tokenizers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: tokenizers.pre_tokenizers :members: Post-processor ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
automodule:: tokenizers.processors :members: Trainers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: tokenizers.trainers :members: Decoders ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: tokenizers.decoders :members: Visualizer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autoclass:: tokenizers.tools.Annotation :members: .. autoclass:: tokenizers.tools.EncodingVisualizer :members: __call__
tokenizers/docs/source/api/python.inc/0
{ "file_path": "tokenizers/docs/source/api/python.inc", "repo_id": "tokenizers", "token_count": 562 }
333
/// Install a panic hook that forwards Rust panic messages to the browser
/// console (no-op unless the `console_error_panic_hook` feature is enabled).
pub fn set_panic_hook() {
    // When the `console_error_panic_hook` feature is enabled, we can call the
    // `set_panic_hook` function at least once during initialization, and then
    // we will get better error messages if our code ever panics.
    //
    // `set_once` guards against installing the hook twice, so this function is
    // safe to call from multiple entry points.
    //
    // For more details see
    // https://github.com/rustwasm/console_error_panic_hook#readme
    #[cfg(feature = "console_error_panic_hook")]
    console_error_panic_hook::set_once();
}
tokenizers/tokenizers/examples/unstable_wasm/src/utils.rs/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/src/utils.rs", "repo_id": "tokenizers", "token_count": 150 }
334
use crate::tokenizer::{Decoder, Result}; use monostate::MustBe; use serde::{Deserialize, Serialize}; #[derive(Deserialize, Clone, Debug, Serialize, Default)] /// ByteFallback is a simple trick which converts tokens looking like `<0x61>` /// to pure bytes, and attempts to make them into a string. If the tokens /// cannot be decoded you will get � instead for each inconvertible byte token #[non_exhaustive] pub struct ByteFallback { #[serde(rename = "type")] type_: MustBe!("ByteFallback"), } impl ByteFallback { pub fn new() -> Self { Self { type_: MustBe!("ByteFallback"), } } } impl Decoder for ByteFallback { fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> { let mut new_tokens: Vec<String> = vec![]; let mut previous_byte_tokens: Vec<u8> = vec![]; for token in tokens { let bytes = if token.len() == 6 && token.starts_with("<0x") && token.ends_with('>') { u8::from_str_radix(&token[3..5], 16).ok() } else { None }; if let Some(bytes) = bytes { previous_byte_tokens.push(bytes); } else { if !previous_byte_tokens.is_empty() { if let Ok(string) = String::from_utf8(previous_byte_tokens.clone()) { new_tokens.push(string); } else { for _ in 0..previous_byte_tokens.len() { new_tokens.push("�".into()); } } previous_byte_tokens.clear(); } new_tokens.push(token); } } if !previous_byte_tokens.is_empty() { if let Ok(string) = String::from_utf8(previous_byte_tokens.clone()) { new_tokens.push(string); } else { for _ in 0..previous_byte_tokens.len() { new_tokens.push("�".into()); } } } Ok(new_tokens) } } #[cfg(test)] mod tests { use super::*; #[test] fn decode() { let decoder = ByteFallback::new(); let res = decoder .decode_chain(vec!["Hey".into(), "friend!".into()]) .unwrap(); assert_eq!(res, vec!["Hey", "friend!"]); let res = decoder.decode_chain(vec!["<0x61>".into()]).unwrap(); assert_eq!(res, vec!["a"]); let res = decoder.decode_chain(vec!["<0xE5>".into()]).unwrap(); assert_eq!(res, vec!["�"]); let res = decoder .decode_chain(vec!["<0xE5>".into(), "<0x8f>".into()]) 
.unwrap(); assert_eq!(res, vec!["�", "�"]); // 叫 let res = decoder .decode_chain(vec!["<0xE5>".into(), "<0x8f>".into(), "<0xab>".into()]) .unwrap(); assert_eq!(res, vec!["叫"]); let res = decoder .decode_chain(vec![ "<0xE5>".into(), "<0x8f>".into(), "<0xab>".into(), "a".into(), ]) .unwrap(); assert_eq!(res, vec!["叫", "a"]); let res = decoder .decode_chain(vec!["<0xE5>".into(), "<0x8f>".into(), "a".into()]) .unwrap(); assert_eq!(res, vec!["�", "�", "a"]); } }
tokenizers/tokenizers/src/decoders/byte_fallback.rs/0
{ "file_path": "tokenizers/tokenizers/src/decoders/byte_fallback.rs", "repo_id": "tokenizers", "token_count": 1851 }
335
use super::{ lattice::Lattice, trainer::UnigramTrainer, trie::{Trie, TrieBuilder}, }; use crate::tokenizer::{Model, Result, Token}; use crate::utils::cache::{Cache, MAX_LENGTH}; use std::collections::HashMap; use ahash::AHashMap; use std::convert::TryInto; use std::fs::read_to_string; use std::path::{Path, PathBuf}; type TokenMap = AHashMap<String, u32>; type Vocab = Vec<(String, f64)>; /// A `Unigram` model to encode sentences. pub struct Unigram { token_to_ids: TokenMap, pub(crate) vocab: Vocab, cache: Cache<String, Vec<String>>, trie: Trie<u8>, pub min_score: f64, pub(super) unk_id: Option<usize>, pub(super) bos_id: usize, pub(super) eos_id: usize, fuse_unk: bool, is_optimized: bool, byte_fallback: bool, } impl PartialEq for Unigram { fn eq(&self, other: &Self) -> bool { self.unk_id == other.unk_id && self.vocab == other.vocab } } impl Clone for Unigram { // `Clone` can't be derive because it's not implemented for `Cache`. // To keep things simple when we clone, the new Unigram will start with a fresh cache. 
fn clone(&self) -> Self { let fresh_cache = self.cache.fresh(); Self { vocab: self.vocab.clone(), cache: fresh_cache, token_to_ids: self.token_to_ids.clone(), trie: self.trie.clone(), min_score: self.min_score, unk_id: self.unk_id, bos_id: self.bos_id, eos_id: self.eos_id, fuse_unk: self.fuse_unk, is_optimized: self.is_optimized, byte_fallback: self.byte_fallback, } } } impl std::fmt::Debug for Unigram { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("Unigram") .field("vocab", &self.vocab.len()) .field("unk_id", &self.unk_id) .field("byte_fallback", &self.byte_fallback) .finish() } } static K_UNK_PENALTY: f64 = 10.0; #[derive(thiserror::Error, Debug)] pub enum UnigramError { #[error("The vocabulary is empty but at least <unk> is needed")] EmptyVocabulary, #[error("The `unk_id` is larger than vocabulary size")] UnkIdNotInVocabulary, #[error("Encountered an unknown token but `unk_id` is missing")] MissingUnkId, } impl Default for Unigram { fn default() -> Self { let vocab = vec![("<unk>".to_string(), 0.0)]; Self::from(vocab, Some(0), false).unwrap() } } impl Unigram { /// Create a `Unigram` model from a given vocabulary. /// Vocabulary are the various tokens and their associated score which is a sort of a logprob of /// their frequency, which will enable tokenization and sampling. /// unk_id, is the index within the vocabulary. /// For now `Unigram` *requires* at least `unk` because we might find a never seen char. /// Further versions might allow that part to be hidden. 
pub fn from( vocab: Vec<(String, f64)>, unk_id: Option<usize>, byte_fallback: bool, ) -> Result<Self> { let n = vocab.len(); let mut token_to_ids: TokenMap = AHashMap::new(); let mut builder = TrieBuilder::default(); if let Some(unk_id) = unk_id { if vocab.is_empty() { return Err(Box::new(UnigramError::EmptyVocabulary)); } if unk_id >= vocab.len() { return Err(Box::new(UnigramError::UnkIdNotInVocabulary)); } } let bos_id = n + 1; let eos_id = n + 2; let mut min_score = f64::INFINITY; for (id, (token, score)) in vocab.iter().enumerate() { token_to_ids.insert(token.to_string(), id as u32); builder.push(token.as_bytes()); if score < &min_score { min_score = *score; } } let trie = builder.build(); let fuse_unk = true; let is_optimized = true; Ok(Self { vocab, token_to_ids, trie, min_score, bos_id, eos_id, unk_id, fuse_unk, cache: Cache::default(), is_optimized, byte_fallback, }) } #[cfg(test)] pub(super) fn set_fuse_unk(&mut self, fuse_unk: bool) { self.fuse_unk = fuse_unk; self.cache = self.cache.fresh(); } #[cfg(test)] pub(super) fn set_optimized(&mut self, is_optimized: bool) { self.is_optimized = is_optimized; } pub fn byte_fallback(&self) -> bool { self.byte_fallback } pub(super) fn len(&self) -> usize { self.vocab.len() } pub(super) fn populate_nodes(&self, lattice: &mut Lattice) { let unk_score = self.min_score - K_UNK_PENALTY; let len = lattice.len(); let mut begin_pos = 0; while begin_pos < len { let mblen = lattice.sentence[begin_pos..] 
.chars() .next() .unwrap() .len_utf8(); let mut has_single_node = false; for bytes in self .trie .common_prefix_search(lattice.sentence.bytes().skip(begin_pos)) { let n = bytes.len(); let tok = String::from_utf8(bytes).unwrap(); let id = *self.token_to_ids.get(&tok).unwrap(); let item = &self.vocab[id as usize]; assert_eq!(item.0, tok); let score: f64 = item.1; lattice.insert(begin_pos, n, score, id.try_into().unwrap()); if !has_single_node && n == mblen { has_single_node = true; } } if !has_single_node { if let Some(unk_id) = self.unk_id { lattice.insert(begin_pos, mblen, unk_score, unk_id); } } begin_pos += mblen } } /// This functions take a String, and will encode it in a Vec of Strings, /// of the best tokenization available to the current model. /// ``` /// use tokenizers::models::unigram::Unigram; /// /// let pieces = vec![ /// ("<unk>".to_string(), 0.0), /// ("a".to_string(), 0.0), /// ("b".to_string(), 0.0), /// ("c".to_string(), 0.0), /// ("d".to_string(), 0.0), /// ("cd".to_string(), 1.0), /// ("ab".to_string(), 2.0), /// ("abc".to_string(), 5.0), /// ("abcd".to_string(), 10.0), /// ]; /// let model = Unigram::from(pieces, Some(0), false).unwrap(); /// let result = model.encode("abcdacdxx").unwrap(); /// assert_eq!(result, vec!["abcd", "a", "cd", "xx"]); /// ``` pub fn encode(&self, sentence: &str) -> Result<Vec<String>> { if sentence.is_empty() { return Ok(vec![]); } if let Some(result) = self.cache.get(sentence) { Ok(result.to_vec()) } else { let result = if self.is_optimized { self.encode_optimized(sentence)? } else { self.encode_unoptimized(sentence)? }; if sentence.len() < MAX_LENGTH { self.cache.set(sentence.to_owned(), result.clone()); } Ok(result) } } fn encode_optimized(&self, sentence: &str) -> Result<Vec<String>> { // https://github.com/google/sentencepiece/blob/d48247191a6d50e469ed1a4a36e877befffd1851/src/unigram_model.cc#L600 #[derive(Debug, Clone)] struct BestPathNode { /// The vocab id. 
(maybe UNK) id: usize, /// The total score of the best path ending at this node. best_path_score: f64, /// The starting position (in utf-8) of this node. The entire best /// path can be constructed by backtracking along this link. starts_at: Option<usize>, } impl Default for BestPathNode { fn default() -> Self { Self { id: 0, best_path_score: 0.0, starts_at: None, } } } let size = sentence.len(); let unk_score = self.min_score - K_UNK_PENALTY; let mut best_path_ends_at = vec![BestPathNode::default(); size + 1]; let mut starts_at = 0; while starts_at < size { let best_path_score_till_here = best_path_ends_at[starts_at].best_path_score; let mut has_single_node = false; let mblen = sentence[starts_at..].chars().next().unwrap().len_utf8(); for tok_bytes in self .trie .common_prefix_search(sentence.bytes().skip(starts_at)) { let key_pos = starts_at + tok_bytes.len(); let token: String = String::from_utf8(tok_bytes).unwrap(); let target_node = &mut best_path_ends_at[key_pos]; let length = key_pos - starts_at; let id = self.token_to_ids.get(&token).unwrap(); let score = self.vocab.get(*id as usize).unwrap().1; let candidate_best_path_score = score + best_path_score_till_here; if target_node.starts_at.is_none() || candidate_best_path_score > target_node.best_path_score { target_node.best_path_score = candidate_best_path_score; target_node.starts_at = Some(starts_at); target_node.id = *id as usize; } if !has_single_node && length == mblen { has_single_node = true; } } if !has_single_node { let target_node = &mut best_path_ends_at[starts_at + mblen]; let candidate_best_path_score = unk_score + best_path_score_till_here; if target_node.starts_at.is_none() || candidate_best_path_score > target_node.best_path_score { target_node.best_path_score = candidate_best_path_score; target_node.starts_at = Some(starts_at); target_node.id = self.unk_id.ok_or(UnigramError::MissingUnkId)?; } } starts_at += mblen } let mut ends_at = size; let mut results: Vec<String> = vec![]; let mut token 
= vec![]; while ends_at > 0 { let node = &best_path_ends_at[ends_at]; let starts_at = node.starts_at.unwrap(); if self.fuse_unk && Some(node.id) == self.unk_id { token.push(sentence[starts_at..ends_at].to_string()); } else { if !token.is_empty() { token.reverse(); results.push(token.concat()); token = vec![]; } results.push(sentence[starts_at..ends_at].to_string()); } ends_at = starts_at; } if !token.is_empty() { token.reverse(); results.push(token.concat()); } results.reverse(); Ok(results) } fn encode_unoptimized(&self, sentence: &str) -> Result<Vec<String>> { let mut lattice = Lattice::from(sentence, self.bos_id, self.eos_id); self.populate_nodes(&mut lattice); if self.fuse_unk { let mut results = vec![]; let mut token = String::new(); for node in lattice.viterbi().iter() { let item = lattice.piece(&node.borrow()); if node.borrow().id == self.unk_id.ok_or(UnigramError::MissingUnkId)? { token.push_str(&item); } else { if !token.is_empty() { results.push(token); token = String::new(); } results.push(item); } } if !token.is_empty() { results.push(token); } Ok(results) } else { Ok(lattice.tokens()) } } /// Iterate of vocabulary of the model as a pair of `(token, score)`. pub fn iter(&self) -> UnigramIterator { UnigramIterator { model: self, i: 0 } } /// Loads a SentencePiece output model after being trained by tokenizers. /// After that you can use the model with tokenizers library. /// ```no_run /// use tokenizers::models::unigram::Unigram; /// use std::path::Path; /// /// let model = Unigram::load("mymodel-unigram.json").unwrap(); /// ``` pub fn load<P: AsRef<Path>>(path: P) -> Result<Unigram> { let string = read_to_string(path)?; Ok(serde_json::from_str(&string)?) } /// Clears the internal cache pub fn clear_cache(&mut self) { self.cache.clear(); } /// Resize the cache pub fn resize_cache(&mut self, capacity: usize) { self.cache.resize(capacity); } } /// Iterator to iterate of vocabulary of the model, and their relative score. 
pub struct UnigramIterator<'a> { model: &'a Unigram, i: usize, } impl<'a> Iterator for UnigramIterator<'a> { type Item = &'a (String, f64); fn next(&mut self) -> Option<Self::Item> { let i = self.i; if i < self.model.len() { let r = Some(&self.model.vocab[i]); self.i += 1; r } else { None } } } impl Model for Unigram { type Trainer = UnigramTrainer; fn get_vocab(&self) -> HashMap<String, u32> { self.token_to_ids.clone().into_iter().collect() } fn get_vocab_size(&self) -> usize { self.vocab.len() } fn tokenize(&self, sentence: &str) -> Result<Vec<Token>> { let str_tokens = self.encode(sentence)?; let mut offset = 0; let mut tokens = Vec::with_capacity(str_tokens.len()); for string in str_tokens { let len = string.len(); let offsets = (offset, offset + len); let id: u32 = match self.token_to_ids.get(&string) { Some(id) => *id, None => { if self.byte_fallback { let byte_tokens: Option<Vec<_>> = string .bytes() .map(|byte| -> Option<Token> { let byte_string = format!("<0x{byte:02X}>"); let id = self.token_to_ids.get(&byte_string); id.map(|id| Token::new(*id, byte_string, (offset, offset + len))) }) .collect(); if let Some(byte_tokens) = byte_tokens { for token in byte_tokens { tokens.push(token); } offset += len; continue; } } self.unk_id.ok_or(UnigramError::MissingUnkId)? 
as u32 } }; offset += len; tokens.push(Token::new(id, string, offsets)); } Ok(tokens) } fn token_to_id(&self, token: &str) -> Option<u32> { self.token_to_ids.get(token).copied() } fn id_to_token(&self, id: u32) -> Option<String> { self.vocab.get(id as usize).map(|item| item.0.clone()) } fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> { let name = match name { Some(name) => format!("{name}-unigram.json"), None => "unigram.json".to_string(), }; let mut fullpath = PathBuf::new(); fullpath.push(folder); fullpath.push(name); let string = serde_json::to_string_pretty(self)?; std::fs::write(&fullpath, string)?; Ok(vec![fullpath]) } fn get_trainer(&self) -> Self::Trainer { UnigramTrainer::default() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_populate_nodes_unk() { let pieces = vec![("<unk>".to_string(), 0.0)]; let model = Unigram::from(pieces, Some(0), false).unwrap(); let mut lattice = Lattice::from("abc", model.bos_id, model.eos_id); model.populate_nodes(&mut lattice); assert_eq!(lattice.begin_nodes[0].len(), 1); assert_eq!(lattice.begin_nodes[1].len(), 1); assert_eq!(lattice.begin_nodes[2].len(), 1); assert_eq!(lattice.begin_nodes[0][0].borrow().id, 0); assert_eq!(lattice.begin_nodes[1][0].borrow().id, 0); assert_eq!(lattice.begin_nodes[2][0].borrow().id, 0); assert_eq!(lattice.begin_nodes[0][0].borrow().node_id, 2); assert_eq!(lattice.begin_nodes[1][0].borrow().node_id, 3); assert_eq!(lattice.begin_nodes[2][0].borrow().node_id, 4); } #[test] fn test_populate_nodes() { let pieces = vec![ ("<unk>".to_string(), 0.0), ("a".to_string(), 0.1), ("b".to_string(), 0.2), ("ab".to_string(), 0.3), ("bc".to_string(), 0.4), ]; let model = Unigram::from(pieces, Some(0), false).unwrap(); let mut lattice = Lattice::from("abc", model.bos_id, model.eos_id); model.populate_nodes(&mut lattice); assert_eq!(lattice.begin_nodes[0].len(), 2); // a, ab assert_eq!(lattice.begin_nodes[1].len(), 2); // b, bc assert_eq!(lattice.begin_nodes[2].len(), 1); // 
c(unk) // Id is the vocabulary id from Unigram model // node_id is simply the rank of the given node in the lattice. assert_eq!(lattice.begin_nodes[0][0].borrow().id, 1); assert_eq!(lattice.begin_nodes[0][1].borrow().id, 3); assert_eq!(lattice.begin_nodes[1][0].borrow().id, 2); assert_eq!(lattice.begin_nodes[1][1].borrow().id, 4); assert_eq!(lattice.begin_nodes[2][0].borrow().id, 0); assert_eq!(lattice.begin_nodes[0][0].borrow().node_id, 2); assert_eq!(lattice.begin_nodes[0][1].borrow().node_id, 3); assert_eq!(lattice.begin_nodes[1][0].borrow().node_id, 4); assert_eq!(lattice.begin_nodes[1][1].borrow().node_id, 5); assert_eq!(lattice.begin_nodes[2][0].borrow().node_id, 6); } #[test] fn test_encode() { let sentencepieces = vec![ ("<unk>".to_string(), 0.0), ("a".to_string(), 0.0), ("b".to_string(), 0.0), ("c".to_string(), 0.0), ("d".to_string(), 0.0), ("cd".to_string(), 1.0), ("ab".to_string(), 2.0), ("abc".to_string(), 5.0), ("abcd".to_string(), 10.0), ]; let model = Unigram::from(sentencepieces, Some(0), false).unwrap(); let result = model.encode("abcd").unwrap(); assert_eq!(result, vec!["abcd"]); } #[test] fn test_encode2() { let sentencepieces = vec![ ("<unk>".to_string(), 0.0), ("ab".to_string(), 0.0), ("cd".to_string(), -0.1), ("abc".to_string(), -0.2), ("a".to_string(), -0.3), ("b".to_string(), -0.4), ("c".to_string(), -0.5), ("ABC".to_string(), -0.5), ("abcdabcd".to_string(), 20.0), // User defined just max the scores. 
("q".to_string(), 20.5), ("r".to_string(), 20.5), ("qr".to_string(), -0.5), ]; let mut model = Unigram::from(sentencepieces, Some(0), false).unwrap(); for is_optimized in &[true, false] { model.set_optimized(*is_optimized); println!("IsOptimized {is_optimized:?}"); assert_eq!(model.encode("abc").unwrap(), vec!["abc"]); assert_eq!(model.encode("AB").unwrap(), vec!["AB"]); model.set_fuse_unk(false); assert_eq!(model.encode("AB").unwrap(), vec!["A", "B"]); model.set_fuse_unk(true); assert_eq!(model.encode("AB").unwrap(), vec!["AB"]); assert_eq!(model.encode("abcd").unwrap(), vec!["ab", "cd"]); assert_eq!(model.encode("abcc").unwrap(), vec!["abc", "c"]); assert_eq!( model.encode("xabcabaabcdd").unwrap(), vec!["x", "abc", "ab", "a", "ab", "cd", "d"] ); model.set_fuse_unk(false); assert_eq!( model.encode("xyz東京").unwrap(), vec!["x", "y", "z", "東", "京"] ); model.set_fuse_unk(true); assert_eq!(model.encode("xyz東京").unwrap(), vec!["xyz東京"]); // User encoded in original version assert_eq!(model.encode("ABC").unwrap(), vec!["ABC"]); assert_eq!(model.encode("abABCcd").unwrap(), vec!["ab", "ABC", "cd"]); assert_eq!( model.encode("ababcdabcdcd").unwrap(), vec!["ab", "abcdabcd", "cd"] ); assert_eq!(model.encode("abqrcd").unwrap(), vec!["ab", "q", "r", "cd"]); } } #[test] fn test_unigram_bytefallback() { // In [97]: processor.encode_as_pieces("⅐⅛⅑ ") // Out[97]: ['▁', '<0xE2>', '<0x85>', '<0x90>', '⅛', '<0xE2>', '<0x85>', '<0x91>', '▁'] let sentencepieces = vec![ ("<unk>".to_string(), 0.0), ("<0xC3>".to_string(), -0.01), ("<0xA9>".to_string(), -0.03), ]; let unigram = Unigram::from(sentencepieces, Some(0), true).unwrap(); let tokens: Vec<Token> = unigram.tokenize("é").unwrap(); assert_eq!( tokens, [ Token { id: 1, value: "<0xC3>".to_string(), offsets: (0, 2) }, Token { id: 2, value: "<0xA9>".to_string(), offsets: (0, 2) } ] ); let tokens = unigram.tokenize("?é").unwrap(); assert_eq!(tokens[0].id, 0); } }
tokenizers/tokenizers/src/models/unigram/model.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/unigram/model.rs", "repo_id": "tokenizers", "token_count": 11856 }
336
use crate::tokenizer::{NormalizedString, Normalizer, Result}; use crate::utils::macro_rules_attribute; use serde::{Deserialize, Serialize}; use unicode_normalization_alignments::char::is_combining_mark; #[derive(Copy, Clone, Debug, Deserialize, Serialize)] #[serde(tag = "type")] #[non_exhaustive] pub struct Strip { pub strip_left: bool, pub strip_right: bool, } impl Strip { pub fn new(strip_left: bool, strip_right: bool) -> Self { Self { strip_left, strip_right, } } } impl Normalizer for Strip { /// Strip the normalized string inplace fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { if self.strip_left && self.strip_right { // Fast path normalized.strip(); } else { if self.strip_left { normalized.lstrip(); } if self.strip_right { normalized.rstrip(); } } Ok(()) } } // This normalizer removes combining marks from a normalized string // It's different from unidecode as it does not attempt to modify // non ascii languages. #[derive(Copy, Clone, Debug)] #[macro_rules_attribute(impl_serde_type!)] pub struct StripAccents; impl Normalizer for StripAccents { /// Strip the normalized string inplace fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { normalized.filter(|c| !is_combining_mark(c)); Ok(()) } } #[cfg(test)] mod tests { use super::*; use crate::normalizer::NormalizedString; use crate::normalizers::Lowercase; use crate::normalizers::NFKD; use unicode_normalization_alignments::UnicodeNormalization; #[test] fn test_strip_accents() { // Unicode combining char let original: String = "Me llamó".nfkd().map(|(c, _)| c).collect(); let normalized = "Me llamo"; assert_ne!(original, normalized); let mut n = NormalizedString::from(original); StripAccents.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); // Ignores regular ascii let original = "Me llamo"; let normalized = "Me llamo"; assert_eq!(original, normalized); let mut n = NormalizedString::from(original); StripAccents.normalize(&mut n).unwrap(); assert_eq!(&n.get(), 
&normalized); // Does not change chinese let original: String = "这很简单".nfkd().map(|(c, _)| c).collect(); let normalized = "这很简单"; assert_eq!(original, normalized); let mut n = NormalizedString::from(original); StripAccents.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); } #[test] fn test_vietnamese_bug() { let original: String = "ậ…".to_string(); let normalized = "a...".to_string(); assert_ne!(original, normalized); let mut n = NormalizedString::from(original); NFKD.normalize(&mut n).unwrap(); StripAccents.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); Lowercase.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); let original: String = "Cụ thể, bạn sẽ tham gia một nhóm các giám đốc điều hành tổ chức, các nhà lãnh đạo doanh nghiệp, các học giả, chuyên gia phát triển và tình nguyện viên riêng biệt trong lĩnh vực phi lợi nhuận…".to_string(); let normalized = "cu the, ban se tham gia mot nhom cac giam đoc đieu hanh to chuc, cac nha lanh đao doanh nghiep, cac hoc gia, chuyen gia phat trien va tinh nguyen vien rieng biet trong linh vuc phi loi nhuan...".to_string(); let mut n = NormalizedString::from(original); NFKD.normalize(&mut n).unwrap(); StripAccents.normalize(&mut n).unwrap(); Lowercase.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); } #[test] fn test_thai_bug() { let original = "ำน\u{e49}ำ3ลำ".to_string(); let normalized = "านา3ลา".to_string(); assert_ne!(original, normalized); let mut n = NormalizedString::from(original); NFKD.normalize(&mut n).unwrap(); StripAccents.normalize(&mut n).unwrap(); Lowercase.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); } #[test] fn test_strip_accents_multiple() { let original = "e\u{304}\u{304}\u{304}o"; let normalized = "eo"; assert_ne!(original, normalized); let mut n = NormalizedString::from(original); StripAccents.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); assert_eq!( n, NormalizedString::new( original.to_string(), 
normalized.to_string(), vec![(0, 1), (7, 8)], 0 ) ); assert_eq!( n.alignments_original(), vec![ (0, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 2) ] ); } }
tokenizers/tokenizers/src/normalizers/strip.rs/0
{ "file_path": "tokenizers/tokenizers/src/normalizers/strip.rs", "repo_id": "tokenizers", "token_count": 2512 }
337
use std::sync::LazyLock; use regex::Regex; use crate::tokenizer::{ pattern::Invert, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior, }; use crate::utils::macro_rules_attribute; #[derive(Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct Whitespace; impl Default for Whitespace { fn default() -> Self { Self } } impl PreTokenizer for Whitespace { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { static RE: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"\w+|[^\w\s]+").unwrap()); let re_ref: &Regex = &RE; pretokenized.split(|_, normalized| { normalized.split(Invert(re_ref), SplitDelimiterBehavior::Removed) }) } } #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct WhitespaceSplit; impl PreTokenizer for WhitespaceSplit { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { pretokenized.split(|_, normalized| { normalized.split(char::is_whitespace, SplitDelimiterBehavior::Removed) }) } } #[cfg(test)] mod tests { use super::*; use crate::{OffsetReferential, OffsetType, PreTokenizer}; #[test] fn basic() { let tests = vec![ ( "Hey man!", vec![("Hey", (0, 3)), ("man", (4, 7)), ("!", (7, 8))], ), ( "How are you doing?", vec![ ("How", (0, 3)), ("are", (4, 7)), ("you", (8, 11)), ("doing", (12, 17)), ("?", (17, 18)), ], ), ("\n", vec![]), ]; let pretok = Whitespace {}; for (s, res) in tests { let mut pretokenized = PreTokenizedString::from(s); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), res ); } } #[test] fn whitespace_split() { let tests = vec![ ("Hey man!", vec![("Hey", (0, 3)), ("man!", (4, 8))]), ( "Hey, man, Good?", vec![("Hey,", (0, 4)), ("man,", (5, 9)), ("Good?", (10, 15))], ), ]; let pretok = WhitespaceSplit; for (s, res) in tests { let mut pretokenized = 
PreTokenizedString::from(s); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), res ); } } }
tokenizers/tokenizers/src/pre_tokenizers/whitespace.rs/0
{ "file_path": "tokenizers/tokenizers/src/pre_tokenizers/whitespace.rs", "repo_id": "tokenizers", "token_count": 1656 }
338
//! This comes from the Rust libcore and is duplicated here because it is not exported //! (cf <https://github.com/rust-lang/rust/blob/25091ed9b7739e12466fb2490baa1e8a2815121c/src/libcore/iter/adapters/mod.rs#L2664>) //! We are now using the version from <https://stackoverflow.com/questions/44544323/how-to-unzip-a-sequence-of-resulta-b-e-to-a-veca-vecb-and-stop-on-f> //! because the one from the libcore seems to cause overflowing stacks in some cases //! It also contains a lines_with_ending that copies std::io::BufRead but keeps line endings. use std::io::BufRead; pub struct ResultShunt<I, E> { iter: I, error: Option<E>, } impl<I, T, E> ResultShunt<I, E> where I: Iterator<Item = Result<T, E>>, { /// Process the given iterator as if it yielded a `T` instead of a /// `Result<T, _>`. Any errors will stop the inner iterator and /// the overall result will be an error. pub fn process<F, U>(iter: I, mut f: F) -> Result<U, E> where F: FnMut(&mut Self) -> U, { let mut shunt = ResultShunt::new(iter); let value = f(shunt.by_ref()); shunt.reconstruct(value) } fn new(iter: I) -> Self { ResultShunt { iter, error: None } } /// Consume the adapter and rebuild a `Result` value. This should /// *always* be called, otherwise any potential error would be /// lost. fn reconstruct<U>(self, val: U) -> Result<U, E> { match self.error { None => Ok(val), Some(e) => Err(e), } } } impl<I, T, E> Iterator for ResultShunt<I, E> where I: Iterator<Item = Result<T, E>>, { type Item = T; fn next(&mut self) -> Option<Self::Item> { match self.iter.next() { Some(Ok(v)) => Some(v), Some(Err(e)) => { self.error = Some(e); None } None => None, } } } /// Copied from std::io::BufRead but keep newline characters. 
#[derive(Debug)] pub struct Lines<B> { buf: B, } pub trait LinesWithEnding<B> { fn lines_with_ending(self) -> Lines<B>; } impl<B> LinesWithEnding<B> for B where B: BufRead, { fn lines_with_ending(self) -> Lines<B> { Lines::<B> { buf: self } } } impl<B: BufRead> Iterator for Lines<B> { type Item = std::io::Result<String>; fn next(&mut self) -> Option<Self::Item> { let mut buf = String::new(); match self.buf.read_line(&mut buf) { Ok(0) => None, Ok(_n) => { // if buf.ends_with('\n') { // buf.pop(); // if buf.ends_with('\r') { // buf.pop(); // } // } Some(Ok(buf)) } Err(e) => Some(Err(e)), } } }
tokenizers/tokenizers/src/utils/iter.rs/0
{ "file_path": "tokenizers/tokenizers/src/utils/iter.rs", "repo_id": "tokenizers", "token_count": 1339 }
339
import re README_TEMPLATE = """ <p align="center"> <br/> <picture> <source media="(prefers-color-scheme: dark)" srcset="https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/transformersjs-dark.svg" width="500" style="max-width: 100%;"> <source media="(prefers-color-scheme: light)" srcset="https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/transformersjs-light.svg" width="500" style="max-width: 100%;"> <img alt="transformers.js javascript library logo" src="https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/transformersjs-light.svg" width="500" style="max-width: 100%;"> </picture> <br/> </p> <p align="center"> <a href="https://www.npmjs.com/package/@huggingface/transformers"><img alt="NPM" src="https://img.shields.io/npm/v/@huggingface/transformers"></a> <a href="https://www.npmjs.com/package/@huggingface/transformers"><img alt="NPM Downloads" src="https://img.shields.io/npm/dw/@huggingface/transformers"></a> <a href="https://www.jsdelivr.com/package/npm/@huggingface/transformers"><img alt="jsDelivr Hits" src="https://img.shields.io/jsdelivr/npm/hw/@huggingface/transformers"></a> <a href="https://github.com/huggingface/transformers.js/blob/main/LICENSE"><img alt="License" src="https://img.shields.io/github/license/huggingface/transformers.js?color=blue"></a> <a href="https://huggingface.co/docs/transformers.js/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers.js/index.svg?down_color=red&down_message=offline&up_message=online"></a> </p> {intro} ## Installation {installation} ## Quick tour {quick_tour} ## Examples {examples} ## Custom usage {custom_usage} ## Supported tasks/models Here is the list of all tasks and architectures currently supported by Transformers.js. If you don't see your task/model listed here or it is not yet supported, feel free to open up a feature request [here](https://github.com/huggingface/transformers.js/issues/new/choose). 
To find compatible models on the Hub, select the "transformers.js" library tag in the filter menu (or visit [this link](https://huggingface.co/models?library=transformers.js)). You can refine your search by selecting the task you're interested in (e.g., [text-classification](https://huggingface.co/models?pipeline_tag=text-classification&library=transformers.js)). {tasks} {models} """ FILES_TO_INCLUDE = dict( intro='./docs/snippets/0_introduction.snippet', quick_tour='./docs/snippets/1_quick-tour.snippet', installation='./docs/snippets/2_installation.snippet', examples='./docs/snippets/3_examples.snippet', custom_usage='./docs/snippets/4_custom-usage.snippet', tasks='./docs/snippets/5_supported-tasks.snippet', models='./docs/snippets/6_supported-models.snippet', ) DOCS_BASE_URL = 'https://huggingface.co/docs/transformers.js' # Map of custom links to replace, typically used for links to other sections of the README. CUSTOM_LINK_MAP = { '/custom_usage#convert-your-models-to-onnx': '#convert-your-models-to-onnx', './api/env': DOCS_BASE_URL + '/api/env', './guides/webgpu': DOCS_BASE_URL + '/guides/webgpu', './guides/dtypes': DOCS_BASE_URL + '/guides/dtypes', } def main(): file_data = {} for key, file_path in FILES_TO_INCLUDE.items(): with open(file_path, encoding='utf-8') as f: file_data[key] = f.read() # Fix links: # NOTE: This regex does not match all markdown links, but works for the ones we need to replace. LINK_RE = r'(?<=\])\((.+?)\)' def replace_fn(match): link = match.group(1) if link in CUSTOM_LINK_MAP: link = CUSTOM_LINK_MAP[link] elif link.startswith('/'): # Link to docs link = DOCS_BASE_URL + link elif link.startswith('./'): # Relative link to file pass elif link.startswith('http'): # Link to external site pass return f'({link})' result = README_TEMPLATE.format(**file_data) result = re.sub(LINK_RE, replace_fn, result, count=0, flags=re.MULTILINE) with open('README.md', 'w', encoding='utf-8') as f: f.write(result) if __name__ == '__main__': main()
transformers.js/docs/scripts/build_readme.py/0
{ "file_path": "transformers.js/docs/scripts/build_readme.py", "repo_id": "transformers.js", "token_count": 1760 }
340
# Transformers.js <include> { "path": "../snippets/0_introduction.snippet" } </include> ## Quick tour <include> { "path": "../snippets/1_quick-tour.snippet" } </include> ## Contents The documentation is organized into 4 sections: 1. **GET STARTED** provides a quick tour of the library and installation instructions to get up and running. 2. **TUTORIALS** are a great place to start if you're a beginner! We also include sample applications for you to play around with! 3. **DEVELOPER GUIDES** show you how to use the library to achieve a specific goal. 4. **API REFERENCE** describes all classes and functions, as well as their available parameters and types. ## Examples <include> { "path": "../snippets/3_examples.snippet" } </include> ## Supported tasks/models Here is the list of all tasks and architectures currently supported by Transformers.js. If you don't see your task/model listed here or it is not yet supported, feel free to open up a feature request [here](https://github.com/huggingface/transformers.js/issues/new/choose). To find compatible models on the Hub, select the "transformers.js" library tag in the filter menu (or visit [this link](https://huggingface.co/models?library=transformers.js)). You can refine your search by selecting the task you're interested in (e.g., [text-classification](https://huggingface.co/models?pipeline_tag=text-classification&library=transformers.js)). <include> { "path": "../snippets/5_supported-tasks.snippet" } </include> <include> { "path": "../snippets/6_supported-models.snippet" } </include>
transformers.js/docs/source/index.md/0
{ "file_path": "transformers.js/docs/source/index.md", "repo_id": "transformers.js", "token_count": 495 }
341
import { useState, useRef, useEffect, useCallback } from 'react' import './App.css' const PLACEHOLDER_TEXTS = [ "A panda is a large black-and-white bear native to China.", "The typical life span of a panda is 20 years in the wild.", "A panda's diet consists almost entirely of bamboo.", "Ailuropoda melanoleuca is a bear species endemic to China.", "I love pandas so much!", "Bamboo is a fast-growing, woody grass.", "My favorite movie is Kung Fu Panda.", "I love the color blue.", "Once upon a time, in a land far, far away...", "Hello world.", "This is an example sentence.", ].sort(() => Math.random() - 0.5); function normalize(embedding) { const magnitude = Math.sqrt(embedding.reduce((sum, val) => sum + val * val, 0)); return embedding.map((val) => val / magnitude); } function dot(a, b) { return a.reduce((acc, val, i) => acc + val * b[i], 0); } function App() { const [status, setStatus] = useState('idle'); const [source, setSource] = useState('What is a panda?'); const [text, setText] = useState(PLACEHOLDER_TEXTS.join('\n')); const [dimensions, setDimensions] = useState(768); const [embeddings, setEmbeddings] = useState([]); const [results, setResults] = useState([]); // Create a reference to the worker object. const worker = useRef(null); // We use the `useEffect` hook to setup the worker as soon as the `App` component is mounted. useEffect(() => { if (!worker.current) { // Create the worker if it does not yet exist. worker.current = new Worker(new URL('./worker.js', import.meta.url), { type: 'module' }); } // Create a callback function for messages from the worker thread. const onMessageReceived = (e) => { const status = e.data.status; if (status === 'initiate') { setStatus('loading'); } else if (status === 'ready') { setStatus('ready'); } else if (status === 'complete') { const embeddings = e.data.embeddings; setDimensions(embeddings[0].length); setEmbeddings(embeddings); setStatus('idle'); } }; // Attach the callback function as an event listener. 
worker.current.addEventListener('message', onMessageReceived); // Define a cleanup function for when the component is unmounted. return () => worker.current.removeEventListener('message', onMessageReceived); }, []); const run = useCallback(() => { setStatus('processing'); worker.current.postMessage({ source, text, }); }, [source, text]) useEffect(() => { if (embeddings.length === 0) return; const slicedEmbeddings = embeddings.map(x => normalize(x.slice(0, dimensions))); const sourceEmbedding = slicedEmbeddings[0]; const sentenceEmbeddings = slicedEmbeddings.slice(1); // Compute the cosine similarity between the source sentence and the other sentences. // NOTE: Since vectors are normalized, we use the dot product. const similarities = sentenceEmbeddings.map((embedding) => dot(sourceEmbedding, embedding)); setResults(text.trim().split('\n').map((sentence, i) => ({ sentence, similarity: similarities[i] })).sort((a, b) => b.similarity - a.similarity)); }, [text, embeddings, dimensions]) const busy = status !== 'idle'; return ( <div className='flex flex-col h-full'> <h1 className='text-2xl md:text-4xl font-bold text-center mb-1'>Adaptive Retrieval w/ Matryoshka Embeddings</h1> <p className='text-lg md:text-xl font-medium text-center mb-2'>Powered by <a href='https://huggingface.co/nomic-ai/nomic-embed-text-v1.5'>Nomic Embed v1.5</a> and <a href='http://huggingface.co/docs/transformers.js'>🤗 Transformers.js</a></p> <div className='flex-grow flex flex-wrap p-4'> <div className='flex flex-col items-center gap-y-1 w-full md:w-1/2'> <label className='text-lg font-medium'>Query</label> <textarea placeholder='Enter source sentence.' className='border w-full p-1 resize-none overflow-hidden h-10' value={source} onChange={e => { setSource(e.target.value); setResults([]); setEmbeddings([]); }} ></textarea> <label className='text-lg font-medium mt-1'>Text</label> <textarea placeholder='Enter sentences to compare with the source sentence. One sentence per line.' 
className='border w-full p-1 h-full resize-none' value={text} onChange={e => { setText(e.target.value); setResults([]); setEmbeddings([]); }} ></textarea> <button className='border py-1 px-2 bg-blue-400 rounded text-white text-lg font-medium disabled:opacity-50 disabled:cursor-not-allowed' disabled={busy} onClick={run}>{ !busy ? (embeddings.length === 0 ? 'Compute Embeddings' : 'Recompute Embeddings') : status === 'loading' ? 'Model loading...' : 'Processing' }</button> </div> <div className='flex flex-col items-center w-full md:w-1/2 gap-y-1'> {embeddings.length > 0 && (<> <label className='text-lg font-medium'>Dimensions</label> <input type="range" min="64" max="768" step="1" value={dimensions} onChange={e => { setDimensions(e.target.value); }} className="w-[98%] h-[10px]" /> <p className="font-bold text-sm">{dimensions}</p> <div className='w-full flex flex-col gap-y-1'> <label className='text-lg font-medium text-center mt-1'>Results</label> <div className='flex flex-col gap-y-1'> {results.map((result, i) => ( <div key={i} className='flex gap-x-2 border mx-2 p-1'> <span className='font-bold'>{result.similarity.toFixed(3)}</span> <span>{result.sentence}</span> </div> ))} </div> </div> </>) } </div> </div> </div> ) } export default App
transformers.js/examples/adaptive-retrieval/src/App.jsx/0
{ "file_path": "transformers.js/examples/adaptive-retrieval/src/App.jsx", "repo_id": "transformers.js", "token_count": 2829 }
342
@tailwind base; @tailwind components; @tailwind utilities; :root { font-family: Inter, system-ui, Avenir, Helvetica, Arial, sans-serif; line-height: 1.5; font-weight: 400; color-scheme: light dark; color: rgba(255, 255, 255, 0.87); background-color: #242424; font-synthesis: none; text-rendering: optimizeLegibility; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; -webkit-text-size-adjust: 100%; } a { font-weight: 500; color: #646cff; text-decoration: inherit; } a:hover { color: #535bf2; } body { margin: 0; display: flex; place-items: center; } h1 { font-size: 3.2em; line-height: 1.1; } button { border-radius: 8px; border: 1px solid transparent; padding: 0.6em 1.2em; font-size: 1em; font-weight: 500; font-family: inherit; background-color: #1a1a1a; cursor: pointer; transition: border-color 0.25s; } button:hover { border-color: #646cff; } button:focus, button:focus-visible { outline: 4px auto -webkit-focus-ring-color; } @media (prefers-color-scheme: light) { :root { color: #213547; background-color: #ffffff; } a:hover { color: #747bff; } button { background-color: #f9f9f9; } }
transformers.js/examples/code-completion/src/index.css/0
{ "file_path": "transformers.js/examples/code-completion/src/index.css", "repo_id": "transformers.js", "token_count": 514 }
343
/** @type {import('tailwindcss').Config} */ export default { content: [ "./index.html", "./src/**/*.{js,ts,jsx,tsx}", ], theme: { extend: {}, }, plugins: [], }
transformers.js/examples/cross-encoder/tailwind.config.js/0
{ "file_path": "transformers.js/examples/cross-encoder/tailwind.config.js", "repo_id": "transformers.js", "token_count": 82 }
344
{ "manifest_version": 3, "name": "extension", "description": "Transformers.js | Sample browser extension", "version": "0.0.1", "permissions": [ "activeTab", "scripting", "contextMenus", "storage", "unlimitedStorage" ], "background": { "service_worker": "background.js", "type": "module" }, "content_scripts": [ { "matches": [ "<all_urls>" ], "js": [ "content.js" ] } ], "minimum_chrome_version": "92", "action": { "default_icon": { "16": "icons/icon.png", "24": "icons/icon.png", "32": "icons/icon.png" }, "default_title": "Transformers.js", "default_popup": "popup.html" }, "content_security_policy": { "extension_pages": "script-src 'self' 'wasm-unsafe-eval'" }, "icons": { "16": "icons/icon.png", "48": "icons/icon.png", "128": "icons/icon.png" } }
transformers.js/examples/extension/public/manifest.json/0
{ "file_path": "transformers.js/examples/extension/public/manifest.json", "repo_id": "transformers.js", "token_count": 421 }
345
@tailwind base; @tailwind components; @tailwind utilities; @layer utilities { .scrollbar-thin::-webkit-scrollbar { @apply w-2; } .scrollbar-thin::-webkit-scrollbar-track { @apply rounded-full bg-gray-100 dark:bg-gray-700; } .scrollbar-thin::-webkit-scrollbar-thumb { @apply rounded-full bg-gray-300 dark:bg-gray-600; } .scrollbar-thin::-webkit-scrollbar-thumb:hover { @apply bg-gray-500; } }
transformers.js/examples/florence2-webgpu/src/index.css/0
{ "file_path": "transformers.js/examples/florence2-webgpu/src/index.css", "repo_id": "transformers.js", "token_count": 173 }
346
import { pipeline } from "@huggingface/transformers"; // Use the Singleton pattern to enable lazy construction of the pipeline. class PipelineSingleton { static task = 'text-classification'; static model = 'Xenova/distilbert-base-uncased-finetuned-sst-2-english'; static instance = null; static async getInstance(progress_callback = null) { this.instance ??= pipeline(this.task, this.model, { progress_callback }); return this.instance; } } // Listen for messages from the main thread self.addEventListener('message', async (event) => { // Retrieve the classification pipeline. When called for the first time, // this will load the pipeline and save it for future use. const classifier = await PipelineSingleton.getInstance(x => { // We also add a progress callback to the pipeline so that we can // track model loading. self.postMessage(x); }); // Actually perform the classification const output = await classifier(event.data.text); // Send the output back to the main thread self.postMessage({ status: 'complete', output: output, }); });
transformers.js/examples/next-client/src/app/worker.js/0
{ "file_path": "transformers.js/examples/next-client/src/app/worker.js", "repo_id": "transformers.js", "token_count": 369 }
347
// The full list of languages in FLORES-200 is available here: // https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200 const LANGUAGES = { "Acehnese (Arabic script)": "ace_Arab", "Acehnese (Latin script)": "ace_Latn", "Afrikaans": "afr_Latn", "Akan": "aka_Latn", "Amharic": "amh_Ethi", "Armenian": "hye_Armn", "Assamese": "asm_Beng", "Asturian": "ast_Latn", "Awadhi": "awa_Deva", "Ayacucho Quechua": "quy_Latn", "Balinese": "ban_Latn", "Bambara": "bam_Latn", "Banjar (Arabic script)": "bjn_Arab", "Banjar (Latin script)": "bjn_Latn", "Bashkir": "bak_Cyrl", "Basque": "eus_Latn", "Belarusian": "bel_Cyrl", "Bemba": "bem_Latn", "Bengali": "ben_Beng", "Bhojpuri": "bho_Deva", "Bosnian": "bos_Latn", "Buginese": "bug_Latn", "Bulgarian": "bul_Cyrl", "Burmese": "mya_Mymr", "Catalan": "cat_Latn", "Cebuano": "ceb_Latn", "Central Atlas Tamazight": "tzm_Tfng", "Central Aymara": "ayr_Latn", "Central Kanuri (Arabic script)": "knc_Arab", "Central Kanuri (Latin script)": "knc_Latn", "Central Kurdish": "ckb_Arab", "Chhattisgarhi": "hne_Deva", "Chinese (Simplified)": "zho_Hans", "Chinese (Traditional)": "zho_Hant", "Chokwe": "cjk_Latn", "Crimean Tatar": "crh_Latn", "Croatian": "hrv_Latn", "Czech": "ces_Latn", "Danish": "dan_Latn", "Dari": "prs_Arab", "Dutch": "nld_Latn", "Dyula": "dyu_Latn", "Dzongkha": "dzo_Tibt", "Eastern Panjabi": "pan_Guru", "Eastern Yiddish": "ydd_Hebr", "Egyptian Arabic": "arz_Arab", "English": "eng_Latn", "Esperanto": "epo_Latn", "Estonian": "est_Latn", "Ewe": "ewe_Latn", "Faroese": "fao_Latn", "Fijian": "fij_Latn", "Finnish": "fin_Latn", "Fon": "fon_Latn", "French": "fra_Latn", "Friulian": "fur_Latn", "Galician": "glg_Latn", "Ganda": "lug_Latn", "Georgian": "kat_Geor", "German": "deu_Latn", "Greek": "ell_Grek", "Guarani": "grn_Latn", "Gujarati": "guj_Gujr", "Haitian Creole": "hat_Latn", "Halh Mongolian": "khk_Cyrl", "Hausa": "hau_Latn", "Hebrew": "heb_Hebr", "Hindi": "hin_Deva", "Hungarian": "hun_Latn", "Icelandic": 
"isl_Latn", "Igbo": "ibo_Latn", "Ilocano": "ilo_Latn", "Indonesian": "ind_Latn", "Irish": "gle_Latn", "Italian": "ita_Latn", "Japanese": "jpn_Jpan", "Javanese": "jav_Latn", "Jingpho": "kac_Latn", "Kabiyè": "kbp_Latn", "Kabuverdianu": "kea_Latn", "Kabyle": "kab_Latn", "Kamba": "kam_Latn", "Kannada": "kan_Knda", "Kashmiri (Arabic script)": "kas_Arab", "Kashmiri (Devanagari script)": "kas_Deva", "Kazakh": "kaz_Cyrl", "Khmer": "khm_Khmr", "Kikongo": "kon_Latn", "Kikuyu": "kik_Latn", "Kimbundu": "kmb_Latn", "Kinyarwanda": "kin_Latn", "Korean": "kor_Hang", "Kyrgyz": "kir_Cyrl", "Lao": "lao_Laoo", "Latgalian": "ltg_Latn", "Ligurian": "lij_Latn", "Limburgish": "lim_Latn", "Lingala": "lin_Latn", "Lithuanian": "lit_Latn", "Lombard": "lmo_Latn", "Luba-Kasai": "lua_Latn", "Luo": "luo_Latn", "Luxembourgish": "ltz_Latn", "Macedonian": "mkd_Cyrl", "Magahi": "mag_Deva", "Maithili": "mai_Deva", "Malayalam": "mal_Mlym", "Maltese": "mlt_Latn", "Maori": "mri_Latn", "Marathi": "mar_Deva", "Meitei (Bengali script)": "mni_Beng", "Mesopotamian Arabic": "acm_Arab", "Minangkabau (Arabic script)": "min_Arab", "Minangkabau (Latin script)": "min_Latn", "Mizo": "lus_Latn", "Modern Standard Arabic (Romanized)": "arb_Latn", "Modern Standard Arabic": "arb_Arab", "Moroccan Arabic": "ary_Arab", "Mossi": "mos_Latn", "Najdi Arabic": "ars_Arab", "Nepali": "npi_Deva", "Nigerian Fulfulde": "fuv_Latn", "North Azerbaijani": "azj_Latn", "North Levantine Arabic": "apc_Arab", "Northern Kurdish": "kmr_Latn", "Northern Sotho": "nso_Latn", "Northern Uzbek": "uzn_Latn", "Norwegian Bokmål": "nob_Latn", "Norwegian Nynorsk": "nno_Latn", "Nuer": "nus_Latn", "Nyanja": "nya_Latn", "Occitan": "oci_Latn", "Odia": "ory_Orya", "Pangasinan": "pag_Latn", "Papiamento": "pap_Latn", "Plateau Malagasy": "plt_Latn", "Polish": "pol_Latn", "Portuguese": "por_Latn", "Romanian": "ron_Latn", "Rundi": "run_Latn", "Russian": "rus_Cyrl", "Samoan": "smo_Latn", "Sango": "sag_Latn", "Sanskrit": "san_Deva", "Santali": "sat_Olck", 
"Sardinian": "srd_Latn", "Scottish Gaelic": "gla_Latn", "Serbian": "srp_Cyrl", "Shan": "shn_Mymr", "Shona": "sna_Latn", "Sicilian": "scn_Latn", "Silesian": "szl_Latn", "Sindhi": "snd_Arab", "Sinhala": "sin_Sinh", "Slovak": "slk_Latn", "Slovenian": "slv_Latn", "Somali": "som_Latn", "South Azerbaijani": "azb_Arab", "South Levantine Arabic": "ajp_Arab", "Southern Pashto": "pbt_Arab", "Southern Sotho": "sot_Latn", "Southwestern Dinka": "dik_Latn", "Spanish": "spa_Latn", "Standard Latvian": "lvs_Latn", "Standard Malay": "zsm_Latn", "Standard Tibetan": "bod_Tibt", "Sundanese": "sun_Latn", "Swahili": "swh_Latn", "Swati": "ssw_Latn", "Swedish": "swe_Latn", "Tagalog": "tgl_Latn", "Tajik": "tgk_Cyrl", "Tamasheq (Latin script)": "taq_Latn", "Tamasheq (Tifinagh script)": "taq_Tfng", "Tamil": "tam_Taml", "Tatar": "tat_Cyrl", "Ta’izzi-Adeni Arabic": "acq_Arab", "Telugu": "tel_Telu", "Thai": "tha_Thai", "Tigrinya": "tir_Ethi", "Tok Pisin": "tpi_Latn", "Tosk Albanian": "als_Latn", "Tsonga": "tso_Latn", "Tswana": "tsn_Latn", "Tumbuka": "tum_Latn", "Tunisian Arabic": "aeb_Arab", "Turkish": "tur_Latn", "Turkmen": "tuk_Latn", "Twi": "twi_Latn", "Ukrainian": "ukr_Cyrl", "Umbundu": "umb_Latn", "Urdu": "urd_Arab", "Uyghur": "uig_Arab", "Venetian": "vec_Latn", "Vietnamese": "vie_Latn", "Waray": "war_Latn", "Welsh": "cym_Latn", "West Central Oromo": "gaz_Latn", "Western Persian": "pes_Arab", "Wolof": "wol_Latn", "Xhosa": "xho_Latn", "Yoruba": "yor_Latn", "Yue Chinese": "yue_Hant", "Zulu": "zul_Latn", } export default function LanguageSelector({ type, onChange, defaultLanguage }) { return ( <div className='language-selector'> <label>{type}: </label> <select onChange={onChange} defaultValue={defaultLanguage}> {Object.entries(LANGUAGES).map(([key, value]) => { return <option key={key} value={value}>{key}</option> })} </select> </div> ) }
transformers.js/examples/react-translator/src/components/LanguageSelector.jsx/0
{ "file_path": "transformers.js/examples/react-translator/src/components/LanguageSelector.jsx", "repo_id": "transformers.js", "token_count": 3102 }
348
// Segment Anything (SAM) browser demo: the user uploads an image, the worker
// computes its embedding once, and mask decoding runs interactively as the
// user hovers or clicks points. All model inference happens in a web worker.

// Reference the elements we will use
const statusLabel = document.getElementById('status');
const fileUpload = document.getElementById('upload');
const imageContainer = document.getElementById('container');
const example = document.getElementById('example');
const maskCanvas = document.getElementById('mask-output');
const uploadButton = document.getElementById('upload-button');
const resetButton = document.getElementById('reset-image');
const clearButton = document.getElementById('clear-points');
const cutButton = document.getElementById('cut-mask');

// State variables
let lastPoints = null;          // points pending/used for the next decode, or null
let isEncoded = false;          // true once the worker has the image embedding
let isDecoding = false;         // true while a decode request is in flight
let isMultiMaskMode = false;    // true after the first click: points accumulate instead of following the mouse
let modelReady = false;         // true once the worker reports the model is loaded
let imageDataURI = null;        // data URI (or URL) of the currently loaded image

// Constants
const BASE_URL = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/';
const EXAMPLE_URL = BASE_URL + 'corgi.jpg';

// Create a web worker so that the main (UI) thread is not blocked during inference.
const worker = new Worker(
    new URL('./worker.js', import.meta.url),
    { type: 'module' }
);

// Preload star and cross images to avoid lag on first click
const star = new Image();
star.src = BASE_URL + 'star-icon.png';
star.className = 'icon';

const cross = new Image();
cross.src = BASE_URL + 'cross-icon.png';
cross.className = 'icon';

// Set up message handler for results coming back from the worker.
worker.addEventListener('message', (e) => {
    const { type, data } = e.data;
    if (type === 'ready') {
        modelReady = true;
        statusLabel.textContent = 'Ready';

    } else if (type === 'decode_result') {
        isDecoding = false;

        if (!isEncoded) {
            return; // We are not ready to decode yet
        }

        if (!isMultiMaskMode && lastPoints) {
            // A newer hover point arrived while this decode was in flight:
            // immediately kick off a decode for it, then render the result we
            // just received below.
            decode();
            lastPoints = null;
        }

        const { mask, scores } = data;

        // Update canvas dimensions (if different)
        if (maskCanvas.width !== mask.width || maskCanvas.height !== mask.height) {
            maskCanvas.width = mask.width;
            maskCanvas.height = mask.height;
        }

        // Create context and allocate buffer for pixel data
        const context = maskCanvas.getContext('2d');
        const imageData = context.createImageData(maskCanvas.width, maskCanvas.height);

        // Select the candidate mask with the highest score (SAM returns
        // several candidates per prompt; here scores.length is 3).
        const numMasks = scores.length; // 3
        let bestIndex = 0;
        for (let i = 1; i < numMasks; ++i) {
            if (scores[i] > scores[bestIndex]) {
                bestIndex = i;
            }
        }
        statusLabel.textContent = `Segment score: ${scores[bestIndex].toFixed(2)}`;

        // Fill mask with colour. mask.data is interleaved per pixel:
        // numMasks values per pixel, one per candidate mask.
        const pixelData = imageData.data;
        for (let i = 0; i < pixelData.length; ++i) {
            if (mask.data[numMasks * i + bestIndex] === 1) {
                const offset = 4 * i;
                pixelData[offset] = 0;       // red
                pixelData[offset + 1] = 114; // green
                pixelData[offset + 2] = 189; // blue
                pixelData[offset + 3] = 255; // alpha
            }
        }

        // Draw image data to context
        context.putImageData(imageData, 0, 0);

    } else if (type === 'segment_result') {
        if (data === 'start') {
            statusLabel.textContent = 'Extracting image embedding...';
        } else {
            statusLabel.textContent = 'Embedding extracted!';
            isEncoded = true;
        }
    }
});

// Ask the worker to decode a mask for the current set of points.
function decode() {
    isDecoding = true;
    worker.postMessage({ type: 'decode', data: lastPoints });
}

// Clear all placed points and the rendered mask, but keep the image/embedding.
function clearPointsAndMask() {
    // Reset state
    isMultiMaskMode = false;
    lastPoints = null;

    // Remove points from previous mask (if any)
    document.querySelectorAll('.icon').forEach(e => e.remove());

    // Disable cut button
    cutButton.disabled = true;

    // Reset mask canvas
    maskCanvas.getContext('2d').clearRect(0, 0, maskCanvas.width, maskCanvas.height);
}
clearButton.addEventListener('click', clearPointsAndMask);

// Full reset: discard the image and embedding and return to the upload state.
resetButton.addEventListener('click', () => {
    // Update state
    isEncoded = false;
    imageDataURI = null;

    // Indicate to worker that we have reset the state
    worker.postMessage({ type: 'reset' });

    // Clear points and mask (if present)
    clearPointsAndMask();

    // Update UI
    cutButton.disabled = true;
    imageContainer.style.backgroundImage = 'none';
    uploadButton.style.display = 'flex';
    statusLabel.textContent = 'Ready';
});

// Load a new image (data URI or URL) and ask the worker to embed it.
function segment(data) {
    // Update state
    isEncoded = false;
    if (!modelReady) {
        // First segmentation also triggers the model download in the worker.
        statusLabel.textContent = 'Loading model...';
    }
    imageDataURI = data;

    // Update UI
    imageContainer.style.backgroundImage = `url(${data})`;
    uploadButton.style.display = 'none';
    cutButton.disabled = true;

    // Instruct worker to segment the image
    worker.postMessage({ type: 'segment', data });
}

// Handle file selection
fileUpload.addEventListener('change', function (e) {
    const file = e.target.files[0];
    if (!file) {
        return;
    }

    const reader = new FileReader();

    // Set up a callback when the file is loaded
    reader.onload = e2 => segment(e2.target.result);

    reader.readAsDataURL(file);
});

example.addEventListener('click', (e) => {
    e.preventDefault();
    segment(EXAMPLE_URL);
});

// Place a star (positive) or cross (negative) marker at a normalized point.
function addIcon({ point, label }) {
    const icon = (label === 1 ? star : cross).cloneNode();
    icon.style.left = `${point[0] * 100}%`;
    icon.style.top = `${point[1] * 100}%`;
    imageContainer.appendChild(icon);
}

// Handle clicks on the image: left click adds a positive point, right click a
// negative one. The first click switches into multi-mask (point-accumulation)
// mode and enables cutting.
imageContainer.addEventListener('mousedown', e => {
    if (e.button !== 0 && e.button !== 2) {
        return; // Ignore other buttons
    }
    if (!isEncoded) {
        return; // Ignore if not encoded yet
    }
    if (!isMultiMaskMode) {
        lastPoints = [];
        isMultiMaskMode = true;
        cutButton.disabled = false;
    }

    const point = getPoint(e);
    lastPoints.push(point);

    // add icon
    addIcon(point);

    decode();
});

// Clamp a value inside a range [min, max]
function clamp(x, min = 0, max = 1) {
    return Math.max(Math.min(x, max), min)
}

// Convert a mouse event into a normalized (0..1) point with a prompt label.
function getPoint(e) {
    // Get bounding box
    const bb = imageContainer.getBoundingClientRect();

    // Get the mouse coordinates relative to the container
    const mouseX = clamp((e.clientX - bb.left) / bb.width);
    const mouseY = clamp((e.clientY - bb.top) / bb.height);

    return {
        point: [mouseX, mouseY],
        label: e.button === 2 // right click
            ? 0 // negative prompt
            : 1, // positive prompt
    }
}

// Do not show context menu on right click
imageContainer.addEventListener('contextmenu', e => {
    e.preventDefault();
});

// Live preview: before the first click, the mask follows the mouse.
imageContainer.addEventListener('mousemove', e => {
    if (!isEncoded || isMultiMaskMode) {
        // Ignore mousemove events if the image is not encoded yet,
        // or we are in multi-mask mode
        return;
    }
    lastPoints = [getPoint(e)];

    if (!isDecoding) {
        decode(); // Only decode if we are not already decoding
    }
});

// Handle cut button click: copy the masked pixels of the original image onto
// a transparent canvas and download it as a PNG.
cutButton.addEventListener('click', () => {
    const [w, h] = [maskCanvas.width, maskCanvas.height];

    // Get the mask pixel data
    const maskContext = maskCanvas.getContext('2d');
    const maskPixelData = maskContext.getImageData(0, 0, w, h);

    // Load the image
    const image = new Image();
    image.crossOrigin = 'anonymous';
    image.onload = async () => {
        // Create a new canvas to hold the image
        const imageCanvas = new OffscreenCanvas(w, h);
        const imageContext = imageCanvas.getContext('2d');
        imageContext.drawImage(image, 0, 0, w, h);
        const imagePixelData = imageContext.getImageData(0, 0, w, h);

        // Create a new canvas to hold the cut-out
        const cutCanvas = new OffscreenCanvas(w, h);
        const cutContext = cutCanvas.getContext('2d');
        const cutPixelData = cutContext.getImageData(0, 0, w, h);

        // Copy the image pixel data to the cut canvas. `i` walks the alpha
        // channel of each RGBA pixel; offsets i, i-1, i-2, i-3 are that
        // pixel's A, B, G, R components.
        for (let i = 3; i < maskPixelData.data.length; i += 4) {
            if (maskPixelData.data[i] > 0) {
                for (let j = 0; j < 4; ++j) {
                    const offset = i - j;
                    cutPixelData.data[offset] = imagePixelData.data[offset];
                }
            }
        }
        cutContext.putImageData(cutPixelData, 0, 0);

        // Download image
        const link = document.createElement('a');
        link.download = 'image.png';
        link.href = URL.createObjectURL(await cutCanvas.convertToBlob());
        link.click();
        link.remove();
    }
    image.src = imageDataURI;
});
transformers.js/examples/segment-anything-client/index.js/0
{ "file_path": "transformers.js/examples/segment-anything-client/index.js", "repo_id": "transformers.js", "token_count": 3452 }
349
/** @type {import('next').NextConfig} */ const nextConfig = { // (Optional) Export as a static site // See https://nextjs.org/docs/pages/building-your-application/deploying/static-exports#configuration output: 'export', // Feel free to modify/remove this option // Override the default webpack configuration webpack: (config) => { // Ignore node-specific modules when bundling for the browser // See https://webpack.js.org/configuration/resolve/#resolvealias config.resolve.alias = { ...config.resolve.alias, 'sharp$': false, 'onnxruntime-node$': false, } return config; }, }; module.exports = nextConfig;
transformers.js/examples/semantic-image-search-client/next.config.js/0
{ "file_path": "transformers.js/examples/semantic-image-search-client/next.config.js", "repo_id": "transformers.js", "token_count": 269 }
350
SUPABASE_URL=your-project-url SUPABASE_ANON_KEY=your-anon-key SUPABASE_SECRET_KEY=your-secret-key
transformers.js/examples/semantic-image-search/.env.local.example/0
{ "file_path": "transformers.js/examples/semantic-image-search/.env.local.example", "repo_id": "transformers.js", "token_count": 45 }
351
// Horizontal progress bar: a blue fill whose width tracks `percentage`,
// with `text` and the formatted percentage rendered inside the fill.
export default function Progress({ text, percentage }) {
    // Treat a missing/null percentage as 0 so the label always renders.
    const pct = percentage ?? 0;
    const fillStyle = { width: `${pct}%` };

    return (
        <div className="relative text-black bg-white rounded-lg text-left overflow-hidden">
            <div className='px-2 w-[1%] h-full bg-blue-500 whitespace-nowrap' style={fillStyle}>
                {text} ({`${pct.toFixed(2)}%`})
            </div>
        </div>
    );
}
transformers.js/examples/text-to-speech-client/src/components/Progress.jsx/0
{ "file_path": "transformers.js/examples/text-to-speech-client/src/components/Progress.jsx", "repo_id": "transformers.js", "token_count": 144 }
352
// Tokenizer Playground: tokenizes the textarea contents with a selectable
// tokenizer, entirely in the browser via a web worker. The active tokenizer
// and initial text can be preset through the `tokenizer` and `text` URL
// query parameters.
import { useCallback, useEffect, useRef, useState } from 'react'
import { Token } from './components/Token'
import './App.css'

// Define list of tokenizers and their corresponding human-readable names.
// The empty key represents the "Custom" option (user-supplied model id).
const TOKENIZER_OPTIONS = Object.freeze({
    'Xenova/gpt-4': 'gpt-4 / gpt-3.5-turbo / text-embedding-ada-002',
    'Xenova/text-davinci-003': 'text-davinci-003 / text-davinci-002',
    'Xenova/gpt-3': 'gpt-3',
    'Xenova/grok-1-tokenizer': 'Grok-1',
    'Xenova/claude-tokenizer': 'Claude',
    'Xenova/mistral-tokenizer-v3': 'Mistral v3',
    'Xenova/mistral-tokenizer-v1': 'Mistral v1',
    'Xenova/gemma-tokenizer': 'Gemma',
    'Xenova/llama-3-tokenizer': 'Llama 3',
    'Xenova/llama-tokenizer': 'LLaMA / Llama 2',
    'Xenova/c4ai-command-r-v01-tokenizer': 'Cohere Command-R',
    'Xenova/t5-small': 'T5',
    'Xenova/bert-base-cased': 'bert-base-cased',
    '': 'Custom',
})

function App() {

    // Allow user to set tokenizer and text via URL query parameters
    const urlParams = new URLSearchParams(window.location.search);
    const tokenizerParam = urlParams.get('tokenizer');
    const textParam = urlParams.get('text');

    const [tokenIds, setTokenIds] = useState([]);           // token ids from the worker
    const [decodedTokens, setDecodedTokens] = useState([]); // per-token decoded strings
    const [margins, setMargins] = useState([]);             // per-token display margins
    const [outputOption, setOutputOption] = useState('text'); // 'text' | 'token_ids' | null (hidden)
    const [tokenizer, setTokenizer] = useState(tokenizerParam ?? 'Xenova/gpt-4');
    const [customTokenizer, setCustomTokenizer] = useState('');

    const textareaRef = useRef(null);
    const outputRef = useRef(null);

    // Create a reference to the worker object.
    const worker = useRef(null);

    // We use the `useEffect` hook to set up the worker as soon as the `App` component is mounted.
    useEffect(() => {
        if (!worker.current) {
            // Create the worker if it does not yet exist.
            worker.current = new Worker(new URL('./worker.js', import.meta.url), {
                type: 'module'
            });
        }

        // Create a callback function for messages from the worker thread.
        const onMessageReceived = (e) => {
            setTokenIds(e.data.token_ids);
            setDecodedTokens(e.data.decoded);
            setMargins(e.data.margins);
        };

        // Attach the callback function as an event listener.
        worker.current.addEventListener('message', onMessageReceived);

        // Define a cleanup function for when the component is unmounted.
        return () => worker.current.removeEventListener('message', onMessageReceived);
    }, []);

    // Clear all tokenization output and show the default 'text' view.
    const resetOutput = useCallback(() => {
        setOutputOption('text');
        setTokenIds([]);
        setDecodedTokens([]);
        setMargins([]);
    }, []);

    // Send the current text to the worker for tokenization with the active tokenizer.
    const onInputChange = useCallback((e) => {
        const model_id = tokenizer;
        const text = e.target.value;
        if (text.length > 10000) {
            setOutputOption(null);
            console.log('User most likely pasted in a large body of text (> 10k chars), so we hide the output (until specifically requested by the user).');
        }
        worker.current.postMessage({ model_id, text });
    }, [tokenizer]);

    // If text was supplied via the URL, tokenize it on mount (and whenever the
    // tokenizer — and hence onInputChange — changes).
    useEffect(() => {
        if (textParam) {
            onInputChange({ target: { value: textParam } });
        }
    }, [onInputChange, textParam]);

    // Switch tokenizers and re-tokenize the current textarea contents.
    // An empty model_id means "Custom" was selected with no id entered yet.
    const onTokenizerChange = useCallback((e) => {
        const model_id = e.target.value;
        setTokenizer(model_id);
        if (!model_id) return;
        worker.current.postMessage({ model_id, text: textareaRef.current.value });
    }, []);

    return (
        <div className='w-full max-w-[720px] flex flex-col gap-4 items-center'>

            <div>
                <h1 className='text-5xl font-bold mb-2'>The Tokenizer Playground</h1>
                <h2 className='text-lg font-normal'>Experiment with different tokenizers (running <a className="text-gray-900 underline" href="https://github.com/huggingface/transformers.js">locally</a> in your browser).</h2>
            </div>

            <div>
                {/* The select shows '' (the "Custom" option) whenever a custom tokenizer id is in use. */}
                <select value={(tokenizer in TOKENIZER_OPTIONS && !customTokenizer) ? tokenizer : ''} onChange={(e) => {
                    resetOutput();
                    setCustomTokenizer('');
                    onTokenizerChange(e);
                }} className="bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2">
                    {Object.entries(TOKENIZER_OPTIONS).map(([value, label]) => (
                        <option key={value} value={value}>{label}</option>
                    ))}
                </select>
                {(!(tokenizer in TOKENIZER_OPTIONS) || customTokenizer || tokenizer === '') && (
                    <input
                        type="text"
                        placeholder="Custom tokenizer"
                        defaultValue={customTokenizer || tokenizer}
                        onChange={(e) => {
                            setCustomTokenizer(e.target.value);
                            onTokenizerChange(e);
                        }}
                        className="bg-white border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full py-1 px-2 mt-1"
                    />
                )}
            </div>

            {/* Uncontrolled textarea: the worker, not React state, holds the tokenized result. */}
            <textarea
                ref={textareaRef}
                onChange={onInputChange}
                rows="8"
                className="font-mono text-lg block w-full p-2.5 text-gray-900 bg-gray-50 rounded-lg border border-gray-200"
                placeholder="Enter some text"
                defaultValue={textParam ?? textareaRef.current?.value ?? ''}
            ></textarea>

            <div className='flex justify-center gap-5'>
                <div className='flex flex-col'>
                    <h2 className='font-semibold uppercase leading-4'>Tokens</h2>
                    <h3 className='font-semibold text-3xl'>{tokenIds.length.toLocaleString()}</h3>
                </div>
                <div className='flex flex-col'>
                    <h2 className='font-semibold uppercase leading-4'>Characters</h2>
                    <h3 className='font-semibold text-3xl'>{(textareaRef.current?.value.length ?? 0).toLocaleString()}</h3>
                </div>
            </div>

            <div ref={outputRef} className='font-mono text-lg p-2.5 w-full bg-gray-100 rounded-lg border border-gray-200 whitespace-pre-wrap text-left h-[200px] overflow-y-auto'>
                {outputOption === 'text' ? (
                    decodedTokens.map(
                        (token, index) => <Token key={index} text={token} position={index} margin={margins[index]} />
                    )
                ) : outputOption === 'token_ids' ? (
                    `[${tokenIds.join(', ')}]`
                ) : null}
            </div>

            <div className="flex items-center gap-2 self-end">
                <div className="flex items-center">
                    <input checked={outputOption === 'text'} onChange={() => setOutputOption('text')} id="output-radio-1" type="radio" value="" name="output-radio" className="w-4 h-4 text-blue-600 bg-gray-100 border-gray-300 focus:ring-blue-500" />
                    <label htmlFor="output-radio-1" className="ml-1 text-sm font-medium text-gray-900 dark:text-gray-300">Text</label>
                </div>
                <div className="flex items-center">
                    <input checked={outputOption === 'token_ids'} onChange={() => setOutputOption('token_ids')} id="output-radio-2" type="radio" value="" name="output-radio" className="w-4 h-4 text-blue-600 bg-gray-100 border-gray-300 focus:ring-blue-500" />
                    <label htmlFor="output-radio-2" className="ml-1 text-sm font-medium text-gray-900 dark:text-gray-300">Token IDs</label>
                </div>
                <div className="flex items-center">
                    <input checked={outputOption === null} onChange={() => setOutputOption(null)} id="output-radio-3" type="radio" value="" name="output-radio" className="w-4 h-4 text-blue-600 bg-gray-100 border-gray-300 focus:ring-blue-500" />
                    <label htmlFor="output-radio-3" className="ml-1 text-sm font-medium text-gray-900 dark:text-gray-300">Hide</label>
                </div>
            </div>
        </div >
    )
}

export default App
{ "file_path": "transformers.js/examples/tokenizer-playground/src/App.jsx", "repo_id": "transformers.js", "token_count": 3075 }
353
* { box-sizing: border-box; padding: 0; margin: 0; font-family: sans-serif; } html, body { height: 100%; } body { padding: 16px 32px; } body, #container { display: flex; flex-direction: column; justify-content: center; align-items: center; } #controls { display: flex; padding: 1rem; gap: 1rem; } #controls>div { text-align: center; } h1, h4 { text-align: center; } h4 { margin-top: 0.5rem; } #container { position: relative; width: 720px; height: 405px; max-width: 100%; max-height: 100%; border: 2px dashed #D1D5DB; border-radius: 0.75rem; overflow: hidden; margin-top: 1rem; background-size: 100% 100%; background-position: center; background-repeat: no-repeat; } #overlay, canvas { position: absolute; width: 100%; height: 100%; } #status { min-height: 16px; margin: 8px 0; } .bounding-box { position: absolute; box-sizing: border-box; border: solid 2px; } .bounding-box-label { color: white; position: absolute; font-size: 12px; margin: -16px 0 0 -2px; padding: 1px; }
transformers.js/examples/video-object-detection/style.css/0
{ "file_path": "transformers.js/examples/video-object-detection/style.css", "repo_id": "transformers.js", "token_count": 445 }
354
* { box-sizing: border-box; padding: 0; margin: 0; font-family: sans-serif; } html, body { height: 100%; } body { padding: 16px 32px; display: flex; flex-direction: column; justify-content: center; align-items: center; } h1 { text-align: center; } #status { min-height: 16px; margin: 8px 0; text-align: center; } button { transition: all .25s; background: rgba(40, 44, 52, 0.05); border: 1px solid transparent; border-radius: 6px; color: #3080d0; text-decoration: none !important; display: inline-block; font-size: 14px; font-weight: 500; padding: 8px 16px; cursor: pointer; -webkit-user-select: none; -moz-user-select: none; user-select: none; } button:disabled { background: rgba(40, 44, 52, 0.1); color: #a0a0a0; cursor: not-allowed; } button:hover { background: rgba(40, 44, 52, 0.1); } p { text-align: center; font-size: 12px; max-width: 600px; padding: 8px; } #chart-container { position: relative; height: 60vh; width: min(90vw, 800px); padding-right: 50px; margin-bottom: 10px; } details { position: fixed; background-color: white; right: 0; top: 0; padding: 16px; } summary { text-align: right; } hr { margin: 8px 0; }
transformers.js/examples/webgpu-embedding-benchmark/style.css/0
{ "file_path": "transformers.js/examples/webgpu-embedding-benchmark/style.css", "repo_id": "transformers.js", "token_count": 518 }
355
import { useMemo } from "react"; const Chunk = ({ chunk, currentTime, onClick, ...props }) => { const { text, timestamp } = chunk; const [start, end] = timestamp; const bolded = start <= currentTime && currentTime < end; return ( <span {...props}> {text.startsWith(' ') ? " " : ""} <span onClick={onClick} className="text-md text-gray-600 cursor-pointer hover:text-red-600" title={timestamp.map(x => x.toFixed(2)).join(' → ')} style={{ textDecoration: bolded ? 'underline' : 'none', textShadow: bolded ? '0 0 1px #000' : 'none', }} >{text.trim()}</span> </span> ) } const Transcript = ({ transcript, currentTime, setCurrentTime, ...props }) => { const jsonTranscript = useMemo(() => { return JSON.stringify(transcript, null, 2) // post-process the JSON to make it more readable .replace(/( {4}"timestamp": )\[\s+(\S+)\s+(\S+)\s+\]/gm, "$1[$2 $3]"); }, [transcript]); const downloadTranscript = () => { const blob = new Blob([jsonTranscript], { type: 'application/json' }); const url = URL.createObjectURL(blob); const a = document.createElement('a'); a.href = url; a.download = 'transcript.json'; a.click(); URL.revokeObjectURL(url); } return (<> <div {...props}> { transcript.chunks.map((chunk, i) => <Chunk key={i} chunk={chunk} currentTime={currentTime} onClick={e => { setCurrentTime(chunk.timestamp[0]) // Set to start of chunk }} />) } </div> <div className="flex justify-center border-t text-sm text-gray-600 max-h-[150px] overflow-y-auto p-2 scrollbar-thin"> <button className="flex items-center border px-2 py-1 rounded-lg bg-green-400 text-white hover:bg-green-500" onClick={downloadTranscript} > <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" strokeWidth={1.5} stroke="currentColor" className="size-6 mr-1"> <path strokeLinecap="round" strokeLinejoin="round" d="M3 16.5v2.25A2.25 2.25 0 0 0 5.25 21h13.5A2.25 2.25 0 0 0 21 18.75V16.5M16.5 12 12 16.5m0 0L7.5 12m4.5 4.5V3" /> </svg> Download transcript </button> </div> </>) }; export default Transcript;
transformers.js/examples/whisper-word-timestamps/src/components/Transcript.jsx/0
{ "file_path": "transformers.js/examples/whisper-word-timestamps/src/components/Transcript.jsx", "repo_id": "transformers.js", "token_count": 1253 }
356