""" Processor for Gemma3Tiled that handles tokenization with tiled images. This processor generates the correct number of image placeholder tokens based on the tile grid dimensions. """ import re from typing import Optional, Union import torch import numpy as np from transformers.feature_extraction_utils import BatchFeature from transformers.image_utils import ImageInput, make_nested_list_of_images from transformers.processing_utils import ProcessingKwargs, ProcessorMixin, Unpack, ImagesKwargs, MultiModalData from transformers.tokenization_utils_base import PreTokenizedInput, TextInput def calculate_tile_grid( image_height: int, image_width: int, tile_size: int, max_tiles_h: int, max_tiles_w: int, min_tiles: int = 1, ) -> tuple[int, int]: """ Calculate the optimal tile grid dimensions for an image. The strategy is to: 1. Maximize effective resolution (pixels preserved from original image) 2. Minimize wasted canvas space as a tiebreaker """ original_pixels = image_height * image_width best_grid = (1, 1) best_score = float('-inf') for rows in range(1, max_tiles_h + 1): for cols in range(1, max_tiles_w + 1): total_tiles = rows * cols if total_tiles < min_tiles: continue canvas_h = rows * tile_size canvas_w = cols * tile_size scale = min(canvas_w / image_width, canvas_h / image_height) effective = min(image_height * image_width * scale * scale, original_pixels) waste = (canvas_h * canvas_w) - effective score = effective - 0.001 * waste if score > best_score: best_score = score best_grid = (rows, cols) return best_grid class Gemma3TiledImagesKwargs(ImagesKwargs): tile_size: Optional[int] max_tiles_h: Optional[int] max_tiles_w: Optional[int] min_tiles: Optional[int] do_convert_rgb: Optional[bool] class Gemma3TiledProcessorKwargs(ProcessingKwargs, total=False): images_kwargs: Gemma3TiledImagesKwargs _defaults = { "text_kwargs": { "padding": False, "return_mm_token_type_ids": True, }, "images_kwargs": { "do_convert_rgb": True, "tile_size": 896, "max_tiles_h": 4, "max_tiles_w": 4, "min_tiles": 1, }, } class Gemma3TiledProcessor(ProcessorMixin): """ Processor for Gemma3Tiled that handles tokenization with tiled images. The key difference from Gemma3Processor is that instead of a fixed 256 tokens per image, we generate (grid_h * 16) * (grid_w * 16) + (grid_h * 16 - 1) tokens per image, where the extra tokens are for linebreak embeddings. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "AutoImageProcessor" # Use AutoImageProcessor for compatibility tokenizer_class = "AutoTokenizer" _auto_class = "AutoProcessor" # Required for auto_map in processor_config.json def __init__( self, image_processor, tokenizer, chat_template=None, tokens_per_tile: int = 256, # 16x16 = 256 tokens per tile after projection **kwargs, ): self.tokens_per_tile = tokens_per_tile self.tokens_per_tile_side = int(tokens_per_tile ** 0.5) # 16 self.image_token_id = tokenizer.image_token_id self.boi_token = tokenizer.boi_token self.eoi_token = getattr(tokenizer, 'eoi_token', '') # Fallback self.image_token = tokenizer.image_token super().__init__( image_processor=image_processor, tokenizer=tokenizer, chat_template=chat_template, **kwargs, ) def get_num_image_tokens(self, grid_h: int, grid_w: int) -> int: """ Calculate total image tokens needed for a tile grid. For a grid_h x grid_w grid of tiles: - Image tokens: (grid_h * 16) * (grid_w * 16) = grid_h * grid_w * 256 - Linebreak tokens: (grid_h * 16 - 1) = one after each row except the last Total = grid_h * grid_w * 256 + grid_h * 16 - 1 """ rows = grid_h * self.tokens_per_tile_side cols = grid_w * self.tokens_per_tile_side img_tokens = rows * cols linebreak_tokens = rows - 1 return img_tokens + linebreak_tokens def build_image_token_sequence(self, grid_h: int, grid_w: int) -> str: """ Build the image token sequence for a tiled image. Returns a string like: \n\n×(16*grid_w)×(16*grid_w)...(×16*grid_h rows)... Note: We use tokens for BOTH actual image positions AND linebreak positions. The model will replace them with the appropriate embeddings. IMPORTANT: We do NOT add trailing \n\n because when followed by text content that starts with \n, it would create \n\n\n which tokenizes differently and breaks vLLM's placeholder pattern matching. """ rows = grid_h * self.tokens_per_tile_side cols = grid_w * self.tokens_per_tile_side total_tokens = self.get_num_image_tokens(grid_h, grid_w) image_tokens = self.image_token * total_tokens return f"\n\n{self.boi_token}{image_tokens}{self.eoi_token}" def __call__( self, images: Optional[ImageInput] = None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, videos=None, audio=None, **kwargs: Unpack[Gemma3TiledProcessorKwargs], ) -> BatchFeature: if text is None and images is None: raise ValueError("Provide at least one of `text` or `images`.") output_kwargs = self._merge_kwargs( Gemma3TiledProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if isinstance(text, str): text = [text] elif not isinstance(text, list) and not isinstance(text[0], str): raise TypeError("Invalid input text. Please provide a string, or a list of strings") image_inputs = {} if images is not None: # Fetch and preprocess images images_fetched = self.image_processor.fetch_images(images) if hasattr(self.image_processor, 'fetch_images') else images batched_images = make_nested_list_of_images(images_fetched) # Process images to get tiles image_inputs = self.image_processor(images_fetched, **output_kwargs["images_kwargs"]) # Get grid shapes for each image (make a copy to avoid mutating) tile_grid_shapes = list(image_inputs.get("tile_grid_shape", [])) # Create empty text to be replaced with placeholders if not text: text = [" ".join([self.boi_token] * len(imgs)) for imgs in batched_images] if len(batched_images) != len(text): raise ValueError( f"Received inconsistently sized batches of images ({len(batched_images)}) and text ({len(text)})." ) # Build flat list of grid shapes across all batches all_grid_shapes = [] grid_shape_iter = iter(tile_grid_shapes) for imgs in batched_images: for _ in imgs: try: all_grid_shapes.append(next(grid_shape_iter)) except StopIteration: # Fallback to 1x1 grid all_grid_shapes.append((1, 1)) # Replace image tokens with expanded sequences grid_shape_idx = 0 for batch_idx, (prompt, imgs) in enumerate(zip(text, batched_images)): image_indexes = [m.start() for m in re.finditer(re.escape(self.boi_token), prompt)] if len(imgs) != len(image_indexes): raise ValueError( f"Prompt contained {len(image_indexes)} image tokens but received {len(imgs)} images." ) # Get grid shapes for this batch's images (in order) batch_grid_shapes = all_grid_shapes[grid_shape_idx:grid_shape_idx + len(imgs)] grid_shape_idx += len(imgs) # Replace each BOI token with the full image sequence # Iterate in reverse to avoid shifting string indices, but also reverse grid shapes to match for idx, (grid_h, grid_w) in zip(reversed(image_indexes), reversed(batch_grid_shapes)): image_sequence = self.build_image_token_sequence(grid_h, grid_w) prompt = prompt[:idx] + image_sequence + prompt[idx + len(self.boi_token):] text[batch_idx] = prompt return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None) return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False) # Get text inputs - let tokenizer handle tensor conversion for text text_inputs = self.tokenizer(text=text, return_tensors=return_tensors, **output_kwargs["text_kwargs"]) # Add token type ids (1 for image tokens, 0 for text) if return_mm_token_type_ids: if return_tensors == "pt": input_ids = text_inputs["input_ids"] mm_token_type_ids = torch.zeros_like(input_ids) mm_token_type_ids[input_ids == self.image_token_id] = 1 text_inputs["token_type_ids"] = mm_token_type_ids else: array_ids = np.array(text_inputs["input_ids"]) mm_token_type_ids = np.zeros_like(array_ids) mm_token_type_ids[array_ids == self.image_token_id] = 1 text_inputs["token_type_ids"] = mm_token_type_ids.tolist() # Combine outputs - DON'T pass tensor_type here because pixel_values # has inhomogeneous shapes (different tile counts per image) return BatchFeature(data={**text_inputs, **image_inputs}) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names + ["token_type_ids"] image_processor_input_names = self.image_processor.model_input_names return list(set(tokenizer_input_names + image_processor_input_names)) def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): """ Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. This is required by vLLM for memory profiling and scheduling. Args: image_sizes (`list[list[int]]`, *optional*): The input sizes formatted as (height, width) per each image. **kwargs: Additional arguments (tile_size, max_tiles_h, max_tiles_w, min_tiles) that override image processor defaults. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. """ vision_data = {} if image_sizes is not None: # Get tiling parameters from kwargs or fall back to image processor settings tile_size = kwargs.get("tile_size", getattr(self.image_processor, "tile_size", 896)) max_tiles_h = kwargs.get("max_tiles_h", getattr(self.image_processor, "max_tiles_h", 4)) max_tiles_w = kwargs.get("max_tiles_w", getattr(self.image_processor, "max_tiles_w", 4)) min_tiles = kwargs.get("min_tiles", getattr(self.image_processor, "min_tiles", 1)) num_image_tokens = [] num_image_patches = [] for height, width in image_sizes: # Calculate optimal tile grid for this image grid_h, grid_w = calculate_tile_grid( image_height=height, image_width=width, tile_size=tile_size, max_tiles_h=max_tiles_h, max_tiles_w=max_tiles_w, min_tiles=min_tiles, ) # Calculate token count for this grid tokens = self.get_num_image_tokens(grid_h, grid_w) num_image_tokens.append(tokens) # Number of patches = number of tiles num_image_patches.append(grid_h * grid_w) vision_data.update({ "num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches, }) return MultiModalData(**vision_data) __all__ = ["Gemma3TiledProcessor", "Gemma3TiledProcessorKwargs"]