| """Image processor class for Qwen2-VL."""
|
|
|
| import math
|
| from typing import Dict, List, Optional, Union
|
|
|
| import numpy as np
|
| import torch
|
| from torch import nn
|
|
|
| from transformers.image_processing_utils import BaseImageProcessor, BatchFeature
|
| from transformers.image_transforms import (
|
| convert_to_rgb,
|
| resize,
|
| to_channel_dimension_format,
|
| )
|
| from .image_utils import (
|
| OPENAI_CLIP_MEAN,
|
| OPENAI_CLIP_STD,
|
| ChannelDimension,
|
| ImageInput,
|
| PILImageResampling,
|
| VideoInput,
|
| get_image_size,
|
| infer_channel_dimension_format,
|
| is_scaled_image,
|
| is_valid_image,
|
| make_list_of_images,
|
| to_numpy_array,
|
| valid_images,
|
| validate_preprocess_arguments,
|
| )
|
| from transformers.utils import TensorType, is_vision_available, logging
|
|
|
|
|
| logger = logging.get_logger(__name__)
|
|
|
|
|
| if is_vision_available():
|
| from PIL import Image
|
|
|
|
|


def make_batched_images(images) -> List[ImageInput]:
    """
    Accepts images in list or nested list format, and makes a flat list of images for preprocessing.

    Args:
        images (`Union[List[List[ImageInput]], List[ImageInput], ImageInput]`):
            The input image(s).

    Returns:
        `List[ImageInput]`: A flat list of images.
    """
    if isinstance(images, (list, tuple)) and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0]):
        return [img for img_list in images for img in img_list]

    elif isinstance(images, (list, tuple)) and is_valid_image(images[0]):
        return images

    elif is_valid_image(images):
        return [images]

    raise ValueError(f"Could not make batched images from {images}")
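

# For illustration: nested per-conversation lists of images are flattened,
# e.g. make_batched_images([[img_a], [img_b]]) == [img_a, img_b], where
# img_a and img_b stand for any valid image inputs (hypothetical names).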


def make_batched_videos(videos) -> List[VideoInput]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        if isinstance(videos[0], Image.Image):
            return [videos]
        elif len(videos[0].shape) == 4:
            return [list(video) for video in videos]

    elif is_valid_image(videos) and len(videos.shape) == 4:
        return [list(videos)]

    raise ValueError(f"Could not make batched video from {videos}")
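

# For illustration: a single video given as a 4D array of stacked frames,
# e.g. np.zeros((8, 224, 224, 3), dtype=np.uint8), becomes a batch holding
# one video of eight frame arrays; a flat list of PIL images is treated as
# one video.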


def smart_resize(
    height: int, width: int, factor: int = 28, min_pixels: int = 56 * 56, max_pixels: int = 14 * 14 * 4 * 1280
):
    """Rescales the image so that the following conditions are met:

    1. Both dimensions (height and width) are divisible by `factor`.
    2. The total number of pixels is within the range [`min_pixels`, `max_pixels`].
    3. The aspect ratio of the image is maintained as closely as possible.
    """
    if height < factor or width < factor:
        # Upscale images smaller than `factor` so that both sides reach at least `factor`.
        if height < width:
            h_bar = factor
            w_bar = round(width / height * factor)
        else:
            h_bar = round(height / width * factor)
            w_bar = factor
        height, width = h_bar, w_bar
    elif max(height, width) / min(height, width) > 200:
        raise ValueError(
            f"absolute aspect ratio must be smaller than 200, got {max(height, width) / min(height, width)}"
        )
    h_bar = round(height / factor) * factor
    w_bar = round(width / factor) * factor
    if h_bar * w_bar > max_pixels:
        beta = math.sqrt((height * width) / max_pixels)
        h_bar = math.floor(height / beta / factor) * factor
        w_bar = math.floor(width / beta / factor) * factor
    elif h_bar * w_bar < min_pixels:
        beta = math.sqrt(min_pixels / (height * width))
        h_bar = math.ceil(height * beta / factor) * factor
        w_bar = math.ceil(width * beta / factor) * factor
    return h_bar, w_bar
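

# For illustration: smart_resize(1000, 700) -> (1008, 700); both sides are
# multiples of 28, and 1008 * 700 = 705600 lies within
# [56 * 56, 14 * 14 * 4 * 1280] = [3136, 1003520].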


class Qwen2VLImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Qwen2-VL image processor that dynamically resizes images based on the original images.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use when resizing the image.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image.
        image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
            Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
        image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats for each channel
            in the image.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
        min_pixels (`int`, *optional*, defaults to `56 * 56`):
            The minimum number of pixels an image can be resized to.
        max_pixels (`int`, *optional*, defaults to `28 * 28 * 1280`):
            The maximum number of pixels an image can be resized to.
        patch_size (`int`, *optional*, defaults to 14):
            The spatial patch size of the vision encoder.
        temporal_patch_size (`int`, *optional*, defaults to 2):
            The temporal patch size of the vision encoder.
        merge_size (`int`, *optional*, defaults to 2):
            The merge size of the vision encoder to LLM encoder.
        shifted_patch_tokenize (`bool`, *optional*, defaults to `False`):
            Whether to apply shifted patch tokenization.
    """

    model_input_names = ["pixel_values", "image_grid_thw", "pixel_values_videos", "video_grid_thw"]

    def __init__(
        self,
        do_resize: bool = True,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        min_pixels: int = 56 * 56,
        max_pixels: int = 28 * 28 * 1280,
        patch_size: int = 14,
        temporal_patch_size: int = 2,
        merge_size: int = 2,
        shifted_patch_tokenize: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_resize = do_resize
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.min_pixels = min_pixels
        self.max_pixels = max_pixels
        self.patch_size = patch_size
        self.temporal_patch_size = temporal_patch_size
        self.merge_size = merge_size
        self.size = {"min_pixels": min_pixels, "max_pixels": max_pixels}
        self.do_convert_rgb = do_convert_rgb
        self.shifted_patch_tokenize = shifted_patch_tokenize
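
    # Note (illustrative, based on the defaults above): resized dimensions are
    # multiples of patch_size * merge_size = 28, and total pixel counts are
    # kept within [min_pixels, max_pixels] = [3136, 1003520].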

    def _preprocess(
        self,
        images: Union[ImageInput, VideoInput],
        do_resize: bool = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """
        Preprocess an image or batch of images. Adapted from the `preprocess` method of `CLIPImageProcessor`.

        Args:
            images (`ImageInput`):
                Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values
                range from 0 to 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Scale factor to use if rescaling the image.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number
                of channels in the image.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding
                to the number of channels in the image.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        images = make_list_of_images(images)

        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )
        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        height, width = get_image_size(images[0], channel_dim=input_data_format)
        resized_height, resized_width = height, width
        processed_images = []
        for image in images:
            if do_resize:
                resized_height, resized_width = smart_resize(
                    height,
                    width,
                    factor=self.patch_size * self.merge_size,
                    min_pixels=self.min_pixels,
                    max_pixels=self.max_pixels,
                )
                image = resize(
                    image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format
                )

            if do_rescale:
                image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)

            if do_normalize:
                image = self.normalize(
                    image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
                )

            image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
            processed_images.append(image)
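
        # Patchify: stack the processed frames, tile a single image to
        # `temporal_patch_size` frames, then split the array into a
        # (grid_t, grid_h, grid_w) grid of patches ordered so that each
        # `merge_size` x `merge_size` window of spatial patches is contiguous.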
        patches = np.array(processed_images)
        if data_format == ChannelDimension.LAST:
            patches = patches.transpose(0, 3, 1, 2)
        if patches.shape[0] == 1:
            patches = np.tile(patches, (self.temporal_patch_size, 1, 1, 1))
        channel = patches.shape[1]
        grid_t = patches.shape[0] // self.temporal_patch_size
        grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size
        patches = patches.reshape(
            grid_t,
            self.temporal_patch_size,
            channel,
            grid_h // self.merge_size,
            self.merge_size,
            self.patch_size,
            grid_w // self.merge_size,
            self.merge_size,
            self.patch_size,
        )
        patches = patches.transpose(0, 3, 6, 4, 7, 2, 1, 5, 8)
        flatten_patches = patches.reshape(
            grid_t * grid_h * grid_w, channel * self.temporal_patch_size * self.patch_size * self.patch_size
        )

        return flatten_patches, (grid_t, grid_h, grid_w)
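
    # For illustration (assumed defaults: patch_size=14, merge_size=2,
    # temporal_patch_size=2): a single 224x224 RGB image keeps its size under
    # `smart_resize`, is tiled to 2 frames, and yields grid (t, h, w) =
    # (1, 16, 16) with `flatten_patches` of shape
    # (1 * 16 * 16, 3 * 2 * 14 * 14) = (256, 1176).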

    def preprocess(
        self,
        images: ImageInput,
        videos: VideoInput = None,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """
        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            videos (`VideoInput`):
                Video to preprocess. Expects a single or batch of videos with pixel values ranging from 0 to 255. If
                passing in videos with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                The minimum and maximum number of pixels allowed after resizing, in the form
                `{"min_pixels": int, "max_pixels": int}`.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        if images is not None:
            images = make_batched_images(images)
        if videos is not None:
            videos = make_batched_videos(videos)

        if images is not None and not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        data = {}
        if images is not None:
            pixel_values, vision_grid_thws = [], []
            for image in images:
                patches, image_grid_thw = self._preprocess(
                    image,
                    do_resize=do_resize,
                    resample=resample,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                    do_convert_rgb=do_convert_rgb,
                    input_data_format=input_data_format,
                )
                pixel_values.extend(patches)
                vision_grid_thws.append(image_grid_thw)
            pixel_values = np.array(pixel_values)
            vision_grid_thws = np.array(vision_grid_thws)
            data.update({"pixel_values": pixel_values, "image_grid_thw": vision_grid_thws})

        if videos is not None:
            pixel_values, vision_grid_thws = [], []
            for video in videos:
                patches, video_grid_thw = self._preprocess(
                    video,
                    do_resize=do_resize,
                    resample=resample,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                    do_convert_rgb=do_convert_rgb,
                    input_data_format=input_data_format,
                )
                pixel_values.extend(patches)
                vision_grid_thws.append(video_grid_thw)
            pixel_values = np.array(pixel_values)
            vision_grid_thws = np.array(vision_grid_thws)
            data.update({"pixel_values_videos": pixel_values, "video_grid_thw": vision_grid_thws})

        return BatchFeature(data=data, tensor_type=return_tensors)
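

# Minimal usage sketch (hypothetical input path; `BaseImageProcessor.__call__`
# forwards to `preprocess`):
#
#   from PIL import Image
#
#   processor = Qwen2VLImageProcessor()
#   image = Image.open("example.jpg")  # hypothetical file
#   out = processor(images=image, return_tensors="np")
#   out["pixel_values"].shape   # (num_patches, 1176) for RGB, with defaults
#   out["image_grid_thw"]       # [[t, h, w]] patch grid per image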
|