from __future__ import annotations

import uuid
from typing import List, Optional, Tuple, cast

import transformers.image_transforms as image_transforms
import transformers.image_utils as image_utils
import transformers.utils.logging
import transformers.video_utils as video_utils
from PIL.Image import Image
from torch import Tensor
from transformers.feature_extraction_utils import BatchFeature
from transformers.image_processing_utils import BaseImageProcessor
from transformers.image_processing_utils_fast import BaseImageProcessorFast
from transformers.image_utils import ImageInput
from transformers.models.siglip.image_processing_siglip import SiglipImageProcessor
from transformers.models.siglip.image_processing_siglip_fast import SiglipImageProcessorFast
from transformers.processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase, TextInput
from transformers.video_utils import VideoInput

logger = transformers.utils.logging.get_logger(__name__)


class VILAProcessorProcessingKwargs(ProcessingKwargs, total=False):
    _defaults = {}


class VILAProcessorOutput(BatchFeature):
    """Typed view of the `BatchFeature` returned by `VILAProcessor.__call__`."""

    input_ids: List[List[int]] | Tensor
    attention_mask: List[List[int]] | Tensor
    pixel_values: Optional[List[Tensor] | Tensor]


class VILAProcessor(ProcessorMixin):
    attributes: List[str] = [
        "image_processor",
        "tokenizer",
    ]
    image_processor_class: str = "AutoImageProcessor"
    tokenizer_class: str = "AutoTokenizer"
    _auto_class: str = "AutoProcessor"
    valid_kwargs: List[str] = [
        "chat_template",
        "image_pad_len",
        "max_tiles",
        "min_tiles",
        "video_max_tiles",
    ]

    # Instances bound by ProcessorMixin according to `attributes` above.
    image_processor: BaseImageProcessor | BaseImageProcessorFast
    tokenizer: PreTrainedTokenizerBase

    # Tiling and padding hyperparameters, set in `__init__`.
    image_pad_len: int
    max_tiles: int
    min_tiles: int
    video_max_tiles: int

    def __init__(
        self,
        image_processor: BaseImageProcessor,
        tokenizer: PreTrainedTokenizer,
        *,
        image_pad_len: int = 121,
        max_tiles: int = 12,
        min_tiles: int = 1,
        video_max_tiles: int = 1,
        **kwargs,
    ):
        super().__init__(
            image_processor,
            tokenizer,
            **kwargs,
        )

        self.image_pad_len = image_pad_len
        self.max_tiles = max_tiles
        self.min_tiles = min_tiles
        self.video_max_tiles = video_max_tiles

    def __call__(
        self,
        text: TextInput | List[TextInput],
        images: Optional[ImageInput] = None,
        videos: Optional[VideoInput] = None,
        **kwargs: Unpack[VILAProcessorProcessingKwargs],
    ) -> VILAProcessorOutput:
        """Preprocesses inputs for VILA.

        Args:
            text: The text to be processed.
            images: The images to be processed.
            videos: The videos to be processed.
            **kwargs: Additional arguments for processing.

        Returns:
            The processed inputs that can be fed to the model.
        """

        merged_kwargs = self._merge_kwargs(
            VILAProcessorProcessingKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )

        normalized_text, normalized_images, normalized_videos = self._normalize_inputs(
            text=text,
            images=images,
            videos=videos,
        )

        preprocessed_text, preprocessed_media_tiles = self._preprocess_inputs(
            text=normalized_text,
            images=normalized_images,
            videos=normalized_videos,
        )

        text_inputs = self.tokenizer(
            preprocessed_text,
            **merged_kwargs["text_kwargs"],
        )

        if len(preprocessed_media_tiles) > 0:
            image_inputs = self.image_processor(
                preprocessed_media_tiles,
                **merged_kwargs["images_kwargs"],
            )
        else:
            image_inputs = BatchFeature()

        text_inputs = self._replace_image_tile_suffix(text_inputs)

        return VILAProcessorOutput(
            data={
                **text_inputs,
                **image_inputs,
            }
        )
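
    # A minimal usage sketch (illustrative; the checkpoint id is a placeholder
    # assumption, not something this module defines):
    #
    #     processor = VILAProcessor.from_pretrained("<vila-checkpoint>")
    #     prompt = f"{processor.tokenizer.image_token}\nDescribe the image."
    #     inputs = processor(text=prompt, images=[pil_image])
    #     inputs["input_ids"], inputs["attention_mask"], inputs["pixel_values"]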

    def _find_media_token_order(self, text: List[str]) -> List[str]:
        """Finds the order of media tokens in the text.

        Args:
            text: The text to be processed.

        Returns:
            The order of media tokens in the text. Each item is either an image token or a video
            token.
        """

        image_token = cast(str, self.tokenizer.image_token)
        video_token = cast(str, self.tokenizer.video_token)

        return_order: List[str] = []

        for text_item in text:
            while image_token in text_item or video_token in text_item:
                image_pos = text_item.find(image_token)
                video_pos = text_item.find(video_token)

                if image_pos == -1 and video_pos == -1:
                    # No media tokens remain in this text item.
                    break

                elif image_pos == -1:
                    # Only video tokens remain.
                    return_order.append(video_token)
                    text_item = text_item[video_pos + len(video_token) :]

                elif video_pos == -1:
                    # Only image tokens remain.
                    return_order.append(image_token)
                    text_item = text_item[image_pos + len(image_token) :]

                else:
                    # Both tokens are present: take whichever appears first.
                    if image_pos < video_pos:
                        return_order.append(image_token)
                        text_item = text_item[image_pos + len(image_token) :]
                    else:
                        return_order.append(video_token)
                        text_item = text_item[video_pos + len(video_token) :]

        return return_order
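
    # For instance, if the image and video tokens were "<image>" and "<video>"
    # (illustrative strings; the real values come from the tokenizer), then
    # ["<image> a <video>", "b <image>"] yields
    # [image_token, video_token, image_token].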

    def _generate_image_token_placeholder(self, text: List[str]) -> str:
        """Returns a placeholder string guaranteed not to occur in the text."""
        while True:
            placeholder = f"<|image_placeholder_{uuid.uuid4()}|>"
            if all(placeholder not in text_item for text_item in text):
                return placeholder

    def _merge_media_tiles(
        self,
        image_tiles: List[List[Image]],
        video_tiles: List[List[List[Image]]],
        media_token_order: List[str],
    ) -> List[Image]:
        """Merges the media tiles by the media token order.

        Args:
            image_tiles: The image tiles.
            video_tiles: The video tiles.
            media_token_order: The order of media tokens in the text.

        Returns:
            The merged media tiles.
        """

        image_token = cast(str, self.tokenizer.image_token)
        video_token = cast(str, self.tokenizer.video_token)

        image_tiles_idx = 0
        video_tiles_idx = 0

        return_tiles: List[Image] = []

        for media_token in media_token_order:
            if media_token == image_token:
                return_tiles.extend(image_tiles[image_tiles_idx])
                image_tiles_idx += 1
            elif media_token == video_token:
                for video_tile in video_tiles[video_tiles_idx]:
                    return_tiles.extend(video_tile)
                video_tiles_idx += 1
            else:
                raise ValueError(f"Invalid media token: {media_token}")

        return return_tiles
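
    # Shape example: with order [image, video], image_tiles = [[i1, i2]] and
    # video_tiles = [[[f1a], [f2a]]], the merged result is [i1, i2, f1a, f2a],
    # a flat tile list matching the order media appear in the text.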

    def _normalize_inputs(
        self,
        text: TextInput | List[TextInput],
        images: Optional[ImageInput],
        videos: Optional[VideoInput],
    ) -> Tuple[List[str], List[Image], List[List[Image]]]:
        """Normalizes text, image, and video inputs for processing.

        This method converts various input formats into standardized lists of PIL images
        and text strings that can be processed by the model.

        Args:
            text: The original input text.
            images: The original input images.
            videos: The original input videos.

        Returns:
            The text as a list of strings.
            The images as a list of PIL images.
            The videos as a list of lists of PIL images.
        """

        prepared_text = text if isinstance(text, list) else [text]

        if images is not None:
            image_list = cast(List, image_utils.make_flat_list_of_images(images))
            prepared_images = [cast(Image, image_transforms.to_pil_image(image)) for image in image_list]
        else:
            prepared_images = []

        if videos is not None:
            video_list = cast(List[List], video_utils.make_batched_videos(videos))
            prepared_videos = [
                [cast(Image, image_transforms.to_pil_image(image)) for image in video] for video in video_list
            ]
        else:
            prepared_videos = []

        return prepared_text, prepared_images, prepared_videos
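
    # e.g. a bare string, a single ndarray image, and one 8-frame video clip
    # normalize to ([str], [PIL.Image], [[PIL.Image] * 8]) respectively.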

    def _pad_image_tiles(
        self,
        text: List[str],
    ) -> List[str]:
        """Pads each media tile.

        This repeats each image token (self.image_pad_len + 1) times. The extra
        repetition is a placeholder for the trailing \\n token suffix.

        Args:
            text: The text to be padded.

        Returns:
            The padded text.
        """

        image_token = cast(str, self.tokenizer.image_token)

        return [text_item.replace(image_token, image_token * (self.image_pad_len + 1)) for text_item in text]
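
    # With the default image_pad_len=121, each image token expands to 122
    # consecutive copies; _replace_image_tile_suffix later rewrites the 122nd
    # token id of each run to the "\n" id.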

    def _preprocess_inputs(
        self,
        text: List[str],
        images: List[Image],
        videos: List[List[Image]],
    ) -> Tuple[List[str], List[Image]]:
        """Preprocesses the input data for the VILA model.

        This method takes a list of texts, images, and videos, and prepares them for the model.
        It handles the interleaving of text and media, and returns the processed text and a
        list of media tiles (images or video frames).

        Args:
            text: The input text.
            images: The input images.
            videos: The input videos.

        Returns:
            The text ready to be tokenized.
            The media tiles ready to be processed.
        """

        media_token_order = self._find_media_token_order(text)

        image_token_placeholder = self._generate_image_token_placeholder(text)

        preprocessed_text = text
        preprocessed_text, preprocessed_image_tiles = self._preprocess_images(
            preprocessed_text,
            images,
            image_token_placeholder=image_token_placeholder,
        )
        preprocessed_text, preprocessed_video_tiles = self._preprocess_videos(
            preprocessed_text,
            videos,
            image_token_placeholder=image_token_placeholder,
        )

        # Swap the collision-free placeholders back to the real image token.
        image_token = cast(str, self.tokenizer.image_token)
        preprocessed_text = [text_item.replace(image_token_placeholder, image_token) for text_item in preprocessed_text]

        preprocessed_text = self._pad_image_tiles(preprocessed_text)

        preprocessed_media_tiles = self._merge_media_tiles(
            preprocessed_image_tiles,
            preprocessed_video_tiles,
            media_token_order,
        )

        return preprocessed_text, preprocessed_media_tiles
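
    # Pipeline summary: each image token in the text becomes one placeholder
    # per crop produced by dynamic_preprocess, placeholders become image
    # tokens, each image token is padded, and the crops are flattened in the
    # order media appear in the text.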

    def _preprocess_images(
        self,
        text: List[str],
        images: List[Image],
        *,
        image_token_placeholder: str,
    ) -> Tuple[List[str], List[List[Image]]]:
        single_image_token_placeholder = self._generate_image_token_placeholder(text)

        preprocessed_text = text
        preprocessed_image_tiles: List[List[Image]] = []

        for image in images:
            preprocessed_text, preprocessed_single_image_tiles = self._preprocess_single_image(
                preprocessed_text,
                image,
                image_token_placeholder=single_image_token_placeholder,
                is_video_frame=False,
                use_dynamic_preprocess=(len(images) == 1),
            )

            preprocessed_text = [
                text_item.replace(
                    single_image_token_placeholder,
                    (image_token_placeholder + "\n") if len(images) == 1 else image_token_placeholder,
                )
                for text_item in preprocessed_text
            ]

            preprocessed_image_tiles.append(preprocessed_single_image_tiles)

        return preprocessed_text, preprocessed_image_tiles

    def _preprocess_single_image(
        self,
        text: List[str],
        image: Image,
        *,
        image_token_placeholder: str,
        is_video_frame: bool,
        use_dynamic_preprocess: bool,
    ) -> Tuple[List[str], List[Image]]:
        assert isinstance(self.image_processor, (SiglipImageProcessor, SiglipImageProcessorFast))
        assert self.image_processor.size["height"] == self.image_processor.size["width"]
        cropped_size = self.image_processor.size["height"]

        if use_dynamic_preprocess:
            if is_video_frame:
                max_num = self.video_max_tiles
            else:
                max_num = self.max_tiles
        else:
            max_num = 1

        image = image.convert("RGB")

        cropped_images: List[Image] = dynamic_preprocess(
            image,
            min_num=self.min_tiles,
            max_num=max_num,
            image_size=cropped_size,
        )

        image_token = cast(str, self.tokenizer.image_token)

        # Replace only the first remaining image token with one placeholder per
        # crop: each call consumes exactly one media slot.
        for i in range(len(text)):
            if image_token in text[i]:
                text[i] = text[i].replace(image_token, image_token_placeholder * len(cropped_images), 1)
                break

        return text, cropped_images
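
    # e.g. a wide image under a 12-tile budget may be cropped into a 2x1 grid
    # plus a thumbnail (3 crops), so the matched image token is replaced with
    # three placeholder copies.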

    def _preprocess_videos(
        self,
        text: List[str],
        videos: List[List[Image]],
        *,
        image_token_placeholder: str,
    ) -> Tuple[List[str], List[List[List[Image]]]]:
        image_token = cast(str, self.tokenizer.image_token)
        video_token = cast(str, self.tokenizer.video_token)

        processed_text = text
        processed_video_tiles: List[List[List[Image]]] = []

        for video in videos:
            # Expand the first video token into one image token per frame.
            for i in range(len(processed_text)):
                if video_token in processed_text[i]:
                    processed_text[i] = processed_text[i].replace(video_token, image_token * len(video), 1)
                    break

            processed_frame_tiles: List[List[Image]] = []
            for frame in video:
                processed_text, processed_single_frame_tiles = self._preprocess_single_image(
                    processed_text,
                    frame,
                    image_token_placeholder=image_token_placeholder,
                    is_video_frame=True,
                    use_dynamic_preprocess=(self.video_max_tiles > 1),
                )
                processed_frame_tiles.append(processed_single_frame_tiles)

            processed_video_tiles.append(processed_frame_tiles)

        return processed_text, processed_video_tiles

    def _replace_image_tile_suffix(self, text_inputs: BatchEncoding) -> BatchEncoding:
        lf_token_id = cast(int, self.tokenizer.encode("\n", add_special_tokens=False)[0])
        image_token_id = cast(int, self.tokenizer.image_token_id)

        for i in range(len(text_inputs.input_ids)):
            input_ids = text_inputs.input_ids[i]

            idx = 0
            while idx < len(input_ids):
                if input_ids[idx] != image_token_id:
                    idx += 1
                    continue

                # Rewrite the last padded image token of each run to "\n".
                if idx + self.image_pad_len < len(input_ids):
                    input_ids[idx + self.image_pad_len] = lf_token_id
                    idx += self.image_pad_len + 1
                else:
                    break

        return text_inputs
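
    # Worked example (illustrative ids): with image_pad_len = 3 and input ids
    # [7, I, I, I, I, 9] where I is the image token id, the run becomes
    # [7, I, I, I, LF, 9]: the (pad_len + 1)-th token of the run turns into
    # the "\n" id so every tile block ends with a newline.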


def dynamic_preprocess(image: Image, min_num: int, max_num: int, image_size: int, use_thumbnail: bool = True) -> List[Image]:
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # Enumerate all (columns, rows) tile grids whose tile count is in range.
    target_ratios = {
        (i, j)
        for n in range(min_num, max_num + 1)
        for i in range(1, n + 1)
        for j in range(1, n + 1)
        if min_num <= i * j <= max_num
    }
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # Pick the grid whose aspect ratio is closest to the input image's.
    target_aspect_ratio = find_closest_aspect_ratio(aspect_ratio, target_ratios, orig_width, orig_height, image_size)

    # Compute the target resolution and tile count for that grid.
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # Resize, then crop the image into image_size x image_size tiles.
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size,
        )
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        # Append a low-resolution view of the whole image as a global tile.
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images
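
# Worked example: a 1024x512 input with image_size=448 has aspect ratio 2.0,
# selects the (2, 1) grid, is resized to 896x448, and yields two 448x448 tiles
# plus the thumbnail, for three images in total.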


def find_closest_aspect_ratio(
    aspect_ratio: float, target_ratios: List[Tuple[int, int]], width: int, height: int, image_size: int
) -> Tuple[int, int]:
    best_ratio_diff = float("inf")
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            # On ties, prefer the larger grid only if the image has enough
            # area to fill at least half of its tiles at full resolution.
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio
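

if __name__ == "__main__":
    # Minimal smoke test of the tiling helpers above (no model weights needed).
    # The sizes are arbitrary assumptions chosen to exercise the (2, 1) grid:
    # two 448x448 tiles plus the thumbnail appended by dynamic_preprocess.
    import PIL.Image

    demo = PIL.Image.new("RGB", (1024, 512))
    print(find_closest_aspect_ratio(2.0, [(1, 1), (2, 1), (1, 2)], 1024, 512, 448))  # (2, 1)
    tiles = dynamic_preprocess(demo, min_num=1, max_num=12, image_size=448)
    print(len(tiles), tiles[0].size)  # 3 (448, 448)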