from typing import List, Optional, Tuple, Unpack, cast

import numpy as np
import transformers.image_transforms as image_transforms
import transformers.image_utils as image_utils
from numpy.typing import NDArray
from PIL.Image import Image
from torch import Tensor
from transformers.feature_extraction_utils import BatchFeature
from transformers.image_processing_utils import BaseImageProcessor
from transformers.image_processing_utils_fast import BaseImageProcessorFast
from transformers.image_utils import ImageInput, VideoInput
from transformers.models.siglip.image_processing_siglip import SiglipImageProcessor
from transformers.processing_utils import ProcessingKwargs, ProcessorMixin
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.tokenization_utils_base import PreTrainedTokenizerBase, TextInput


class VILAProcessorKwargs(ProcessingKwargs, total=False):
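    """Processing keyword arguments for VILAProcessor; no extra defaults."""
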
    _defaults = {}  # type: ignore


class VILAProcessorOutput(BatchFeature):
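    """Processor output; pixel_values is omitted when no images are provided."""
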
    input_ids: List[List[int]] | NDArray[np.int64] | Tensor
    attention_mask: List[List[int]] | NDArray[np.int64] | Tensor
    pixel_values: Optional[List[NDArray[np.float32]] | NDArray[np.float32] | Tensor]


class VILAProcessor(ProcessorMixin):
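    """Pairs an image processor and a tokenizer to prepare VILA model inputs."""
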
    attributes: List[str] = [
        "image_processor",
        "tokenizer",
    ]
    image_processor_class: str = "AutoImageProcessor"
    tokenizer_class: str = "AutoTokenizer"

    # Attributes.
    image_processor: BaseImageProcessor | BaseImageProcessorFast
    tokenizer: PreTrainedTokenizerBase

    # Configuration parameters.
    image_pad_len: int  # Number of image-embedding tokens emitted per tile.
    image_token: str  # Image placeholder token in the text, e.g. "<image>".
    max_tiles: int  # Upper bound on tiles per image (thumbnail excluded).
    min_tiles: int  # Lower bound on tiles per image.

    def __init__(
        self,
        image_processor: BaseImageProcessor,
        tokenizer: PreTrainedTokenizer,
        *,
        image_pad_len: int,
        image_token: str,
        max_tiles: int,
        min_tiles: int,
        **kwargs,
    ):
        super().__init__(
            image_processor,
            tokenizer,
            **kwargs,
        )

        self.image_pad_len = image_pad_len
        self.image_token = image_token
        self.max_tiles = max_tiles
        self.min_tiles = min_tiles

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[TextInput | List[TextInput]] = None,
        audio: None = None,
        videos: Optional[VideoInput] = None,
        **kwargs: Unpack[VILAProcessorKwargs],
    ) -> VILAProcessorOutput:
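        """Prepares text and, optionally, images as model inputs.

        Args:
            images: The images to be processed, if any.
            text: The prompt text(s); must contain one image placeholder token
                per provided image.
            audio: Unsupported; accepted only for API compatibility.
            videos: Not yet supported.

        Returns:
            The batched tokenizer and image-processor outputs.
        """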
        # Validate arguments.
        assert text is not None and text != [], "text must be provided"
        assert not kwargs.get(
            "is_split_into_words", False
        ), "is_split_into_words=True is not supported"

        output_kwargs = self._merge_kwargs(
            VILAProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )

        # Process images.
        if images is not None and images != []:
            image_inputs, num_cropped_images = self._process_images(
                images=images,
                **output_kwargs["images_kwargs"],
            )
        else:
            # If no images are provided, do not define pixel_values.
            image_inputs = BatchFeature()
            num_cropped_images = []

        # TODO: video processing.

        # Process text. Each image placeholder is first expanded once per
        # cropped tile, and each tile token is then repeated image_pad_len
        # times to match the number of image embeddings per tile.
        text = text if isinstance(text, list) else [text]

        text = self._pad_image_tokens_by_num_crops(
            text,
            num_cropped_images=num_cropped_images,
        )

        text = self._pad_image_tokens_by_num_embeddings(
            text,
        )

        text_inputs = self.tokenizer(
            text,
            **output_kwargs["text_kwargs"],
        )

        return VILAProcessorOutput(
            data={
                **text_inputs,
                **image_inputs,
            }
        )

    def _crop_image(
        self,
        image: Image,
    ) -> List[Image]:
        """Crops the image into multiple tiles.

        Args:
            image: The image to be cropped.

        Returns:
            The cropped images.
        """

        # TODO: Support more image processors.
        assert isinstance(self.image_processor, SiglipImageProcessor)

        assert self.image_processor.size["height"] == self.image_processor.size["width"]
        cropped_size = self.image_processor.size["height"]

        cropped_images: List[Image] = dynamic_preprocess(
            image,
            min_num=self.min_tiles,
            max_num=self.max_tiles,
            image_size=cropped_size,
        )

        return cropped_images

    def _pad_image_tokens_by_num_crops(
        self,
        text: List[TextInput],
        *,
        num_cropped_images: List[int],
    ) -> List[TextInput]:
        """Pads each <image> to num_cropped_images of "<image>\n\n".

        Args:
            text: The text to be padded.
            num_cropped_images: The number of cropped images for each image token.

        Returns:
            The padded text.
        """
        # Validate arguments.
        num_images = len(num_cropped_images)
        num_image_tokens = sum(item.count(self.image_token) for item in text)
        assert num_images == num_image_tokens, (
            f"Number of image tokens ({num_image_tokens}) in text does not match "
            f"the number of images ({num_images})."
        )

        assert all(
            num_crops > 0 for num_crops in num_cropped_images
        ), "Each image must produce at least one cropped tile."

        # Pad image tokens.
        image_idx = 0
        padded_text: List[TextInput] = []

        for text_item in text:
            padded_text_item = ""
            remaining_text = text_item

            while True:
                token_pos = remaining_text.find(self.image_token)
                if token_pos == -1:
                    padded_text_item += remaining_text
                    break

                padded_text_item += remaining_text[:token_pos] + (
                    (self.image_token + "\n") * num_cropped_images[image_idx]
                )

                image_idx += 1
                remaining_text = remaining_text[token_pos + len(self.image_token) :]

            padded_text.append(padded_text_item)

        return padded_text

    def _pad_image_tokens_by_num_embeddings(
        self,
        text: List[TextInput],
    ) -> List[TextInput]:
        """Pads each <image> to image_pad_len times of "<image>".

        Args:
            text: The text to be padded.

        Returns:
            The padded text.
        """
        padded_text: List[TextInput] = []

        for text_item in text:
            padded_text_item = ""
            remaining_text = text_item

            while True:
                token_pos = remaining_text.find(self.image_token)
                if token_pos == -1:
                    padded_text_item += remaining_text
                    break

                padded_text_item += remaining_text[:token_pos] + (
                    self.image_token * self.image_pad_len
                )

                remaining_text = remaining_text[token_pos + len(self.image_token) :]

            padded_text.append(padded_text_item)

        return padded_text

    def _process_images(
        self,
        images: ImageInput,
        **kwargs: Unpack[VILAProcessorKwargs],
    ) -> Tuple[BatchFeature, List[int]]:
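        """Crops each image into tiles and preprocesses all tiles as one batch.

        Args:
            images: The images to be processed.

        Returns:
            The processed image features and the number of tiles per image.
        """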
        images_flatten = cast(
            List[Image] | List[NDArray] | List[Tensor],
            image_utils.make_flat_list_of_images(images),
        )

        cropped_images: List[Image] = []
        num_cropped_images: List[int] = []
        for image in images_flatten:
            pil_image: Image = image_transforms.to_pil_image(image)
            single_cropped_images = self._crop_image(pil_image)

            cropped_images.extend(single_cropped_images)
            num_cropped_images.append(len(single_cropped_images))

        image_inputs = self.image_processor(
            cropped_images,
            **kwargs,
        )

        return image_inputs, num_cropped_images


def dynamic_preprocess(
    image, min_num=1, max_num=12, image_size=384, use_thumbnail=True
):
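    """Splits an image into a grid of square tiles.

    The (columns, rows) grid is chosen so its aspect ratio is closest to the
    image's, subject to min_num <= columns * rows <= max_num. If use_thumbnail
    is set and more than one tile is produced, a square thumbnail of the whole
    image is appended.

    Args:
        image: The PIL image to split.
        min_num: Minimum number of tiles.
        max_num: Maximum number of tiles (thumbnail excluded).
        image_size: Side length of each square tile.
        use_thumbnail: Whether to append a thumbnail of the full image.

    Returns:
        The cropped tiles.
    """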
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # Enumerate all candidate (columns, rows) tile grids within the budget.
    target_ratios = {
        (i, j)
        for n in range(min_num, max_num + 1)
        for i in range(1, n + 1)
        for j in range(1, n + 1)
        if min_num <= i * j <= max_num
    }
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # find the closest aspect ratio to the target
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size
    )

    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # resize the image
    resized_img = image.resize((target_width, target_height))
    processed_images = []
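    # Crop the tiles in row-major order; each box is (left, upper, right, lower).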
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size,
        )
        # split the image
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images


def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
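    """Returns the grid in target_ratios whose aspect ratio is closest.

    Ties are broken in favor of the grid with more tiles whenever the original
    image covers more than half of that grid's total tile area.
    """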
    best_ratio_diff = float("inf")
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio
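

# A minimal usage sketch: the checkpoint path, image file, and configuration
# values below are hypothetical placeholders, not values shipped with this
# module; a real VILA checkpoint supplies its own via its processor config.
#
#     from PIL import Image as PILImage
#     from transformers import AutoImageProcessor, AutoTokenizer
#
#     image_processor = AutoImageProcessor.from_pretrained("path/to/vila-checkpoint")
#     tokenizer = AutoTokenizer.from_pretrained("path/to/vila-checkpoint")
#     processor = VILAProcessor(
#         image_processor,
#         tokenizer,
#         image_pad_len=196,
#         image_token="<image>",
#         max_tiles=12,
#         min_tiles=1,
#     )
#     batch = processor(
#         images=[PILImage.open("example.jpg")],
#         text=["<image>\nDescribe this image."],
#         return_tensors="pt",
#     )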