from typing import Any, Optional, Union
import torch
import numpy as np
from transformers.image_utils import ImageInput
from transformers.processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack, VideosKwargs
from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
from transformers.utils import logging
from transformers.video_utils import VideoInput
from .chunk_utils import chunk_tokens
logger = logging.get_logger(__name__)
class MMFeature(dict):
    """A dict of multimodal model inputs whose values are eagerly converted to the requested tensor framework."""
def __init__(self, data, tensor_type: str | None = None):
super().__init__(data)
self.tensor_type = tensor_type
self.convert_to_tensor()
    def convert_to_tensor(self) -> "MMFeature":
        # Best-effort conversion: values that cannot be converted to the target
        # framework (e.g. ragged lists or non-numeric objects) are left unchanged.
        if self.tensor_type is None:
            return self
match self.tensor_type:
case "pt":
for k, v in self.items():
if not isinstance(v, torch.Tensor):
try:
self[k] = torch.tensor(v)
except Exception:
pass
case "np":
for k, v in self.items():
if not isinstance(v, np.ndarray):
try:
self[k] = np.array(v)
except Exception:
pass
case _:
raise ValueError(f"Unsupported tensor type: {self.tensor_type}")
return self
def to(self, target: Any) -> "MMFeature":
for k, v in self.items():
if isinstance(v, torch.Tensor):
self[k] = v.to(target)
return self
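# Illustrative usage of MMFeature (hypothetical values): tensors are created eagerly
# on construction, and `.to(...)` moves only `torch.Tensor` values.
#   feat = MMFeature({"input_ids": [[1, 2, 3]]}, tensor_type="pt")
#   feat = feat.to("cpu")  # non-tensor values are left untouched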
class Qwen3VLVideosProcessorKwargs(VideosKwargs, total=False):
focus_size: Optional[int]
max_chunk_size: Optional[int]
class Qwen3VLImagesKwargs(ImagesKwargs):
min_pixels: Optional[int]
max_pixels: Optional[int]
patch_size: Optional[int]
temporal_patch_size: Optional[int]
merge_size: Optional[int]
focus_size: Optional[int]
class Qwen3VLProcessorKwargs(ProcessingKwargs, total=False):
images_kwargs: Qwen3VLImagesKwargs # type: ignore
videos_kwargs: Qwen3VLVideosProcessorKwargs # type: ignore
_defaults = { # type: ignore
"text_kwargs": {
"padding": False,
"return_token_type_ids": False,
"return_mm_token_type_ids": False,
},
"videos_kwargs": {"return_metadata": True},
}
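# Illustrative override (hypothetical value): `ProcessorMixin._merge_kwargs` layers
# per-call kwargs over the `_defaults` above, so a single field can be overridden:
#   processor(text=..., videos=..., videos_kwargs={"max_chunk_size": 4096})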
class ZFQwen3VLProcessor(ProcessorMixin):
r"""
    Constructs a Qwen3VL processor which wraps a Qwen3VL image processor and a Qwen2 tokenizer into a single processor.
    [`ZFQwen3VLProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`] and [`Qwen2TokenizerFast`]. See the
    [`~ZFQwen3VLProcessor.__call__`] and [`~ZFQwen3VLProcessor.decode`] for more information.
Args:
image_processor ([`Qwen2VLImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`Qwen2TokenizerFast`], *optional*):
The tokenizer is a required input.
video_processor ([`Qwen3VLVideoProcessor`], *optional*):
The video processor is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
attributes = ["image_processor", "tokenizer", "video_processor"]
image_processor_class = "AutoImageProcessor"
video_processor_class = "AutoVideoProcessor"
tokenizer_class = ("Qwen2Tokenizer", "Qwen2TokenizerFast")
def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template)
self.image_token = "<|image_pad|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token # type: ignore
self.video_token = "<|video_pad|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token # type: ignore
self.image_token_id = (
tokenizer.image_token_id # type: ignore
if getattr(tokenizer, "image_token_id", None)
else tokenizer.convert_tokens_to_ids(self.image_token) # type: ignore
)
self.video_token_id = (
tokenizer.video_token_id # type: ignore
if getattr(tokenizer, "video_token_id", None)
else tokenizer.convert_tokens_to_ids(self.video_token) # type: ignore
)
self.vision_start_token = (
"<|vision_start|>" if not hasattr(tokenizer, "vision_start_token") else tokenizer.vision_start_token # type: ignore
)
self.vision_end_token = (
"<|vision_end|>" if not hasattr(tokenizer, "vision_end_token") else tokenizer.vision_end_token # type: ignore
)
self.vision_start_token_id = (
tokenizer.vision_start_token_id # type: ignore
if getattr(tokenizer, "vision_start_token_id", None)
else tokenizer.convert_tokens_to_ids(self.vision_start_token) # type: ignore
)
self.vision_end_token_id = (
tokenizer.vision_end_token_id # type: ignore
if getattr(tokenizer, "vision_end_token_id", None)
else tokenizer.convert_tokens_to_ids(self.vision_end_token) # type: ignore
)
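    # Illustrative fallback (hypothetical tokenizer): a tokenizer that defines no
    # `image_token` attribute gets the default "<|image_pad|>", whose id is then
    # resolved with `tokenizer.convert_tokens_to_ids("<|image_pad|>")`; the same
    # pattern applies to the video and vision start/end tokens above.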
def __call__( # type: ignore
self,
images: ImageInput = None, # type: ignore
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, # type: ignore
videos: VideoInput = None, # type: ignore
**kwargs: Unpack[Qwen3VLProcessorKwargs],
) -> MMFeature:
"""
        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
        and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwargs` arguments to
        Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `vision_infos` is not `None`.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The video or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
                tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
- `'jax'`: Return JAX `jnp.ndarray` objects.
Returns:
[`MMFeature`]: A [`MMFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
            - **image_grid_thw** -- List of 3D grids (temporal, height, width) of image patches as seen by the LLM. Returned when `images` is not `None`.
            - **video_grid_thw** -- List of 3D grids (temporal, height, width) of video patches as seen by the LLM. Returned when `videos` is not `None`.
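
        Example (a minimal sketch; the checkpoint path and image file are placeholders):

        ```python
        from PIL import Image

        processor = ZFQwen3VLProcessor.from_pretrained("path/to/checkpoint")
        prompt = "<|vision_start|><|image_pad|><|vision_end|>Describe the image."
        inputs = processor(images=Image.open("example.png"), text=prompt, return_tensors="pt")
        print(inputs["input_ids"].shape, inputs["pixel_values"].shape)
        ```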
"""
output_kwargs = self._merge_kwargs(
Qwen3VLProcessorKwargs, # type: ignore
tokenizer_init_kwargs=self.tokenizer.init_kwargs, # type: ignore
**kwargs,
)
if images is not None:
image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"]) # type: ignore
image_grid_thw = image_inputs["image_grid_thw"]
else:
image_inputs = {}
image_grid_thw = None
if videos is not None:
videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"]) # type: ignore
video_grid_thw = videos_inputs["video_grid_thw"]
            # Pop the video metadata unless the caller explicitly requested it.
            if "return_metadata" not in kwargs:
                video_metadata = videos_inputs.pop("video_metadata")
            else:
                video_metadata = videos_inputs["video_metadata"]
else:
videos_inputs = {}
video_grid_thw = None
if not isinstance(text, list):
text = [text]
text = text.copy() # below lines change text in-place
if image_grid_thw is not None:
merge_length = self.image_processor.merge_size**2 # type: ignore
index = 0
for i in range(len(text)):
while self.image_token in text[i]:
num_image_tokens = image_grid_thw[index].prod() // merge_length
text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1) # type: ignore
index += 1
text[i] = text[i].replace("<|placeholder|>", self.image_token) # type: ignore
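            # Worked example (illustrative): one image with grid (1, 4, 4) and
            # merge_size=2 expands "<|image_pad|>" into 1*4*4 // 2**2 = 4 pad tokens.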
if video_grid_thw is not None:
merge_length = self.video_processor.merge_size**2 # type: ignore
index = 0
for i in range(len(text)):
while self.video_token in text[i]:
metadata = video_metadata[index] # type: ignore
                    if metadata.fps is None:
                        logger.warning_once(  # type: ignore
                            "Qwen3VL requires frame timestamps to construct prompts, but the `fps` of the input video could not be inferred. "
                            "Probably `video_metadata` was missing from inputs and you passed pre-sampled frames. "
                            "Defaulting to `fps=24`. Please provide `video_metadata` for more accurate results."
                        )
                        metadata.fps = 24
# if timestamps are not provided, calculate them
curr_timestamp = self._calculate_timestamps(
metadata.frames_indices,
metadata.fps,
self.video_processor.merge_size, # type: ignore
self.video_processor.focus_size, # type: ignore
)
video_placeholder = ""
frame_seqlen = video_grid_thw[index][1:].prod() // merge_length
for frame_idx in range(video_grid_thw[index][0]):
curr_time = curr_timestamp[frame_idx]
video_placeholder += f"<{curr_time:.1f} seconds>"
video_placeholder += (
self.vision_start_token + "<|placeholder|>" * frame_seqlen + self.vision_end_token
)
if f"{self.vision_start_token}{self.video_token}{self.vision_end_token}" in text[i]:
text[i] = text[i].replace( # type: ignore
f"{self.vision_start_token}{self.video_token}{self.vision_end_token}", video_placeholder, 1
)
else:
# vllm may input video token directly
text[i] = text[i].replace(self.video_token, video_placeholder, 1) # type: ignore
index += 1
text[i] = text[i].replace("<|placeholder|>", self.video_token) # type: ignore
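                # Worked example (illustrative): a video with grid (2, 4, 4) and
                # merge_size=2 expands one video token into two frames, each rendered as
                # "<t seconds><|vision_start|>" + 4 video-pad tokens + "<|vision_end|>".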
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", None)
text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"]) # type: ignore
self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"]) # type: ignore
array_ids = np.array(text_inputs["input_ids"])
array_attention_mask = np.array(text_inputs["attention_mask"]) if "attention_mask" in text_inputs else None
if return_mm_token_type_ids:
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
chunks = chunk_tokens(
max_chunk_size=self.video_processor.max_chunk_size, # type: ignore
input_ids=array_ids,
image_token_id=self.image_token_id,
video_token_id=self.video_token_id,
merge_size=self.image_processor.merge_size, # type: ignore
focus_size=self.video_processor.focus_size, # type: ignore
image_grid_thw=image_grid_thw,
video_grid_thw=video_grid_thw,
)
image_token_mask = (array_ids == self.image_token_id).astype(int)
video_token_mask = (array_ids == self.video_token_id).astype(int)
text_token_mask = np.ones_like(image_token_mask) - image_token_mask - video_token_mask
if array_attention_mask is not None:
text_token_mask = text_token_mask * array_attention_mask
image_token_mask = image_token_mask * array_attention_mask
video_token_mask = video_token_mask * array_attention_mask
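        # Worked example (illustrative): for ids [bos, img, img, txt] where the two
        # middle positions match image_token_id, the masks (before the boolean cast
        # below) are text=[1, 0, 0, 1], image=[0, 1, 1, 0], video=[0, 0, 0, 0].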
return MMFeature(data={
**text_inputs,
**image_inputs,
**videos_inputs,
"token_chunks": chunks,
"text_token_mask": text_token_mask.astype(bool),
"image_token_mask": image_token_mask.astype(bool),
"video_token_mask": video_token_mask.astype(bool),
}, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
            image_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (height, width) for each image.
            video_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (num_frames, height, width) for each video.
        Returns:
            `MultiModalData`: A `MultiModalData` object holding the number of tokens for each of the provided
            input modalities, along with other useful data.
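
        Example (illustrative; assumes attribute-style access on `MultiModalData`):

        ```python
        data = processor._get_num_multimodal_tokens(image_sizes=[(1024, 768)])
        print(data.num_image_tokens, data.num_image_patches)
        ```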
"""
vision_data = {}
if image_sizes is not None:
            # Merge into a copy so the shared class-level defaults are not mutated in place.
            images_kwargs = {**Qwen3VLProcessorKwargs._defaults.get("images_kwargs", {}), **kwargs}
merge_size = images_kwargs.get("merge_size", None) or self.image_processor.merge_size # type: ignore
num_image_patches = [
self.image_processor.get_number_of_image_patches(*image_size, images_kwargs) # type: ignore
for image_size in image_sizes
]
num_image_tokens = [(num_patches // merge_size**2) for num_patches in num_image_patches]
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
if video_sizes is not None:
            # Merge into a copy so the shared class-level defaults are not mutated in place.
            videos_kwargs = {**Qwen3VLProcessorKwargs._defaults.get("videos_kwargs", {}), **kwargs}
num_video_patches = [
self.video_processor.get_number_of_video_patches(*video_size, videos_kwargs) # type: ignore
for video_size in video_sizes
]
            merge_size = videos_kwargs.get("merge_size", None) or self.video_processor.merge_size  # type: ignore
            num_video_tokens = [(num_patches // merge_size**2) for num_patches in num_video_patches]
vision_data["num_video_tokens"] = num_video_tokens
return MultiModalData(**vision_data)
def post_process_image_text_to_text(
self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
):
"""
Post-process the output of the model to decode the text.
Args:
generated_outputs (`torch.Tensor` or `np.ndarray`):
The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
or `(sequence_length,)`.
skip_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
**kwargs:
                Additional arguments to be passed to the tokenizer's `batch_decode` method.
Returns:
`list[str]`: The decoded text.
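
        Example (illustrative; `generated_ids` is a placeholder for `model.generate(...)` output):

        ```python
        texts = processor.post_process_image_text_to_text(generated_ids)
        ```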
"""
return self.tokenizer.batch_decode( # type: ignore
generated_outputs,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
def _calculate_timestamps(
self,
indices: Union[list[int], np.ndarray],
video_fps: float,
merge_size: int = 2,
focus_size: int = 2,
):
if not isinstance(indices, list):
indices = indices.tolist()
b_size = merge_size * focus_size
if len(indices) % b_size != 0:
indices.extend(indices[-1] for _ in range(b_size - len(indices) % b_size)) # type: ignore
timestamps = [idx / video_fps for idx in indices]
        # @JJJYmmm: frames are merged temporally by `merge_size`, so average the
        # timestamps of the first and last frames within each temporal patch.
timestamps = [
(timestamps[i] + timestamps[i + merge_size - 1]) / 2 for i in range(0, len(timestamps), merge_size)
]
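        # Worked example (illustrative): indices=[0, 12, 24, 36], fps=24, merge_size=2,
        # focus_size=2 -> per-frame timestamps [0.0, 0.5, 1.0, 1.5] -> averaged per
        # temporal patch -> [0.25, 1.25] seconds.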
return timestamps
__all__ = ["ZFQwen3VLProcessor"]