| | """Processor class for PenguinVL.""" |
| |
|
| | import copy |
| | import importlib.util |
| | import os |
| | import os.path as osp |
| | import warnings |
| | from collections import defaultdict |
| | from typing import Any, List, Union, Dict, Optional, Tuple, TypedDict |
| |
|
| | import cv2 |
| | import ffmpeg |
| | import imageio |
| | import json |
| | import math |
| | import numpy as np |
| | import torch |
| | import transformers |
| | from decord import VideoReader, cpu |
| | from einops import rearrange |
| | from torch import nn |
| | from PIL import Image |
| | from transformers.feature_extraction_utils import BatchFeature |
| | from transformers.image_utils import ImageInput |
| | from transformers.processing_utils import ProcessingKwargs, ProcessorMixin, Unpack |
| | from transformers.tokenization_utils_base import PreTokenizedInput, TextInput |
| |
|
# Import the bundled image-processing helpers. The relative import works when
# this file is loaded as part of a package; otherwise (presumably when the file
# is loaded standalone, e.g. via remote-code loading) fall back to importing the
# sibling module directly by file path.
try:
    from . import image_processing_penguinvl
    from .image_processing_penguinvl import (
        is_valid_image, is_valid_video,
    )
except ModuleNotFoundError:
    # Build a module object from the sibling file next to this one.
    spec = importlib.util.spec_from_file_location(
        "image_processing_penguinvl",
        osp.join(osp.dirname(__file__), "image_processing_penguinvl.py"),
    )
    image_processing_penguinvl = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(image_processing_penguinvl)
    # Re-export the two validators so the rest of the file can use them
    # regardless of which import path succeeded.
    is_valid_image = getattr(image_processing_penguinvl, "is_valid_image")
    is_valid_video = getattr(image_processing_penguinvl, "is_valid_video")
| |
|
| | |
| | DEFAULT_IMAGE_TOKEN = "<image>" |
| | IGNORE_INDEX = -100 |
| |
|
| | |
| | Conversation = List[Dict[str, Any]] |
| | SingleImage = Union[Image.Image, np.ndarray, torch.Tensor] |
| | SingleVideo = Union[List[SingleImage], np.ndarray, torch.Tensor] |
| | BatchedImage = List[Union[SingleImage, SingleVideo]] |
| | BatchedNamedImage = List[Tuple[str, Union[SingleImage, SingleVideo]]] |
| |
|
| |
|
def _custom_import(class_name: str):
    """Resolve a processor/tokenizer class by name.

    Looks the class up on `transformers` first, then falls back to the bundled
    `image_processing_penguinvl` module for image-related classes.

    Args:
        class_name: The class name to resolve (e.g. "Qwen2TokenizerFast").

    Returns:
        The resolved class object.

    Raises:
        AttributeError: if the name is not found in either location.
    """
    try:
        return getattr(transformers, class_name)
    except AttributeError:
        if "image" in class_name.lower():
            return getattr(image_processing_penguinvl, class_name)
        # BUG FIX: the original fell through with `attribute_class` unbound and
        # raised UnboundLocalError; re-raise the underlying lookup failure instead.
        raise
| |
|
| |
|
def is_named_image(image) -> bool:
    """True iff `image` is a ("image"|"video", payload) pair with a valid payload."""
    if not isinstance(image, (list, tuple)) or len(image) != 2:
        return False
    name, payload = image
    if not isinstance(name, str) or name not in ["image", "video"]:
        return False
    return is_valid_image(payload) or is_valid_video(payload)
| |
|
| |
|
def make_batched_images(images) -> List[List[ImageInput]]:
    """Normalize image/video inputs into two parallel lists: modality names and payloads.

    Accepts:
      - a list/tuple of ("image"|"video", payload) pairs,
      - a list/tuple of raw images/videos (modality inferred per element),
      - a single named pair, a single video, or a single image.

    Returns:
        (names, payloads): names[i] is "image" or "video"; payloads[i] is the input.

    Raises:
        ValueError: if the input cannot be interpreted as images/videos.
    """
    if isinstance(images, (list, tuple)) and all(is_named_image(image) for image in images):
        # Already a list of (name, payload) pairs.
        return [image[0] for image in images], [image[1] for image in images]
    elif isinstance(images, (list, tuple)) and all(is_valid_image(image) or is_valid_video(image) for image in images):
        # Infer each element's modality; videos are checked first since a video
        # may also satisfy the image predicate.
        batch = []
        for image in images:
            if is_valid_video(image):
                batch.append(("video", image))
            elif is_valid_image(image):
                batch.append(("image", image))
            else:
                raise ValueError(f"Could not make batched images from {images}")
        return [x[0] for x in batch], [x[1] for x in batch]
    elif is_named_image(images):
        # Single (name, payload) pair.
        # BUG FIX: was `[image[1]]`, which referenced an undefined name and
        # raised NameError at runtime; the payload lives in `images[1]`.
        return [images[0]], [images[1]]
    elif is_valid_video(images):
        return ["video"], [images]
    elif is_valid_image(images):
        return ["image"], [images]

    raise ValueError(f"Could not make batched images from {images}")
| |
|
| |
|
def frame_sample(duration, mode='uniform', num_frames=None, vid_fps=None, fps=None):
    """Return sampled frame indices for a clip of `duration` frames.

    Args:
        duration: Number of frames available.
        mode: 'uniform' (evenly spaced `num_frames` indices) or 'fps'
            (one index per 1/fps seconds of source video).
        num_frames: Required for 'uniform' mode.
        vid_fps: Source video fps; required for 'fps' mode.
        fps: Target sampling fps; required for 'fps' mode.

    Returns:
        np.ndarray of integer frame indices.

    Raises:
        ValueError: for an unsupported `mode`.
    """
    if mode == 'uniform':
        assert num_frames is not None, "Number of frames must be provided for uniform sampling."
        # Fewer frames than requested: return every frame.
        if duration <= num_frames:
            return np.arange(duration).astype(int)
        return np.linspace(0, duration - 1, num_frames, dtype=int)
    elif mode == 'fps':
        assert vid_fps is not None, "FPS must be provided for FPS sampling."
        assert fps is not None, "FPS must be provided for FPS sampling."
        # One sample per `segment_len` source frames, taken from each segment's middle.
        # NOTE(review): if fps > vid_fps this yields a zero/fractional step — confirm callers.
        segment_len = min(vid_fps // fps, duration)
        return np.arange(segment_len // 2, duration, segment_len, dtype=int)
    # BUG FIX: the original raised ImportError here; a bad argument is a ValueError.
    raise ValueError(f'Unsupported frame sampling mode: {mode}')
| |
|
| |
|
def load_video_from_ids(video_path, s=None, e=None, fps=None, max_frames=128, temporal_factor=1):
    """Load frames from a video file, a frame directory, or a GIF by frame index.

    Args:
        video_path: Path to a video file, a .gif, or a directory of frame images.
        s, e: Optional start/end time in seconds.
        fps: Optional target sampling fps; falls back to uniform sampling.
        max_frames: Maximum number of frames to return.
        temporal_factor: Pad the frame count to a multiple of this value.

    Returns:
        (frames, timestamps): list of CHW uint8 arrays and per-frame seconds.
    """
    # Sanitize the requested time range.
    if s is not None and e is not None:
        s = s if s >= 0. else 0.
        e = e if e >= 0. else 0.
        if s > e:
            s, e = e, s
        elif s == e:
            e = s + 1

    # Open the source and determine fps / total frame count.
    if os.path.isdir(video_path):
        frame_files = sorted(os.listdir(video_path))
        # NOTE(review): frame directories are assumed to be 3 fps — confirm.
        vid_fps = 3
        num_frames_of_video = len(frame_files)
    elif video_path.endswith('.gif'):
        gif_reader = imageio.get_reader(video_path)
        # NOTE(review): GIFs are assumed to be 25 fps — confirm.
        vid_fps = 25
        num_frames_of_video = len(gif_reader)
    else:
        vreader = VideoReader(video_path, ctx=cpu(0), num_threads=2)
        vid_fps = vreader.get_avg_fps()
        num_frames_of_video = len(vreader)

    # Convert the time range to a frame-index range.
    f_start = 0 if s is None else max(int(s * vid_fps) - 1, 0)
    f_end = num_frames_of_video - 1 if e is None else min(int(e * vid_fps) - 1, num_frames_of_video - 1)
    frame_indices = list(range(f_start, f_end + 1))

    duration = len(frame_indices)
    # fps-based sampling only for clips that fit under max_frames; otherwise uniform.
    if fps is not None and duration / vid_fps < max_frames:
        sampled_frame_indices = [frame_indices[i] for i in frame_sample(duration, mode='fps', vid_fps=vid_fps, fps=fps)]
    else:
        sampled_frame_indices = [frame_indices[i] for i in frame_sample(duration, mode='uniform', num_frames=max_frames)]

    # Decode only the sampled frames, as RGB.
    if os.path.isdir(video_path):
        frames = np.array([cv2.cvtColor(cv2.imread(os.path.join(video_path, frame_files[frame_idx])), cv2.COLOR_BGR2RGB) for frame_idx in sampled_frame_indices])
    elif video_path.endswith('.gif'):
        # PERF: set membership is O(1) vs the original O(n) list scan per frame.
        wanted = set(sampled_frame_indices)
        frames = np.array([cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB) for idx, frame in enumerate(gif_reader) if idx in wanted])
    else:
        frames = vreader.get_batch(sampled_frame_indices).asnumpy()

    frames = frames.transpose(0, 3, 1, 2)  # NHWC -> NCHW
    timestamps = [x / vid_fps for x in sampled_frame_indices]

    if temporal_factor > 1:
        # BUG FIX: was `temporal_factor - len(frames) % temporal_factor`, which padded
        # a full extra group when the count was already divisible.
        pad_length = -len(frames) % temporal_factor
        if pad_length:
            frames = np.concatenate([frames, frames[-1:].repeat(pad_length, axis=0)])
            # BUG FIX: extend timestamps with a plain loop (the original used a
            # side-effect list comprehension) and fall back to the source frame
            # period when `fps` is None (previously a TypeError).
            step = 1 / fps if fps else 1 / vid_fps
            for _ in range(pad_length):
                timestamps.append(timestamps[-1] + step)

    frames = [frame for frame in frames]

    return frames, timestamps
| |
|
| |
|
| |
|
def round_by_factor(number: int, factor: int) -> int:
    """Nearest multiple of `factor` to `number` (Python's round: ties go to even)."""
    return factor * round(number / factor)
| |
|
| |
|
def ceil_by_factor(number: int, factor: int) -> int:
    """Smallest multiple of `factor` that is >= `number`."""
    return factor * math.ceil(number / factor)
| |
|
| |
|
def floor_by_factor(number: int, factor: int) -> int:
    """Largest multiple of `factor` that is <= `number`."""
    return factor * math.floor(number / factor)
| |
|
def smart_resize(
    height: int,
    width: int,
    factor: int = 14,
    min_pixels: int = 0,
    max_pixels: int = 16384,
):
    """
    Compute target (height, width) such that:
      - Both dimensions are divisible by `factor`.
      - Total pixels lie in [min_pixels, max_pixels].
      - Aspect ratio is preserved as closely as possible.

    Raises:
        ValueError: if the aspect ratio exceeds 200.
    """
    # CLEANUP: the original redefined the module-level round/ceil/floor_by_factor
    # helpers as identical nested functions; the arithmetic is inlined instead.
    max_ratio = 200
    if max(height, width) / min(height, width) > max_ratio:
        raise ValueError(
            f"Aspect ratio must be < {max_ratio}, got {max(height, width) / min(height, width)}"
        )
    # Round each side to the nearest multiple of `factor` (at least one factor).
    h = max(factor, round(height / factor) * factor)
    w = max(factor, round(width / factor) * factor)
    if h * w > max_pixels:
        # Too large: scale down, flooring to factor multiples so we stay <= max_pixels.
        scale = math.sqrt((height * width) / max_pixels)
        h = math.floor(height / scale / factor) * factor
        w = math.floor(width / scale / factor) * factor
    elif h * w < min_pixels:
        # Too small: scale up, ceiling to factor multiples so we stay >= min_pixels.
        scale = math.sqrt(min_pixels / (height * width))
        h = math.ceil(height * scale / factor) * factor
        w = math.ceil(width * scale / factor) * factor
    return max(h, factor), max(w, factor)
| |
|
| | |
def get_frame_sim(
    frame1: torch.Tensor,
    frame2: torch.Tensor,
    patch_size: int = 14,
    threshold: float = 0.7,
    epsilon: float = 1e-8,
) -> float:
    """Patch-wise similarity between two frames in HSV space.

    Each frame is split into `patch_size` x `patch_size` patches; a patch pair is
    "similar" when its cosine similarity exceeds `threshold` (or when both patches
    are near-zero). The returned value is the fraction of non-near-zero patch
    pairs that are similar, in [0, 1].

    Args:
        frame1, frame2: [C, H, W] RGB tensors; H and W must be divisible by `patch_size`.
        patch_size: Side length of the square comparison patches.
        threshold: Cosine-similarity cutoff for a patch pair to count as similar.
        epsilon: Added to norms to avoid division by zero.
    """
    assert frame1.dim() == 3 and frame2.dim() == 3, "Frames must be 3D tensors [C, H, W]"

    def to_hsv_tensor(tensor: torch.Tensor) -> torch.Tensor:
        # CHW RGB tensor -> HWC numpy -> HSV via OpenCV -> CHW float tensor.
        arr = tensor.cpu().permute(1, 2, 0).numpy()
        if arr.dtype in (np.float32, np.float64):
            # NOTE(review): this casts floats to uint8 without rescaling — correct
            # only if values are already in 0..255; confirm against callers.
            arr = arr.astype(np.uint8)
        hsv = cv2.cvtColor(arr, cv2.COLOR_RGB2HSV)
        return torch.from_numpy(hsv).permute(2, 0, 1).to(tensor.device).float()

    f1 = to_hsv_tensor(frame1)
    f2 = to_hsv_tensor(frame2)
    # [C, H, W] -> [h, w, c*p1*p2]: one flattened vector per spatial patch.
    patch1 = rearrange(f1, "c (h p1) (w p2) -> h w (c p1 p2)", p1=patch_size, p2=patch_size).float()
    patch2 = rearrange(f2, "c (h p1) (w p2) -> h w (c p1 p2)", p1=patch_size, p2=patch_size).float()

    norm1 = torch.norm(patch1, p=2, dim=-1, keepdim=True) + epsilon
    norm2 = torch.norm(patch2, p=2, dim=-1, keepdim=True) + epsilon
    cos_sim = (patch1 / norm1 * patch2 / norm2).sum(dim=-1)

    # Patch pairs where both sides are (nearly) all-zero count as similar by fiat.
    both_near_zero = (norm1.squeeze() < 0.01) & (norm2.squeeze() < 0.01)
    similar = torch.ones_like(cos_sim)
    similar[~both_near_zero] = (cos_sim[~both_near_zero] > threshold).float()
    # NOTE(review): if every patch is near-zero this is a mean over an empty
    # tensor and returns nan — confirm upstream frames can't be all-black.
    return similar[~both_near_zero].float().mean().item()
| |
|
| |
|
| | |
| | K_PATCH = 14 |
| | K_MIN_PIXELS = 10 * 14 * 14 |
| | K_MAX_PIXELS = 10240 * 14 * 14 |
| |
|
def extract_ki_frames(
    frames: torch.Tensor,
    threshold: float = 0.7,
) -> list:
    """
    Label each frame as keyframe (0) or non-keyframe (1) by comparing to the previous keyframe.
    First frame is always a keyframe; a new keyframe is chosen when similarity drops below threshold.

    Args:
        frames: [N, C, H, W] tensor of frames.
        threshold: Similarity cutoff below which a frame starts a new keyframe.
            BUG FIX: the original default referenced `MIN_FRAME_SIMILARITY`, which is
            not defined in this module and made evaluating this `def` raise NameError.
            0.7 mirrors `get_frame_sim`'s default — TODO confirm the intended value.

    Returns:
        List[int] of length N: 0 for keyframes, 1 otherwise.
    """
    assert frames.dim() == 4, "Frames must be 4D tensor [N, C, H, W]"

    def _keyframe_indices(f: torch.Tensor) -> list:
        # Greedy scan: keep the last keyframe as the reference and start a new
        # keyframe whenever similarity to it falls below the threshold.
        indices = [0]
        key = f[0]
        for i in range(1, f.size(0)):
            if get_frame_sim(key, f[i]) < threshold:
                indices.append(i)
                key = f[i]
        return indices

    _, _, h, w = frames.shape
    # Downscale to a bounded, patch-aligned size so similarity is cheap to compute.
    rh, rw = smart_resize(h, w, factor=K_PATCH, min_pixels=K_MIN_PIXELS, max_pixels=K_MAX_PIXELS)
    resized = nn.functional.interpolate(frames, (rh, rw), mode="bilinear", antialias=True).float()
    k_indices = _keyframe_indices(resized)
    frame_types = torch.ones(frames.size(0), dtype=torch.int32)
    frame_types[k_indices] = 0
    return frame_types.tolist()
| |
|
| |
|
class ChatTemplateKwargs(TypedDict, total=False):
    """Keyword arguments controlling chat-template rendering."""

    # Jinja chat template string; presumably falls back to the tokenizer's
    # template when None (see the processor's __init__ fallback).
    chat_template: Optional[str]
    # Whether to prepend a system prompt when rendering.
    add_system_prompt: Optional[bool]
    # Whether to append the assistant generation-prompt suffix.
    add_generation_prompt: Optional[bool]
| |
|
| |
|
class PenguinVLQwen3ProcessorKwargs(ProcessingKwargs, ChatTemplateKwargs, total=False):
    """Call-time kwargs for PenguinVLQwen3Processor: text/image kwargs plus chat-template options."""

    # NOTE(review): this assigns the *annotations* mapping (name -> type object)
    # as the class-level value, not actual default values — confirm this is what
    # the ProcessingKwargs machinery expects.
    chat_template_kwargs: ChatTemplateKwargs = {
        **ChatTemplateKwargs.__annotations__,
    }

    # Per-group defaults, presumably merged in by transformers' ProcessingKwargs.
    _defaults = {
        "text_kwargs": {
            "padding": False,
        },
        "image_kwargs": {
            "merge_size": None,
        },
        "chat_template_kwargs": {
            "chat_template": None,
            "add_system_prompt": False,
            "add_generation_prompt": False,
        },
    }
| |
|
| |
|
class PenguinVLQwen3Processor(ProcessorMixin):
    """Multimodal processor pairing a PenguinVL image processor with a Qwen2 tokenizer."""

    # Sub-processor attributes managed by ProcessorMixin.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "PenguinVLImageProcessor"
    # (slow, fast) tokenizer class names; resolved via _custom_import.
    tokenizer_class = ("Qwen2Tokenizer", "Qwen2TokenizerFast")
    valid_kwargs = ["chat_template", "image_merge_size", "video_merge_size", "fps", "max_frames"]
| |
|
    def __init__(
        self,
        image_processor=None,
        tokenizer=None,
        chat_template: str = None,
        image_merge_size: int = 1,
        video_merge_size: int = 2,
        fps: Optional[int] = 1,
        max_frames: Optional[int] = 128,
        use_codec: bool = False,
    ):
        """Build the processor from its sub-components.

        Args:
            image_processor: PenguinVL image processor instance.
            tokenizer: Qwen2 tokenizer instance (must be provided; used below).
            chat_template: Jinja chat template; defaults to the tokenizer's.
            image_merge_size: Spatial merge size applied to image patches.
            video_merge_size: Spatial merge size applied to video patches.
            fps: Default sampling fps for video loading.
            max_frames: Default maximum number of video frames.
            use_codec: Route `load_video` through the codec/I-frame loader.

        NOTE(review): does not call super().__init__() — ProcessorMixin setup
        (if any) is skipped; confirm this is intentional.
        """
        self.image_processor = image_processor
        self.tokenizer = tokenizer
        if chat_template is None:
            # Fall back to the template shipped with the tokenizer.
            chat_template = self.tokenizer.chat_template
        self.chat_template = chat_template

        self.image_merge_size = image_merge_size
        self.video_merge_size = video_merge_size
        self.fps = fps
        self.max_frames = max_frames
        self.use_codec = use_codec
        # Cache the generation-prompt suffix and its token ids for later reuse.
        self.generation_prompt = self._infer_generation_prompt()
        self.generation_prompt_ids = self.tokenizer.encode(self.generation_prompt, return_tensors="pt")
        self.generation_prompt_length = len(self.generation_prompt_ids[0])
        self.image_token_id = self.tokenizer.convert_tokens_to_ids(DEFAULT_IMAGE_TOKEN)
        self.eos_token_id = self.tokenizer.eos_token_id
| |
|
| | @classmethod |
| | def _get_arguments_from_pretrained(cls, pretrained_model_name_or_path, **kwargs): |
| | args = [] |
| | for attribute_name in cls.attributes: |
| | class_name = getattr(cls, f"{attribute_name}_class") |
| | if isinstance(class_name, tuple): |
| | classes = tuple(_custom_import(n) if n is not None else None for n in class_name) |
| | use_fast = kwargs.get("use_fast", True) |
| | if use_fast and classes[1] is not None: |
| | attribute_class = classes[1] |
| | else: |
| | attribute_class = classes[0] |
| | else: |
| | attribute_class = _custom_import(class_name) |
| |
|
| | args.append(attribute_class.from_pretrained(pretrained_model_name_or_path, **kwargs)) |
| | return args |
| |
|
    def get_generation_prompt(self):
        """Return the cached assistant generation-prompt suffix (a string)."""
        return self.generation_prompt
| |
|
    def get_generation_prompt_ids(self):
        """Return the cached generation-prompt token ids (a [1, L] tensor from the tokenizer)."""
        return self.generation_prompt_ids
| |
|
| | def _infer_generation_prompt(self): |
| | pseudo_message = [{"role": "user", "content": ""}] |
| | instruction = self.apply_chat_template(pseudo_message, tokenize=False, add_generation_prompt=True) |
| | conversation = self.apply_chat_template(pseudo_message, tokenize=False, add_generation_prompt=False) |
| | return instruction.replace(conversation, "") |
| |
|
| | def _get_downsampled_grid_sizes(self, image_inputs: Dict[str, Any]): |
| | grid_sizes = [] |
| | for grid_size, merge_size in zip(image_inputs.get("grid_sizes", []), image_inputs.get("merge_sizes", [])): |
| | if not torch.all(grid_size[1:] % merge_size == 0): |
| | warnings.warn(f"Grid size {grid_size} is not divisible by merge size. Some undesired errors may occur.") |
| | if grid_size[0] == 1: |
| | grid_sizes.append(grid_size[1:] / merge_size) |
| | elif grid_size[0] > 1: |
| | grid_sizes.extend([grid_size[1:] / merge_size] * grid_size[0]) |
| | return grid_sizes |
| |
|
| | def _get_visual_seq_len(self, grid_size: torch.Tensor): |
| | num_tokens = int(grid_size.prod().item()) |
| | return num_tokens |
| |
|
    def load_images(self, image_path: Union[str, List[str], Image.Image, List[Image.Image]]):
        """Load one or more images from a path, directory, PIL image, or list thereof.

        NOTE(review): return types are inconsistent — path inputs yield PIL.Image
        objects while PIL inputs yield np.ndarray; confirm downstream callers
        accept both. An empty list input raises IndexError (image_path[0]).
        """
        if isinstance(image_path, str) and os.path.isfile(image_path):
            # Single image file on disk.
            images = [Image.open(image_path).convert('RGB')]
        elif isinstance(image_path, str) and os.path.isdir(image_path):
            # Directory of images, loaded in lexicographic filename order.
            images = [Image.open(os.path.join(image_path, f)).convert('RGB') for f in sorted(os.listdir(image_path))]
        elif isinstance(image_path, list) and isinstance(image_path[0], str):
            # List of image file paths.
            images = [Image.open(f).convert('RGB') for f in image_path]
        elif isinstance(image_path, list) and isinstance(image_path[0], Image.Image):
            # List of in-memory PIL images -> numpy arrays.
            images = [np.array(x) for x in image_path]
        elif isinstance(image_path, Image.Image):
            # Single in-memory PIL image -> numpy array.
            images = [np.array(image_path)]
        else:
            raise ValueError(f"Unsupported image path type: {type(image_path)}")
        return images
| |
|
| | def load_video( |
| | self, |
| | video_path: str, |
| | start_time: Optional[float] = None, |
| | end_time: Optional[float] = None, |
| | fps: Optional[float] = None, |
| | max_frames: Optional[float] = None, |
| | size: Optional[int] = None, |
| | size_divisible: int = 1, |
| | precise_time: bool = False, |
| | verbose: bool = False, |
| | temporal_factor: int = 1 |
| | ): |
| | """ |
| | Load and process a video file and return the frames and the timestamps of each frame. |
| | Args: |
| | video_path (str): Path to the video file. |
| | start_time (float, optional): Start time in seconds. Defaults to None. |
| | end_time (float, optional): End time in seconds. Defaults to None. |
| | fps (float, optional): Frames per second. Defaults to None. |
| | num_frames (float, optional): Number of frames to sample. Defaults to None. |
| | size (int, optional): Size of the shortest side. Defaults to None. |
| | size_divisible (int, optional): Size divisible by this number. Defaults to 1. |
| | precise_time (bool, optional): Whether to use precise time. Defaults to False. |
| | verbose (bool, optional): Print ffmpeg output. Defaults to False. |
| | Returns: |
| | frames (List[PIL.Image]): List of frames. |
| | timestamps (List[float]): List of timestamps. |
| | """ |
| | if self.use_codec: |
| | return self.load_video_with_codec(**locals()) |
| | fps = self.fps if fps is None else fps |
| | max_frames = self.max_frames if max_frames is None else max_frames |
| |
|
| | if start_time is not None and end_time is not None and end_time - start_time < 1: |
| | return load_video_from_ids(video_path, start_time, end_time, fps=fps, max_frames=max_frames) |
| | if os.path.isdir(video_path): |
| | return load_video_from_ids(video_path, start_time, end_time, fps=fps, max_frames=max_frames) |
| | if video_path.endswith('.gif'): |
| | return load_video_from_ids(video_path, start_time, end_time, fps=fps, max_frames=max_frames) |
| | probe = ffmpeg.probe(video_path) |
| | duration = float(probe['format']['duration']) |
| | video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None) |
| | w, h = int(video_stream['width']), int(video_stream['height']) |
| |
|
| | kwargs, input_kwargs, output_kwargs = {}, {}, {} |
| | do_trim = start_time is not None or end_time is not None |
| | if start_time is not None: |
| | new_start_time = max(float(video_stream['start_time']), start_time) |
| | duration -= new_start_time - start_time |
| | start_time = new_start_time |
| | else: |
| | start_time = float(video_stream['start_time']) |
| | if end_time is not None: |
| | duration = min(duration, end_time - start_time) |
| | else: |
| | duration = duration |
| | if do_trim: |
| | kwargs = {'ss': start_time, 't': duration} |
| | if precise_time: |
| | output_kwargs.update(kwargs) |
| | else: |
| | input_kwargs.update(kwargs) |
| |
|
| | if size is not None: |
| | scale_factor = size / min(w, h) |
| | new_w, new_h = round(w * scale_factor), round(h * scale_factor) |
| | else: |
| | new_w, new_h = w, h |
| | new_w = new_w // size_divisible * size_divisible |
| | new_h = new_h // size_divisible * size_divisible |
| |
|
| | |
| | |
| | |
| | |
| |
|
| | stream = ffmpeg.input(video_path, **input_kwargs) |
| | if fps is not None: |
| | stream = ffmpeg.filter(stream, "fps", fps=fps, round="down") |
| | if new_w != w or new_h != h: |
| | stream = ffmpeg.filter(stream, 'scale', new_w, new_h) |
| | stream = ffmpeg.output(stream, "pipe:", format="rawvideo", pix_fmt="rgb24", **output_kwargs) |
| | out, _ = ffmpeg.run(stream, capture_stdout=True, quiet=not verbose) |
| |
|
| | frames = np.frombuffer(out, np.uint8).reshape([-1, new_h, new_w, 3]).transpose([0, 3, 1, 2]) |
| |
|
| | if fps is not None: |
| | timestamps = np.arange(start_time, start_time + duration + 1 / fps, 1 / fps)[:len(frames)] |
| | else: |
| | timestamps = np.linspace(start_time, start_time + duration, len(frames)) |
| |
|
| | if max_frames is not None and len(frames) > max_frames: |
| | indices = np.linspace(0, len(frames) - 1, max_frames, dtype=int) |
| | frames = frames[indices] |
| | timestamps = timestamps[indices] |
| |
|
| | if temporal_factor > 1: |
| | pad_length = temporal_factor - len(frames) % temporal_factor |
| | frames = np.concatenate([frames, frames[-1:].repeat(pad_length, axis=0)]) |
| | timestamps = np.concatenate([timestamps, timestamps[-1:].repeat(pad_length) + np.arange(1, pad_length + 1) / fps]) |
| |
|
| | frames_tensor = torch.from_numpy(frames.copy()).float() |
| | frame_types = extract_ki_frames(frames_tensor) |
| |
|
| | frames = [frame for frame in frames] |
| | timestamps = [timestamp for timestamp in timestamps] |
| |
|
| | return frames, timestamps, frame_types |
| | |
| | def load_video_with_codec( |
| | self, |
| | video_path: str, |
| | start_time: Optional[float] = None, |
| | end_time: Optional[float] = None, |
| | fps: Optional[float] = None, |
| | max_frames: Optional[float] = None, |
| | size: Optional[int] = None, |
| | size_divisible: int = 1, |
| | precise_time: bool = False, |
| | verbose: bool = False, |
| | temporal_factor: int = 1, |
| | slow_fast: bool = True |
| | ): |
| | """ |
| | Load a video by prioritizing I-frames (keyframes) and dynamically sampling |
| | additional frames between adjacent I-frames up to `max_frames`. |
| | Notes: |
| | - Real codec I-frames (keyframes) are always used as-is and do NOT follow `fps`. |
| | - If `fps` is provided, it controls how we sample additional non-I frames between |
| | adjacent I-frames (and still respects `max_frames`). |
| | - This function does NOT call `load_video_from_ids`. |
| | Returns: |
| | frames: List[np.ndarray] where each is CHW (3, H, W) uint8 |
| | timestamps: List[float] timestamps in seconds for each returned frame |
| | frame_types: List[int] where 0 = I-frame (keyframe), 1 = non-I-frame (sampled) |
| | """ |
| | return_frame_types = slow_fast |
| | max_frames = int(max_frames if max_frames is not None else self.max_frames) |
| | if max_frames <= 0: |
| | return ([], [], []) if return_frame_types else ([], []) |
| |
|
| | def _coerce_range(s: Optional[float], e: Optional[float]): |
| | if s is not None and e is not None: |
| | s = s if s >= 0.0 else 0.0 |
| | e = e if e >= 0.0 else 0.0 |
| | if s > e: |
| | s, e = e, s |
| | elif s == e: |
| | e = s + 1.0 |
| | return s, e |
| |
|
| | |
| | if os.path.isdir(video_path): |
| | |
| | |
| | |
| | start_time, end_time = _coerce_range(start_time, end_time) |
| | dir_fps = 3.0 |
| |
|
| | all_entries = sorted(os.listdir(video_path)) |
| | frame_files = [] |
| | for name in all_entries: |
| | p = os.path.join(video_path, name) |
| | if not os.path.isfile(p): |
| | continue |
| | if not name.lower().endswith((".jpg", ".jpeg", ".png", ".bmp", ".webp")): |
| | continue |
| | frame_files.append(name) |
| |
|
| | if len(frame_files) == 0: |
| | return ([], [], []) if return_frame_types else ([], []) |
| |
|
| | num_frames_of_video = len(frame_files) |
| | f_start = 0 if start_time is None else max(int(start_time * dir_fps) - 1, 0) |
| | f_end = (num_frames_of_video - 1) if end_time is None else min(int(end_time * dir_fps) - 1, num_frames_of_video - 1) |
| | if f_end < f_start: |
| | return ([], [], []) if return_frame_types else ([], []) |
| |
|
| | frame_indices = list(range(f_start, f_end + 1)) |
| | duration = len(frame_indices) |
| | sampled = frame_sample(duration, mode="uniform", num_frames=max_frames) |
| | sampled_frame_indices = [frame_indices[i] for i in sampled.tolist()] |
| |
|
| | frames = [] |
| | timestamps = [] |
| | for i in sampled_frame_indices: |
| | img = cv2.imread(os.path.join(video_path, frame_files[i])) |
| | if img is None: |
| | continue |
| | frames.append(cv2.cvtColor(img, cv2.COLOR_BGR2RGB).transpose(2, 0, 1)) |
| | timestamps.append(float(i) / dir_fps) |
| |
|
| | |
| | frame_types = [1] * len(frames) |
| | return (frames, timestamps, frame_types) if return_frame_types else (frames, timestamps) |
| |
|
| | if video_path.endswith('.gif'): |
| | gif_reader = imageio.get_reader(video_path) |
| | num_frames_of_video = len(gif_reader) |
| | if num_frames_of_video == 0: |
| | return ([], [], []) if return_frame_types else ([], []) |
| | n = min(max_frames, num_frames_of_video) |
| | idxs = np.linspace(0, num_frames_of_video - 1, n, dtype=int).tolist() |
| | frames = [ |
| | cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB).transpose(2, 0, 1) |
| | for idx, frame in enumerate(gif_reader) if idx in set(idxs) |
| | ] |
| | |
| | timestamps = [float(i) for i in range(len(frames))] |
| | |
| | frame_types = [0] * len(frames) |
| | return (frames, timestamps, frame_types) if return_frame_types else (frames, timestamps) |
| |
|
| | def _get_video_stream_info(path: str): |
| | probe = ffmpeg.probe(path) |
| | fmt_duration = float(probe["format"]["duration"]) |
| | vstream = next((st for st in probe["streams"] if st.get("codec_type") == "video"), None) |
| | if vstream is None: |
| | raise ValueError(f"No video stream found in: {path}") |
| | w, h = int(vstream["width"]), int(vstream["height"]) |
| | stream_start = float(vstream.get("start_time") or 0.0) |
| | return probe, vstream, fmt_duration, (w, h), stream_start |
| |
|
| | def _safe_float(x) -> Optional[float]: |
| | if x is None: |
| | return None |
| | try: |
| | return float(x) |
| | except Exception: |
| | return None |
| |
|
| | def _get_iframe_timestamps(path: str, s: float, e: float) -> List[float]: |
| | """ |
| | Return sorted I-frame timestamps within [s, e]. |
| | Uses ffprobe with skip_frame=nokey to avoid scanning all frames. |
| | """ |
| | try: |
| | p = ffmpeg.probe( |
| | path, |
| | select_streams="v:0", |
| | skip_frame="nokey", |
| | show_frames=None, |
| | show_entries="frame=pict_type,pkt_pts_time,best_effort_timestamp_time,key_frame,pkt_size", |
| | of="json", |
| | ) |
| | except ffmpeg.Error as ex: |
| | print("ffprobe keyframe scan failed:", ex) |
| | return [] |
| | frames_meta = p.get("frames") or [] |
| | out_ts = [] |
| | for fr in frames_meta: |
| | |
| | pict_type = fr.get("pict_type") |
| | is_i = (pict_type == "I") or (pict_type is None and str(fr.get("key_frame")) == "1") |
| | if not is_i: |
| | continue |
| | ts = _safe_float(fr.get("pkt_pts_time")) |
| | if ts is None: |
| | ts = _safe_float(fr.get("best_effort_timestamp_time")) |
| | if ts is None: |
| | continue |
| | if ts < s or ts > e: |
| | continue |
| | size_bytes = int(fr.get("pkt_size", 0)) |
| | out_ts.append((ts, size_bytes)) |
| |
|
| | out_ts.sort(key=lambda x: x[0]) |
| | out_sizes = [x[1] for x in out_ts] |
| | return [x[0] for x in out_ts], out_sizes |
| |
|
| | def _normalize_uint8_nchw(data: torch.Tensor) -> torch.Tensor: |
| | """ |
| | Ensure tensor is NCHW uint8 on CPU with values in [0, 255]. |
| | torchcodec may return float in [0,1] or [0,255] depending on backend. |
| | """ |
| | if not isinstance(data, torch.Tensor): |
| | raise TypeError(f"Expected torch.Tensor, got {type(data)}") |
| | if data.ndim != 4: |
| | raise ValueError(f"Expected NCHW tensor, got shape {tuple(data.shape)}") |
| | if data.device.type != "cpu": |
| | data = data.cpu() |
| | if data.dtype != torch.uint8: |
| | d = data |
| | if d.is_floating_point(): |
| | mx = float(d.max().item()) if d.numel() > 0 else 0.0 |
| | if mx <= 1.0 + 1e-6: |
| | d = d * 255.0 |
| | d = d.round() |
| | data = d.clamp(0, 255).to(torch.uint8) |
| | return data |
| |
|
| |
|
| | def _allocate_remaining_floor_ratio(widths: np.ndarray, remaining: int) -> list[int]: |
| | """ |
| | Allocate `remaining` frames across windows proportionally by window width using floor, |
| | without redistributing leftover. |
| | This matches the spec: |
| | - prioritize large I-frame windows |
| | - use floor so the sum does not exceed `remaining` |
| | """ |
| | nwin = int(widths.shape[0]) |
| | if nwin == 0 or remaining <= 0: |
| | return [0] * nwin |
| | widths = np.maximum(widths.astype(float), 0.0) |
| | wsum = float(widths.sum()) |
| | if wsum <= 0.0: |
| | return [0] * nwin |
| | alloc = np.floor(float(remaining) * (widths / wsum)).astype(int) |
| | |
| | s = int(alloc.sum()) |
| | if s > remaining: |
| | |
| | order = np.argsort(widths) |
| | i = 0 |
| | while s > remaining and i < nwin: |
| | j = int(order[i]) |
| | if alloc[j] > 0: |
| | alloc[j] -= 1 |
| | s -= 1 |
| | else: |
| | i += 1 |
| | return alloc.tolist() |
| |
|
| | def _uniform_inside(a: float, b: float, k: int) -> List[float]: |
| | """k points uniformly spaced inside (a, b), excluding endpoints.""" |
| | if k <= 0: |
| | return [] |
| | if b <= a: |
| | return [] |
| | step = (b - a) / (k + 1) |
| | return [a + step * (j + 1) for j in range(k)] |
| |
|
| | def _sample_inside_fps(a: float, b: float, fps_val: float) -> List[float]: |
| | """Sample points at `fps_val` within (a, b), excluding endpoints.""" |
| | if fps_val is None: |
| | return [] |
| | try: |
| | fps_f = float(fps_val) |
| | except Exception: |
| | return [] |
| | if not (fps_f > 0.0): |
| | return [] |
| | if b <= a: |
| | return [] |
| | step = 1.0 / fps_f |
| | t = a + step |
| | out = [] |
| | |
| | |
| | |
| | max_points = int(max(0.0, (b - a) * fps_f)) + 2 |
| | n = 0 |
| | while t < b and n < max_points: |
| | out.append(float(t)) |
| | t += step |
| | n += 1 |
| | return out |
| |
|
| | start_time, end_time = _coerce_range(start_time, end_time) |
| | probe, video_stream, fmt_duration, (w, h), stream_start = _get_video_stream_info(video_path) |
| |
|
| | |
| | if start_time is None: |
| | start_time = float(stream_start) |
| | else: |
| | start_time = max(float(stream_start), float(start_time)) |
| |
|
| | if end_time is None: |
| | end_time = float(stream_start) + float(fmt_duration) |
| | else: |
| | end_time = float(end_time) |
| |
|
| | if end_time <= start_time: |
| | end_time = start_time + 1e-3 |
| |
|
| | |
| | if size is not None: |
| | scale_factor = size / min(w, h) |
| | new_w, new_h = round(w * scale_factor), round(h * scale_factor) |
| | else: |
| | new_w, new_h = w, h |
| | new_w = new_w // size_divisible * size_divisible |
| | new_h = new_h // size_divisible * size_divisible |
| |
|
| | |
| | iframe_ts, iframe_sizes = _get_iframe_timestamps(video_path, start_time, end_time) |
| |
|
| | |
| | timestamps: List[float] = [] |
| | frame_types: List[int] = [] |
| |
|
| | if len(iframe_ts) == 0: |
| | |
| | if end_time <= start_time: |
| | return ([], [], []) if return_frame_types else ([], []) |
| | if fps is None: |
| | n = max_frames |
| | timestamps = np.linspace(start_time, end_time, n, endpoint=False, dtype=float).tolist() |
| | else: |
| | try: |
| | fps_f = float(fps) |
| | except Exception: |
| | fps_f = 0.0 |
| | if fps_f > 0.0: |
| | step = 1.0 / fps_f |
| | timestamps = np.arange(start_time, end_time, step, dtype=float).tolist() |
| | if len(timestamps) > max_frames: |
| | idxs = np.linspace(0, len(timestamps) - 1, max_frames, dtype=int).tolist() |
| | idxs = list(dict.fromkeys(idxs)) |
| | timestamps = [timestamps[i] for i in idxs][:max_frames] |
| | else: |
| | timestamps = np.linspace(start_time, end_time, max_frames, endpoint=False, dtype=float).tolist() |
| | |
| | frame_types = [1] * len(timestamps) |
| | elif len(iframe_ts) >= max_frames: |
| | |
| | idxs = np.linspace(0, len(iframe_ts) - 1, max_frames, dtype=int).tolist() |
| | idxs = list(dict.fromkeys(idxs)) |
| | if len(idxs) != max_frames: |
| | missing = max_frames - len(idxs) |
| | all_idxs = np.arange(len(iframe_ts), dtype=int).tolist() |
| | remain = [i for i in all_idxs if i not in set(idxs)] |
| | if len(remain) > 0 and missing > 0: |
| | fill = np.linspace(0, len(remain) - 1, missing, dtype=int).tolist() |
| | idxs.extend([remain[i] for i in fill]) |
| | idxs = sorted(idxs)[:max_frames] |
| | timestamps = [iframe_ts[i] for i in idxs] |
| | frame_types = [0] * len(timestamps) |
| | else: |
| | |
| | timestamps = list(iframe_ts) |
| | frame_types = [0] * len(iframe_ts) |
| | remaining = max_frames - len(iframe_ts) |
| |
|
| | if len(iframe_ts) >= 2 and remaining > 0: |
| | left = np.array(iframe_ts[:-1], dtype=float) |
| | right = np.array(iframe_ts[1:], dtype=float) |
| |
|
| | widths = (right - left).astype(float) |
| | extra_ts: List[float] = [] |
| | if fps is None: |
| | |
| | alloc = _allocate_remaining_floor_ratio(widths, remaining) |
| | for a, b, k in zip(left.tolist(), right.tolist(), alloc): |
| | extra_ts.extend(_uniform_inside(float(a), float(b), int(k))) |
| | else: |
| | |
| | |
| | order = np.argsort(-widths).tolist() |
| | rem = int(remaining) |
| | for j in order: |
| | if rem <= 0: |
| | break |
| | a = float(left[j]) |
| | b = float(right[j]) |
| | cand = _sample_inside_fps(a, b, fps) |
| | if len(cand) == 0: |
| | continue |
| | if len(cand) > rem: |
| | cand = cand[:rem] |
| | extra_ts.extend(cand) |
| | rem -= len(cand) |
| |
|
| | |
| | if len(extra_ts) > 0: |
| | iframe_set = [float(x) for x in iframe_ts] |
| | def _far_from_iframes(t: float) -> bool: |
| | return all(abs(float(t) - it) > 1e-3 for it in iframe_set) |
| | extra_ts = [t for t in extra_ts if _far_from_iframes(t)] |
| |
|
| | timestamps.extend(extra_ts) |
| | frame_types.extend([1] * len(extra_ts)) |
| | elif remaining > 0: |
| | |
| | if end_time > start_time: |
| | it = float(iframe_ts[0]) |
| | if fps is None: |
| | extra_ts = np.linspace(start_time, end_time, remaining + 2, endpoint=True, dtype=float)[1:-1].tolist() |
| | else: |
| | extra_ts = _sample_inside_fps(float(start_time), float(end_time), fps) |
| | |
| | if len(extra_ts) > remaining and remaining > 0: |
| | idxs = np.linspace(0, len(extra_ts) - 1, remaining, dtype=int).tolist() |
| | idxs = list(dict.fromkeys(idxs)) |
| | extra_ts = [extra_ts[i] for i in idxs][:remaining] |
| | elif remaining <= 0: |
| | extra_ts = [] |
| |
|
| | |
| | extra_ts = [t for t in extra_ts if abs(float(t) - it) > 1e-3] |
| | |
| | while len(extra_ts) < remaining: |
| | extra_ts.append(min(end_time, max(start_time, it + 1e-3 * (len(extra_ts) + 1)))) |
| | timestamps.extend(extra_ts[:remaining]) |
| | frame_types.extend([1] * min(remaining, len(extra_ts))) |
| |
|
| | |
| | order = np.argsort(np.array(timestamps, dtype=float)).tolist() |
| | timestamps = [float(timestamps[i]) for i in order] |
| | frame_types = [int(frame_types[i]) for i in order] |
| |
|
| | |
| | |
| | if len(timestamps) == 0: |
| | return ([], [], []) if return_frame_types else ([], []) |
| |
|
| | try: |
| | from torchcodec.decoders import VideoDecoder |
| | except Exception as ex: |
| | raise ImportError( |
| | "torchcodec is required for video decoding in mm_utils.load_video. " |
| | "Please install torchcodec (https://github.com/pytorch/torchcodec)." |
| | ) from ex |
| |
|
| | |
| | |
| | |
| | |
| | if not os.path.exists(video_path): |
| | raise FileNotFoundError(f"Video file not found: {video_path}") |
| | data: torch.Tensor |
| | decoder = VideoDecoder(video_path, seek_mode="exact" if precise_time else "approximate") |
| | stream_end_time = decoder.metadata.end_stream_seconds |
| | stream_start_time = decoder.metadata.begin_stream_seconds |
| | |
| | if start_time != 0: |
| | t_req = [max(stream_start_time + 0.001, min(float(t), stream_end_time - 0.001)) for t in timestamps] |
| | else: |
| | t_req = [min(float(t), stream_end_time - 0.001) for t in timestamps] |
| | try: |
| | batch = decoder.get_frames_played_at(torch.tensor(t_req, dtype=torch.float32)) |
| | except Exception: |
| | batch = decoder.get_frames_played_at(t_req) |
| |
|
| | raw = getattr(batch, "data", None) |
| | if raw is None: |
| | raise RuntimeError("torchcodec FrameBatch missing `.data` attribute.") |
| | if not isinstance(raw, torch.Tensor): |
| | raise RuntimeError(f"torchcodec FrameBatch `.data` is not a torch.Tensor (got {type(raw)}).") |
| | data = _normalize_uint8_nchw(raw) |
| |
|
| | |
| | _, _, H, W = data.shape |
| | if int(new_h) != int(H) or int(new_w) != int(W): |
| | data_f = data.to(torch.float32) |
| | data_f = torch.nn.functional.interpolate( |
| | data_f, |
| | size=(int(new_h), int(new_w)), |
| | mode="bilinear", |
| | align_corners=False, |
| | ) |
| | data = data_f.round().clamp(0, 255).to(torch.uint8) |
| |
|
| | n_out = int(data.shape[0]) |
| | |
| | n_keep = min(n_out, len(t_req), len(frame_types)) |
| | data = data[:n_keep] |
| | timestamps = t_req[:n_keep] |
| | frame_types = frame_types[:n_keep] |
| |
|
| | frames: List[np.ndarray] = [data[i].numpy() for i in range(n_keep)] |
| |
|
| | |
| | if temporal_factor > 1 and len(frames) > 0: |
| | pad_length = (temporal_factor - (len(frames) % temporal_factor)) % temporal_factor |
| | if pad_length > 0: |
| | if len(timestamps) >= 2: |
| | dt = float(timestamps[-1] - timestamps[-2]) |
| | dt = dt if dt > 0 else 1e-3 |
| | else: |
| | dt = 1e-3 |
| | for _ in range(pad_length): |
| | frames.append(frames[-1].copy()) |
| | timestamps.append(float(timestamps[-1] + dt)) |
| | frame_types.append(int(frame_types[-1])) |
| |
|
| | return (frames, timestamps, frame_types) if return_frame_types else (frames, timestamps) |
| |
|
| | def _load_multimodal_data(self, conversation: Conversation): |
| | multimodal_info = defaultdict(list) |
| | new_conversation = [] |
| | for message in conversation: |
| | new_message = {"role": message["role"]} |
| | if not isinstance(message["content"], (list, tuple)): |
| | new_message["content"] = message["content"] |
| | new_conversation.append(new_message) |
| | continue |
| |
|
| | new_contents = [] |
| | for content in message["content"]: |
| | if not isinstance(content, dict): |
| | new_contents.append(content) |
| | continue |
| | assert "type" in content, "Content must have 'type' field." |
| | if content["type"] in ["image", "video"] and content["type"] in content and isinstance(content[content["type"]], dict): |
| | |
| | load_args = content[content["type"]] |
| | data_id = json.dumps({k: v for k, v in load_args.items() if not k in ["start_time", "end_time"]}) |
| | new_content = copy.deepcopy(content) |
| | multimodal_info[data_id].append(new_content) |
| | new_contents.append(new_content) |
| | else: |
| | new_contents.append(content) |
| |
|
| | new_message["content"] = new_contents |
| | new_conversation.append(new_message) |
| |
|
| | for data_id, contents in multimodal_info.items(): |
| | data_type = contents[0]["type"] |
| | if data_type == "image": |
| | image = self.load_images(contents[0][data_type]["image_path"])[0] |
| | for content in contents: |
| | content["image"] = [image.copy()] |
| |
|
| | elif data_type == "video": |
| | start_times = [content["video"].get("start_time", 0.) for content in contents] |
| | end_times = [content["video"].get("end_time", float("inf")) for content in contents] |
| |
|
| | load_args = contents[0][data_type] |
| | start_time, end_time = min(start_times), max(end_times) |
| | if start_time > 0: |
| | load_args["start_time"] = start_time |
| | if end_time < float("inf"): |
| | load_args["end_time"] = end_time |
| | images, timestamps, frame_types = self.load_video(**load_args) |
| |
|
| | for content, start_time, end_time in zip(contents, start_times, end_times): |
| | cur_images, cur_timestamps, cur_frame_types = [], [], [] |
| | for image, timestamp, frame_type in zip(images, timestamps, frame_types): |
| | if start_time <= timestamp <= end_time: |
| | cur_images.append(image.copy()) |
| | cur_timestamps.append(timestamp) |
| | cur_frame_types.append(frame_type) |
| |
|
| | content[data_type] = cur_images |
| | content["num_frames"] = len(cur_images) |
| | content["timestamps"] = cur_timestamps |
| | content["frame_types"] = cur_frame_types |
| |
|
| | return new_conversation |
| |
|
| | def _gather_multimodal_data(self, conversation: Conversation): |
| | images = [] |
| | clip_frame_types = [] |
| | for message in conversation: |
| | if not isinstance(message["content"], (list, tuple)): |
| | continue |
| | for content in message["content"]: |
| | if not isinstance(content, dict): |
| | continue |
| | if content["type"] == "video": |
| | video = content["video"] |
| | assert is_valid_video(video), f"Invalid video data: {video}." |
| | images.append(("video", video)) |
| | clip_frame_types.append(content.get("frame_types", None)) |
| | elif content["type"] == "image": |
| | image = content["image"] |
| | images.append(("image", image)) |
| | clip_frame_types.append(None) |
| | if len(images) == 0: |
| | return None, None |
| | return images, clip_frame_types |
| |
|
    def _process_conversation_with_label(
        self,
        conversation: Conversation,
        image_inputs: Dict[str, Any],
        **kwargs,
    ):
        """Tokenize a conversation and build per-message training labels.

        Renders each message separately through the chat template, expands
        image tokens to their visual sequence length, and produces `labels`
        in which only assistant-response tokens (plus, after a "stream"
        message, the first response token) are supervised; everything else is
        IGNORE_INDEX. Supervised first-token ids are then randomly downsampled
        so each id occurs equally often.

        Args:
            conversation: List of role/content messages.
            image_inputs: Output of `process_images`; used to size the
                image-token expansion.
            **kwargs: Processor kwargs. `return_tensors` must be "pt" and
                `add_generation_prompt` must not be passed.

        Returns:
            Dict with concatenated 1-D `input_ids` and `labels` tensors.
        """
        assert kwargs.pop("return_tensors", "pt") == "pt", "Only PyTorch tensors are supported when return_labels=True."
        assert not "add_generation_prompt" in kwargs, "'add_generation_prompt' argument is not supported when return_labels=True."

        output_kwargs = self._merge_kwargs(
            PenguinVLQwen3ProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        # Each message is templated individually below with an explicit
        # add_generation_prompt=False, so the merged default must be removed
        # to avoid passing the kwarg twice.
        output_kwargs["chat_template_kwargs"].pop("add_generation_prompt")

        grid_sizes = self._get_downsampled_grid_sizes(image_inputs)
        text_inputs = {"input_ids": [], "labels": []}
        sample_types_list = []
        image_idx = 0

        for message_idx, message in enumerate(conversation):
            # Render one message at a time so label spans can be computed
            # per message.
            prompt = self.apply_chat_template(
                [message],
                tokenize=False,
                add_generation_prompt=False,
                **output_kwargs["chat_template_kwargs"],
            )
            # Expand every <image> token to its visual sequence length.
            prompt_chunks = prompt.split(DEFAULT_IMAGE_TOKEN)
            prompt = []
            for chunk_idx in range(len(prompt_chunks) - 1):
                prompt.append(prompt_chunks[chunk_idx])
                num_tokens = self._get_visual_seq_len(grid_sizes[image_idx])
                prompt.append(DEFAULT_IMAGE_TOKEN * num_tokens)
                image_idx += 1
            prompt.append(prompt_chunks[-1])
            prompt = "".join(prompt)

            input_ids = self.tokenizer.encode(prompt, return_tensors="pt", **output_kwargs["text_kwargs"])[0]
            text_inputs["input_ids"].append(input_ids)

            # Start fully masked; only assistant responses get supervised.
            targets = torch.full_like(input_ids, IGNORE_INDEX)
            sample_types = torch.full_like(input_ids, IGNORE_INDEX)
            if message["role"] == "assistant":
                # Unmask the response, skipping the generation-prompt prefix
                # and the final token.
                # NOTE(review): assumes self.generation_prompt_length equals
                # the templated prefix length — confirm against the template.
                targets[self.generation_prompt_length:-1] = input_ids[self.generation_prompt_length:-1].clone()

                if message_idx > 0 and conversation[message_idx - 1]["role"] == "stream":
                    # After a streaming message, also supervise the first
                    # token and record its id as this sample's "type" for the
                    # balancing step below.
                    targets[0] = input_ids[0]

                    sample_types[0] = input_ids[0]

            text_inputs["labels"].append(targets)
            sample_types_list.append(sample_types)

        text_inputs = {k: torch.cat(v) for k, v in text_inputs.items()}
        sample_types = torch.cat(sample_types_list)
        # Count how often each first-token id was supervised.
        types, counts = torch.unique(sample_types[sample_types > -1], return_counts=True)

        if len(types) > 0:
            # Randomly mask surplus occurrences so every first-token id is
            # supervised equally often (down to the rarest id's count).
            target_num_samples = counts.amin()
            for type_id, type_count in zip(types, counts):
                if type_count > target_num_samples:
                    indices = torch.nonzero(sample_types == type_id)[:, 0]
                    random_selector = torch.randperm(indices.size(0))[:-target_num_samples]
                    text_inputs["labels"][indices[random_selector]] = IGNORE_INDEX

        assert len(grid_sizes) == image_idx, "Number of images does not match the number of image tokens in the text."

        return text_inputs
| |
|
| | def _process_conversation_without_label( |
| | self, |
| | conversation: Conversation, |
| | image_inputs: Dict[str, Any], |
| | **kwargs, |
| | ): |
| | output_kwargs = self._merge_kwargs( |
| | PenguinVLQwen3ProcessorKwargs, |
| | tokenizer_init_kwargs=self.tokenizer.init_kwargs, |
| | **kwargs, |
| | ) |
| | prompt = self.apply_chat_template( |
| | conversation, |
| | tokenize=False, |
| | **output_kwargs["chat_template_kwargs"], |
| | ) |
| | return self.process_text(prompt, image_inputs, **output_kwargs["text_kwargs"]) |
| |
|
| | def _process_conversation( |
| | self, |
| | conversation: Conversation, |
| | images: Optional[Union[BatchedImage, BatchedNamedImage]] = None, |
| | return_labels: bool = False, |
| | **kwargs: Unpack[PenguinVLQwen3ProcessorKwargs], |
| | ) -> BatchFeature: |
| | assert isinstance(conversation, list), "Conversation must be a list of messages." |
| |
|
| | frame_types = None |
| | if images is None: |
| | conversation = self._load_multimodal_data(conversation) |
| | images, frame_types = self._gather_multimodal_data(conversation) |
| |
|
| | output_kwargs = self._merge_kwargs( |
| | PenguinVLQwen3ProcessorKwargs, |
| | tokenizer_init_kwargs=self.tokenizer.init_kwargs, |
| | **kwargs, |
| | ) |
| |
|
| | if images is not None: |
| | image_kwargs = output_kwargs["images_kwargs"] |
| | if frame_types is not None: |
| | image_kwargs["frame_types"] = frame_types |
| | image_inputs = self.process_images(images, **image_kwargs) |
| | else: |
| | image_inputs = {} |
| |
|
| | if return_labels: |
| | text_inputs = self._process_conversation_with_label(conversation, image_inputs, **kwargs) |
| | else: |
| | text_inputs = self._process_conversation_without_label(conversation, image_inputs, **kwargs) |
| |
|
| | return BatchFeature(data={**text_inputs, **image_inputs}) |
| |
|
| | def _process_plain( |
| | self, |
| | text: Union[TextInput, PreTokenizedInput] = None, |
| | images: Optional[Union[BatchedImage, BatchedNamedImage]] = None, |
| | return_labels: bool = False, |
| | **kwargs: Unpack[PenguinVLQwen3ProcessorKwargs], |
| | ) -> BatchFeature: |
| | if text is None: |
| | raise ValueError("You must provide 'text' or 'message'.") |
| | if return_labels: |
| | raise ValueError("return_labels is not supported for plain text processing.") |
| |
|
| | output_kwargs = self._merge_kwargs( |
| | PenguinVLQwen3ProcessorKwargs, |
| | tokenizer_init_kwargs=self.tokenizer.init_kwargs, |
| | **kwargs, |
| | ) |
| |
|
| | if images is not None: |
| | image_inputs = self.process_images(images, **output_kwargs["images_kwargs"]) |
| | else: |
| | image_inputs = {} |
| |
|
| | text_inputs = self.process_text(text, image_inputs, **output_kwargs["text_kwargs"]) |
| |
|
| | return BatchFeature(data={**text_inputs, **image_inputs}) |
| |
|
| | def process_images(self, images: Union[BatchedImage, BatchedNamedImage], **kwargs): |
| | modals, images = make_batched_images(images) |
| | if not "merge_size" in kwargs: |
| | kwargs["merge_size"] = [ |
| | self.image_merge_size if modal == "image" else self.video_merge_size |
| | for modal in modals |
| | ] |
| | image_inputs = self.image_processor(images=images, **kwargs) |
| | expanded_modals = [] |
| | for modal, img in zip(modals, images): |
| | num_frames = len(img) if is_valid_video(img) else 1 |
| | expanded_modals.extend([modal] * num_frames) |
| | image_inputs["modals"] = expanded_modals |
| | return image_inputs |
| |
|
| | def process_text( |
| | self, |
| | text: TextInput, |
| | image_inputs: Dict[str, Any], |
| | **kwargs, |
| | ): |
| | grid_sizes = self._get_downsampled_grid_sizes(image_inputs) |
| |
|
| | kwargs.pop("padding") |
| | kwargs.pop("padding_side") |
| |
|
| | image_idx = 0 |
| | while DEFAULT_IMAGE_TOKEN in text: |
| | num_tokens = self._get_visual_seq_len(grid_sizes[image_idx]) |
| | text = text.replace(DEFAULT_IMAGE_TOKEN, "<placeholder>" * num_tokens, 1) |
| | image_idx += 1 |
| | text = text.replace("<placeholder>", DEFAULT_IMAGE_TOKEN) |
| | |
| | assert len(grid_sizes) == image_idx, "Number of images does not match the number of image tokens in the text." |
| |
|
| | text_inputs = self.tokenizer(text, **kwargs) |
| | return text_inputs |
| |
|
| | def __call__( |
| | self, |
| | text: Optional[TextInput] = None, |
| | conversation: Optional[Conversation] = None, |
| | images: Optional[Union[BatchedImage, BatchedNamedImage]] = None, |
| | return_labels: bool = False, |
| | **kwargs: Unpack[PenguinVLQwen3ProcessorKwargs], |
| | ) -> BatchFeature: |
| | if conversation is not None: |
| | if text is not None: |
| | raise ValueError("You cannot provide 'message' with 'text'.") |
| | return self._process_conversation(conversation, images, return_labels, **kwargs) |
| | return self._process_plain(text, images, return_labels, **kwargs) |
| |
|
| | def batch_decode(self, *args, **kwargs): |
| | return self.tokenizer.batch_decode(*args, **kwargs) |
| |
|
| | def decode(self, *args, **kwargs): |
| | return self.tokenizer.decode(*args, **kwargs) |
| |
|
| | def apply_chat_template( |
| | self, |
| | conversation: Conversation, |
| | chat_template: Optional[str] = None, |
| | tokenize: bool = False, |
| | add_system_prompt: bool = False, |
| | add_generation_prompt: bool = False, |
| | add_think_prompt: bool = False, |
| | image_token: Optional[str] = DEFAULT_IMAGE_TOKEN, |
| | **kwargs, |
| | ) -> str: |
| | """ |
| | Similar to the `apply_chat_template` method on tokenizers, this method applies a Jinja template to input |
| | conversations to turn them into a single tokenizable string. |
| | Args: |
| | conversation (`List[Dict, str, str]`): |
| | The conversation to format. |
| | chat_template (`Optional[str]`, *optional*): |
| | The Jinja template to use for formatting the conversation. If not provided, the tokenizer's |
| | chat template is used. |
| | tokenize (`bool`, *optional*, defaults to `False`): |
| | Whether to tokenize the output or not. |
| | add_system_prompt (`bool`, *optional*, defaults to `False`): |
| | Whether to add the system prompt to the output or not. |
| | add_generation_prompt (`bool`, *optional*, defaults to `False`): |
| | Whether to add the generation prompt to the output or not. |
| | image_token (`Optional[str]`, *optional*, defaults to `<image>`): |
| | The token to use for indicating images in the conversation. |
| | **kwargs: |
| | Additional keyword arguments |
| | """ |
| |
|
| | if chat_template is None: |
| | if self.chat_template is not None: |
| | chat_template = self.chat_template |
| | else: |
| | raise ValueError( |
| | "No chat template is set for this processor. Please either set the `chat_template` attribute, " |
| | "or provide a chat template as an argument. See " |
| | "https://huggingface.co/docs/transformers/main/en/chat_templating for more information." |
| | ) |
| | return self.tokenizer.apply_chat_template( |
| | conversation, |
| | chat_template=chat_template, |
| | tokenize=tokenize, |
| | add_system_prompt=add_system_prompt, |
| | add_generation_prompt=add_generation_prompt, |
| | add_think_prompt=add_think_prompt, |
| | image_token=image_token, |
| | **kwargs |
| | ) |
| |
|
| | @property |
| | def model_input_names(self): |
| | tokenizer_input_names = self.tokenizer.model_input_names |
| | image_processor_input_names = self.image_processor.model_input_names |
| | return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + ["modals"] |
| |
|
| | |
| | def _merge_kwargs( |
| | self, |
| | ModelProcessorKwargs: ProcessingKwargs, |
| | tokenizer_init_kwargs: Optional[Dict] = None, |
| | **kwargs, |
| | ) -> Dict[str, Dict]: |
| | """ |
| | Method to merge dictionaries of kwargs cleanly separated by modality within a Processor instance. |
| | The order of operations is as follows: |
| | 1) kwargs passed as before have highest priority to preserve BC. |
| | ```python |
| | high_priority_kwargs = {"crop_size" = {"height": 222, "width": 222}, "padding" = "max_length"} |
| | processor(..., **high_priority_kwargs) |
| | ``` |
| | 2) kwargs passed as modality-specific kwargs have second priority. This is the recommended API. |
| | ```python |
| | processor(..., text_kwargs={"padding": "max_length"}, images_kwargs={"crop_size": {"height": 222, "width": 222}}}) |
| | ``` |
| | 3) kwargs passed during instantiation of a modality processor have fourth priority. |
| | ```python |
| | tokenizer = tokenizer_class(..., {"padding": "max_length"}) |
| | image_processor = image_processor_class(...) |
| | processor(tokenizer, image_processor) # will pass max_length unless overriden by kwargs at call |
| | ``` |
| | 4) defaults kwargs specified at processor level have lowest priority. |
| | ```python |
| | class MyProcessingKwargs(ProcessingKwargs, CommonKwargs, TextKwargs, ImagesKwargs, total=False): |
| | _defaults = { |
| | "text_kwargs": { |
| | "padding": "max_length", |
| | "max_length": 64, |
| | }, |
| | } |
| | ``` |
| | Args: |
| | ModelProcessorKwargs (`ProcessingKwargs`): |
| | Typed dictionary of kwargs specifically required by the model passed. |
| | tokenizer_init_kwargs (`Dict`, *optional*): |
| | Dictionary of kwargs the tokenizer was instantiated with and need to take precedence over defaults. |
| | Returns: |
| | output_kwargs (`Dict`): |
| | Dictionary of per-modality kwargs to be passed to each modality-specific processor. |
| | """ |
| | |
| | output_kwargs = { |
| | "text_kwargs": {}, |
| | "images_kwargs": {}, |
| | "audio_kwargs": {}, |
| | "videos_kwargs": {}, |
| | "chat_template_kwargs": {}, |
| | "common_kwargs": {}, |
| | } |
| |
|
| | default_kwargs = { |
| | "text_kwargs": {}, |
| | "images_kwargs": {}, |
| | "audio_kwargs": {}, |
| | "videos_kwargs": {}, |
| | "chat_template_kwargs": {}, |
| | "common_kwargs": {}, |
| | } |
| |
|
| | used_keys = set() |
| |
|
| | |
| | for modality in default_kwargs: |
| | default_kwargs[modality] = ModelProcessorKwargs._defaults.get(modality, {}).copy() |
| | |
| | for modality_key in ModelProcessorKwargs.__annotations__[modality].__annotations__.keys(): |
| | |
| | if modality_key in tokenizer_init_kwargs: |
| | value = ( |
| | getattr(self.tokenizer, modality_key) |
| | if hasattr(self.tokenizer, modality_key) |
| | else tokenizer_init_kwargs[modality_key] |
| | ) |
| | default_kwargs[modality][modality_key] = value |
| | |
| | |
| | output_kwargs.update(default_kwargs) |
| |
|
| | |
| | non_modality_kwargs = set(kwargs) - set(output_kwargs) |
| | for modality in output_kwargs: |
| | for modality_key in ModelProcessorKwargs.__annotations__[modality].__annotations__.keys(): |
| | |
| | if modality in kwargs: |
| | kwarg_value = kwargs[modality].pop(modality_key, "__empty__") |
| | |
| | if kwarg_value != "__empty__" and modality_key in non_modality_kwargs: |
| | raise ValueError( |
| | f"Keyword argument {modality_key} was passed two times:\n" |
| | f"in a dictionary for {modality} and as a **kwarg." |
| | ) |
| | elif modality_key in kwargs: |
| | |
| | |
| | kwarg_value = kwargs.get(modality_key, "__empty__") |
| | else: |
| | kwarg_value = "__empty__" |
| | if kwarg_value != "__empty__": |
| | output_kwargs[modality][modality_key] = kwarg_value |
| | used_keys.add(modality_key) |
| |
|
| | |
| | if any(key in default_kwargs for key in kwargs): |
| | |
| | for modality, subdict in kwargs.items(): |
| | if modality in default_kwargs: |
| | for subkey, subvalue in subdict.items(): |
| | if subkey not in used_keys: |
| | output_kwargs[modality][subkey] = subvalue |
| | used_keys.add(subkey) |
| | else: |
| | |
| | for key in kwargs: |
| | if key not in used_keys: |
| | output_kwargs["common_kwargs"][key] = kwargs[key] |
| |
|
| | |
| | for modality in output_kwargs: |
| | output_kwargs[modality].update(output_kwargs["common_kwargs"]) |
| | return output_kwargs |