# Copyright 2025 SVECTOR AI and The Spec-2 Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Dict, List, Optional, Union

import numpy as np
import torch
from transformers import ProcessorMixin
from transformers.image_utils import is_vision_available

from .image_processor import Spec2ImageProcessor
from .tokenizer import Spec2Tokenizer

if is_vision_available():
    from PIL import Image


class Spec2Processor(ProcessorMixin):
    """
    Constructs a Spec2 processor, which wraps a Spec2 image processor and a Spec2 tokenizer into a
    single processor that prepares text and image inputs for the model.

    Args:
        image_processor (`Spec2ImageProcessor`):
            An instance of `Spec2ImageProcessor`.
        tokenizer (`Spec2Tokenizer`):
            An instance of `Spec2Tokenizer`.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Spec2ImageProcessor"
    tokenizer_class = "Spec2Tokenizer"

    def __init__(self, image_processor, tokenizer):
        if not is_vision_available():
            raise ImportError(
                "Vision support is not available. Make sure Pillow is installed: pip install Pillow"
            )
        self.image_processor = image_processor
        self.tokenizer = tokenizer
        self.image_token = self.tokenizer.image_token
        self.image_token_id = self.tokenizer.image_token_id

    def __call__(
        self,
        text=None,
        images=None,
        return_tensors=None,
        **kwargs
    ):
        """
        Process text and images for the model.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The text to be processed. Can be a string, a list of strings, or a list of lists of strings.
            images (`PIL.Image.Image`, `List[PIL.Image.Image]`, `torch.Tensor`, `List[torch.Tensor]`):
                The images to be processed. Can be a PIL image, a list of PIL images, a tensor, or a list of tensors.
            return_tensors (`str`, *optional*):
                The type of tensors to return. Can be `'pt'` (PyTorch) or `'np'` (NumPy).

        Returns:
            A dictionary with the processed inputs, e.g. `input_ids`, `attention_mask`, and `pixel_values`.
        """
        encoding = {}

        # Process text inputs
        if text is not None:
            text_inputs = self._process_text(text, **kwargs)
            encoding.update(text_inputs)

        # Process image inputs
        if images is not None:
            image_features = self._process_images(images, **kwargs)
            encoding.update(image_features)

        # Multimodal case: align the image token(s) in the text with the image features
        if text is not None and images is not None:
            encoding = self._merge_text_and_image_features(encoding, **kwargs)

        # Convert to tensors if requested
        if return_tensors is not None:
            encoding = self._convert_to_tensors(encoding, return_tensors=return_tensors)

        return encoding
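
    # Note on the returned keys (derived from the helper methods below): text-only calls yield the
    # tokenizer outputs (e.g. `input_ids`, `attention_mask`); image-only calls yield the image
    # processor outputs (e.g. `pixel_values`); calls with both additionally carry
    # `image_token_index` (single example) or `image_token_indices` (batch), marking where the
    # image token sits in `input_ids`.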

    def _process_text(self, text, **kwargs):
        """Process text inputs, appending the image token when it is missing."""
        if isinstance(text, str):
            # For a single string, append the image token at the end if it is not already present
            if self.image_token not in text:
                text = f"{text} {self.image_token}"
        elif isinstance(text, list):
            # For a list of strings, append the image token to any entry that is missing it
            if all(isinstance(t, str) for t in text):
                text = [f"{t} {self.image_token}" if self.image_token not in t else t for t in text]

        # Tokenize text
        text_encoding = self.tokenizer(text, return_tensors=None, **kwargs)
        return text_encoding
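
    # Illustration (the literal image token string comes from the tokenizer; "<image>" is only a
    # placeholder here): "Describe this photo." -> "Describe this photo. <image>"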

    def _process_images(self, images, **kwargs):
        """Process image inputs with the image processor."""
        # Wrap a single image in a list so the image processor always receives a batch
        if not isinstance(images, list):
            images = [images]
        image_features = self.image_processor(images, return_tensors=None, **kwargs)
        return image_features

    def _merge_text_and_image_features(self, encoding, **kwargs):
        """Merge text and image features for multimodal inputs.

        For Spec-2 the token order is preserved; this method records where the image token sits in
        `input_ids` (appending it when missing) so the model can splice in the image embeddings.
        """
        input_ids = encoding.get("input_ids", [])
        pixel_values = encoding.get("pixel_values", [])

        if isinstance(input_ids[0], list):  # batched case
            merged_encoding = {
                "input_ids": input_ids,
                "pixel_values": pixel_values,
                "image_token_indices": [],
            }
            # For each example in the batch, find the position of the image token
            for i, ids in enumerate(input_ids):
                positions = [j for j, id_val in enumerate(ids) if id_val == self.image_token_id]
                if positions:
                    merged_encoding["image_token_indices"].append(positions[0])
                else:
                    # No image token found: append it and keep the attention mask in sync
                    ids.append(self.image_token_id)
                    if "attention_mask" in encoding:
                        encoding["attention_mask"][i].append(1)
                    merged_encoding["image_token_indices"].append(len(ids) - 1)
            if "attention_mask" in encoding:
                merged_encoding["attention_mask"] = encoding["attention_mask"]
        else:  # single example
            positions = [i for i, id_val in enumerate(input_ids) if id_val == self.image_token_id]
            if positions:
                image_token_index = positions[0]
            else:
                # No image token found: append it and keep the attention mask in sync
                input_ids.append(self.image_token_id)
                if "attention_mask" in encoding:
                    encoding["attention_mask"].append(1)
                image_token_index = len(input_ids) - 1
            merged_encoding = {
                "input_ids": input_ids,
                "pixel_values": pixel_values[0] if pixel_values else None,
                "image_token_index": image_token_index,
            }
            if "attention_mask" in encoding:
                merged_encoding["attention_mask"] = encoding["attention_mask"]

        return merged_encoding
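
    # Illustration (values are hypothetical, not taken from the actual tokenizer): for a single
    # example with input_ids = [5, 17, 42, 99] and image_token_id == 99, the merged encoding
    # carries image_token_index = 3, i.e. the position at which the image embedding is inserted.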

    def _convert_to_tensors(self, encoding, return_tensors="pt"):
        """Convert processed features to tensors of the requested framework."""
        for key, value in encoding.items():
            if key in ["pixel_values", "input_ids", "attention_mask"]:
                if return_tensors == "pt":
                    if isinstance(value, list) and all(isinstance(v, list) for v in value):
                        # Batched inputs: list of lists -> 2D tensor
                        encoding[key] = torch.tensor(value)
                    elif isinstance(value, list):
                        # Single input: add a batch dimension
                        encoding[key] = torch.tensor([value])
                    elif isinstance(value, np.ndarray):
                        encoding[key] = torch.tensor(value)
                elif return_tensors == "np":
                    if isinstance(value, list):
                        encoding[key] = np.array(value)
                    elif isinstance(value, torch.Tensor):
                        encoding[key] = value.numpy()
                # Other tensor types (e.g. TensorFlow) can be added here as needed
        return encoding

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # Preserve order while removing duplicates
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
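

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the processor itself). The checkpoint id
# "SVECTOR-OFFICIAL/Spec-2" and the image path are assumptions; substitute the
# actual repository id and a real image for your setup.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoProcessor

    # Loading custom processor code from the Hub requires trust_remote_code=True
    processor = AutoProcessor.from_pretrained("SVECTOR-OFFICIAL/Spec-2", trust_remote_code=True)

    image = Image.open("example.jpg").convert("RGB")
    inputs = processor(text="Describe this image.", images=image, return_tensors="pt")
    print({k: tuple(v.shape) for k, v in inputs.items() if hasattr(v, "shape")})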