|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from typing import Dict, List, Optional, Union |
|
|
|
|
|
import numpy as np |
|
|
import PIL.Image |
|
|
import torch |
|
|
from transformers import ProcessorMixin |
|
|
from transformers.image_utils import PILImageResampling, is_vision_available |
|
|
|
|
|
from .image_processor import Spec2ImageProcessor |
|
|
from .tokenizer import Spec2Tokenizer |
|
|
|
|
|
|
|
|
if is_vision_available(): |
|
|
from PIL import Image |
|
|
|
|
|
|
|
|
class Spec2Processor(ProcessorMixin): |
|
|
""" |
|
|
Constructs a Spec2 processor which combines a Spec2 image processor and a Spec2 tokenizer into a single processor. |
|
|
|
|
|
The processor can be used to prepare inputs for the model by processing text and images appropriately. |
|
|
|
|
|
Args: |
|
|
image_processor (`Spec2ImageProcessor`): |
|
|
An instance of `Spec2ImageProcessor`. |
|
|
tokenizer (`Spec2Tokenizer`): |
|
|
An instance of `Spec2Tokenizer`. |
|
|
""" |
|
|
|
|
|
attributes = ["image_processor", "tokenizer"] |
|
|
image_processor_class = "Spec2ImageProcessor" |
|
|
tokenizer_class = "Spec2Tokenizer" |
|
|
|
|
|
def __init__(self, image_processor, tokenizer): |
|
|
if not is_vision_available(): |
|
|
raise ImportError("Vision libraries are not available. Make sure to install PIL and Pillow: pip install Pillow") |
|
|
|
|
|
self.image_processor = image_processor |
|
|
self.tokenizer = tokenizer |
|
|
self.image_token = self.tokenizer.image_token |
|
|
self.image_token_id = self.tokenizer.image_token_id |
|
|
|
|
|
def __call__( |
|
|
self, |
|
|
text=None, |
|
|
images=None, |
|
|
return_tensors=None, |
|
|
**kwargs |
|
|
): |
|
|
""" |
|
|
Process text and images for the model. |
|
|
|
|
|
Args: |
|
|
text (`str`, `List[str]`, `List[List[str]]`): |
|
|
The text to be processed. Can be a string, a list of strings or a list of lists of strings. |
|
|
images (`PIL.Image.Image`, `List[PIL.Image.Image]`, `torch.Tensor`, `List[torch.Tensor]`): |
|
|
The images to be processed. Can be a PIL Image, a list of PIL Images, a tensor or a list of tensors. |
|
|
return_tensors (`str`, optional): |
|
|
The type of tensors to return. Can be one of: 'pt' (PyTorch), 'tf' (TensorFlow), 'np' (NumPy). |
|
|
|
|
|
Returns: |
|
|
A dictionary containing the processed inputs with keys like 'input_ids', 'attention_mask', 'pixel_values', etc. |
|
|
""" |
|
|
encoding = {} |
|
|
|
|
|
|
|
|
if text is not None: |
|
|
text_inputs = self._process_text(text, **kwargs) |
|
|
encoding.update(text_inputs) |
|
|
|
|
|
|
|
|
if images is not None: |
|
|
image_features = self._process_images(images, **kwargs) |
|
|
encoding.update(image_features) |
|
|
|
|
|
|
|
|
if text is not None and images is not None: |
|
|
encoding = self._merge_text_and_image_features(encoding, **kwargs) |
|
|
|
|
|
|
|
|
if return_tensors is not None: |
|
|
encoding = self._convert_to_tensors(encoding, return_tensors=return_tensors) |
|
|
|
|
|
return encoding |
|
|
|
|
|
def _process_text(self, text, **kwargs): |
|
|
"""Process text inputs.""" |
|
|
if isinstance(text, str): |
|
|
|
|
|
if self.image_token not in text: |
|
|
|
|
|
text = f"{text} {self.image_token}" |
|
|
elif isinstance(text, list): |
|
|
|
|
|
if all(isinstance(t, str) for t in text): |
|
|
text = [f"{t} {self.image_token}" if self.image_token not in t else t for t in text] |
|
|
|
|
|
|
|
|
text_encoding = self.tokenizer(text, return_tensors=None, **kwargs) |
|
|
return text_encoding |
|
|
|
|
|
def _process_images(self, images, **kwargs): |
|
|
"""Process image inputs.""" |
|
|
|
|
|
if not isinstance(images, list): |
|
|
images = [images] |
|
|
|
|
|
|
|
|
image_features = self.image_processor(images, return_tensors=None, **kwargs) |
|
|
return image_features |
|
|
|
|
|
def _merge_text_and_image_features(self, encoding, **kwargs): |
|
|
"""Merge text and image features for multimodal inputs.""" |
|
|
|
|
|
|
|
|
|
|
|
input_ids = encoding.get("input_ids", []) |
|
|
pixel_values = encoding.get("pixel_values", []) |
|
|
|
|
|
if isinstance(input_ids[0], list): |
|
|
batch_size = len(input_ids) |
|
|
merged_encoding = { |
|
|
"input_ids": input_ids, |
|
|
"pixel_values": pixel_values, |
|
|
"image_token_indices": [] |
|
|
} |
|
|
|
|
|
|
|
|
for i, ids in enumerate(input_ids): |
|
|
image_token_indices = [j for j, id_val in enumerate(ids) if id_val == self.image_token_id] |
|
|
if image_token_indices: |
|
|
merged_encoding["image_token_indices"].append(image_token_indices[0]) |
|
|
else: |
|
|
|
|
|
input_ids[i].append(self.image_token_id) |
|
|
merged_encoding["image_token_indices"].append(len(input_ids[i]) - 1) |
|
|
|
|
|
|
|
|
if "attention_mask" in encoding: |
|
|
merged_encoding["attention_mask"] = encoding["attention_mask"] |
|
|
|
|
|
else: |
|
|
image_token_indices = [i for i, id_val in enumerate(input_ids) if id_val == self.image_token_id] |
|
|
if image_token_indices: |
|
|
image_token_index = image_token_indices[0] |
|
|
else: |
|
|
|
|
|
input_ids.append(self.image_token_id) |
|
|
image_token_index = len(input_ids) - 1 |
|
|
|
|
|
merged_encoding = { |
|
|
"input_ids": input_ids, |
|
|
"pixel_values": pixel_values[0] if pixel_values else None, |
|
|
"image_token_index": image_token_index |
|
|
} |
|
|
|
|
|
|
|
|
if "attention_mask" in encoding: |
|
|
merged_encoding["attention_mask"] = encoding["attention_mask"] |
|
|
|
|
|
return merged_encoding |
|
|
|
|
|
def _convert_to_tensors(self, encoding, return_tensors="pt"): |
|
|
"""Convert processed features to tensors.""" |
|
|
|
|
|
for key, value in encoding.items(): |
|
|
if key in ["pixel_values", "input_ids", "attention_mask"]: |
|
|
if return_tensors == "pt": |
|
|
if isinstance(value, list) and all(isinstance(v, list) for v in value): |
|
|
|
|
|
encoding[key] = torch.tensor(value) |
|
|
elif isinstance(value, list): |
|
|
|
|
|
encoding[key] = torch.tensor([value]) |
|
|
elif isinstance(value, np.ndarray): |
|
|
encoding[key] = torch.tensor(value) |
|
|
elif return_tensors == "np": |
|
|
if isinstance(value, list): |
|
|
encoding[key] = np.array(value) |
|
|
elif isinstance(value, torch.Tensor): |
|
|
encoding[key] = value.numpy() |
|
|
|
|
|
|
|
|
return encoding |
|
|
|
|
|
@property |
|
|
def model_input_names(self): |
|
|
tokenizer_input_names = self.tokenizer.model_input_names |
|
|
image_processor_input_names = self.image_processor.model_input_names |
|
|
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) |