Adaptation to HF Multimodal Processor
#1 · by mlinmg · opened
- README.md +6 -12
- chat_template.json +3 -0
- config.json +2 -1
- modeling_ovis.py +0 -69
- processing_ovis.py +355 -0
- tokenizer_config.json +1 -1
README.md
CHANGED
@@ -159,20 +159,14 @@ pixel_values = [pixel_values]
 
 # generate output
 with torch.inference_mode():
-    gen_kwargs = dict(
-        max_new_tokens=1024,
-        do_sample=False,
-        top_p=None,
-        top_k=None,
-        temperature=None,
-        repetition_penalty=None,
-        eos_token_id=model.generation_config.eos_token_id,
-        pad_token_id=text_tokenizer.pad_token_id,
-        use_cache=True
-    )
-    output_ids = model.generate(input_ids, pixel_values=pixel_values, attention_mask=attention_mask, **gen_kwargs)[0]
+    if inputs['pixel_values'] is not None:
+        inputs['pixel_values'] = [pix.to(model.dtype).to(model.device) for pix in inputs['pixel_values']]
+    inputs = inputs.to('cuda')
+
+    output_ids = model.generate(inputs=inputs.pop('input_ids'), **inputs)[0]
     output = text_tokenizer.decode(output_ids, skip_special_tokens=True)
     print(f'Output:\n{output}')
+
 ```
 
 <details>
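For context, the updated README corresponds to roughly this end-to-end flow once the processor added below is in the repo. A minimal sketch, not the PR's verbatim README: the repo id is a placeholder, and it assumes `AutoProcessor` resolves to the `OvisProcessor` registered in `config.json` below.

```python
# Sketch of the processor-driven flow this PR enables (placeholder repo id).
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

repo = "<ovis-model-repo>"  # placeholder, substitute the actual model repo
model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype=torch.bfloat16,
                                             trust_remote_code=True).cuda()
processor = AutoProcessor.from_pretrained(repo, trust_remote_code=True)

image = Image.open("example.jpg")
text = "<image>\nDescribe this image."          # IMAGE_TOKEN marks where tiles go
inputs = processor(images=[image], text=text)   # return_tensors='pt' is the default

with torch.inference_mode():
    # mirrors the updated README: cast the per-image tensors, move the batch
    if inputs["pixel_values"] is not None:
        inputs["pixel_values"] = [p.to(model.dtype).to(model.device) for p in inputs["pixel_values"]]
    inputs = inputs.to("cuda")
    output_ids = model.generate(inputs=inputs.pop("input_ids"), **inputs)[0]
print(processor.decode(output_ids, skip_special_tokens=True))
```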
chat_template.json
ADDED
@@ -0,0 +1,3 @@
+{
+    "chat_template": "{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}<image>\n{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}<|im_end|>\n{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"
+}
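To see what this template produces, here is a sketch of rendering a typical multimodal message list through it, assuming a transformers version where `ProcessorMixin.apply_chat_template` is available and the processor loaded as in the sketch above:

```python
# Sketch: rendering the added chat template (continues the earlier setup).
messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "What is in this picture?"},
    ]},
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
print(prompt)
# Expected, per the template above:
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# <image>
# What is in this picture?<|im_end|>
# <|im_start|>assistant
```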
config.json
CHANGED
@@ -4,7 +4,8 @@
   ],
   "auto_map": {
     "AutoConfig": "configuration_ovis.OvisConfig",
-    "AutoModelForCausalLM": "modeling_ovis.Ovis"
+    "AutoModelForCausalLM": "modeling_ovis.Ovis",
+    "AutoProcessor": "processing_ovis.OvisProcessor"
   },
   "conversation_formatter_class": "QwenConversationFormatter",
   "disable_tie_weight": false,
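The new `AutoProcessor` entry is what lets the Auto class resolve the custom processor from the repo-hosted `processing_ovis.py`. A short sketch (placeholder repo id; `trust_remote_code=True` is required for code shipped inside a model repo):

```python
from transformers import AutoProcessor

# auto_map tells AutoProcessor which repo-local class to instantiate
processor = AutoProcessor.from_pretrained("<ovis-model-repo>", trust_remote_code=True)
print(type(processor).__name__)  # expected: OvisProcessor
```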
modeling_ovis.py
CHANGED
@@ -480,75 +480,6 @@ class Ovis(OvisPreTrainedModel):
         pad_sequence = torch.nn.utils.rnn.pad_sequence([i.flip(dims=[0]) for i in sequences], batch_first=True, padding_value=padding_value).flip(dims=[1])
         return pad_sequence[:, -self.config.multimodal_max_length:]
 
-    def preprocess_inputs(
-        self,
-        text_or_conversations: Union[List[Dict], str],
-        images: Optional[List[PIL.Image.Image]],
-        max_partition=9,
-        generation_preface='',
-        return_labels=False,
-        propagate_exception=True,
-        frame_selector=None,
-        frame_selector_kwargs=None
-    ):
-        # convert text to conversations
-        if isinstance(text_or_conversations, str):
-            conversations = [{
-                "from": "human",
-                "value": text_or_conversations
-            }]
-        elif isinstance(text_or_conversations, list):
-            conversations = text_or_conversations
-        else:
-            raise ValueError(f'Invalid type of `text_or_conversations`, expected `List[Dict]` or `str`,'
-                             f' but got {type(text_or_conversations)}')
-
-        if frame_selector is not None:
-            frame_selector_kwargs = frame_selector_kwargs or {}
-            conversations, images = frame_selector(conversations=conversations, frames=images, **frame_selector_kwargs)
-
-        # format conversations
-        prompt, raw_input_ids, raw_labels = self.get_conversation_formatter().format(
-            conversations, generation_preface=generation_preface)
-
-        # place image placeholders
-        input_ids = []
-        labels = []
-        pixel_values = []
-        invalidate_label = False
-        image_token_indices = [i for i, v in enumerate(raw_input_ids) if v == IMAGE_TOKEN_ID]
-        last_image_token_index = -1
-        for i in range(len(image_token_indices)):
-            head = 0 if i == 0 else image_token_indices[i - 1] + 1
-            tail = image_token_indices[i]
-            last_image_token_index = tail
-            input_ids.extend(raw_input_ids[head:tail])
-            labels.extend(raw_labels[head:tail])
-            try:
-                image = images[i]
-                raw_pixel_values, image_placeholders = self.visual_tokenizer.preprocess_image(
-                    image, max_partition=max_partition)
-            except Exception as e:
-                if propagate_exception:
-                    raise e
-                logging.exception(e)
-                invalidate_label = True
-                raw_pixel_values, image_placeholders = self.visual_tokenizer.mock_input()
-            input_ids.extend(image_placeholders)
-            labels.extend([IGNORE_ID] * len(image_placeholders))
-            pixel_values.append(raw_pixel_values)
-        input_ids.extend(raw_input_ids[last_image_token_index + 1:])
-        labels.extend(raw_labels[last_image_token_index + 1:])
-
-        # return tensors
-        input_ids = torch.tensor(input_ids, dtype=torch.long)
-        labels = torch.tensor([IGNORE_ID] * len(labels) if invalidate_label else labels, dtype=torch.long)
-        pixel_values = torch.cat(pixel_values, dim=0) if len(pixel_values) > 0 else None
-
-        if return_labels:
-            return prompt, input_ids, pixel_values, labels
-        else:
-            return prompt, input_ids, pixel_values
 
     def save_pretrained(
         self,
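The deleted `preprocess_inputs` is not lost functionality: prompt splitting, placeholder insertion, and image tiling move into `OvisProcessor` below. A rough before/after sketch, continuing the processor setup from the earlier examples:

```python
# Before this PR, callers used the method deleted above:
#   prompt, input_ids, pixel_values = model.preprocess_inputs(query, [image], max_partition=9)

# After this PR, the equivalent preprocessing is a processor call;
# max_partition defaults to 9 via OvisProcessorKwargs in processing_ovis.py.
query = "<image>\nDescribe this image."
inputs = processor(images=[image], text=query)
input_ids = inputs["input_ids"]        # batched, with placeholder ids spliced in
pixel_values = inputs["pixel_values"]  # list with one tensor of tiled crops per image
```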
processing_ovis.py
ADDED
@@ -0,0 +1,355 @@
+# coding=utf-8
+# Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from collections import defaultdict
+from typing import List, Union
+
+import PIL
+import torch
+from transformers import BatchFeature
+from transformers.image_utils import ImageInput
+from transformers.processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
+from transformers.tokenization_utils_base import TextInput, PreTokenizedInput
+
+IGNORE_ID = -100
+IMAGE_TOKEN_ID = -200
+IMAGE_TOKEN = "<image>"
+IMAGE_ATOM_ID = -300
+IMAGE_INDICATOR_IDS = [-301, -302, -303, -304, -305]
+
+
+class OvisProcessorKwargs(ProcessingKwargs, total=False):
+    _defaults = {
+        "text_kwargs": {
+            "padding": False,
+        },
+        "images_kwargs": {
+            "max_partition": 9,
+            "covering_threshold": 0.9,
+            "convert_to_rgb": True,
+            "return_tensors": "pt",
+        },
+    }
+
+
+class OvisProcessor(ProcessorMixin):
+    r"""
+    Constructs an Ovis processor which wraps an Ovis image processor and a Qwen2 tokenizer into a single processor.
+    [`OvisProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`] and [`Qwen2TokenizerFast`]. See the
+    [`~OvisProcessor.__call__`] and [`~OvisProcessor.decode`] for more information.
+    Args:
+        image_processor ([`Qwen2VLImageProcessor`], *optional*):
+            The image processor is a required input.
+        tokenizer ([`Qwen2TokenizerFast`], *optional*):
+            The tokenizer is a required input.
+        chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
+            in a chat into a tokenizable string.
+    """
+
+    attributes = ["image_processor", "tokenizer"]
+    valid_kwargs = ["chat_template"]
+
+    image_processor_class = "AutoImageProcessor"
+    tokenizer_class = ("Qwen2Tokenizer", "Qwen2TokenizerFast")
+
+    def __init__(self, image_processor=None, tokenizer=None, chat_template=None, **kwargs):
+        self.image_token = "<|image_pad|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
+        self.video_token = "<|video_pad|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token
+        super().__init__(image_processor, tokenizer, chat_template=chat_template)
+
+    def __call__(
+        self,
+        images: ImageInput = None,
+        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+        **kwargs: Unpack[OvisProcessorKwargs],
+    ) -> BatchFeature:
+        """
+        Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
+        and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
+        the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwargs` arguments to
+        Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `vision_infos` is not `None`.
+
+        Args:
+            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
+                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
+                tensor. Both channels-first and channels-last formats are supported.
+            text (`str`, `List[str]`, `List[List[str]]`):
+                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+            videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
+                The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
+                tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
+            return_tensors (`str` or [`~utils.TensorType`], *optional*):
+                If set, will return tensors of a particular framework. Acceptable values are:
+                - `'tf'`: Return TensorFlow `tf.constant` objects.
+                - `'pt'`: Return PyTorch `torch.Tensor` objects.
+                - `'np'`: Return NumPy `np.ndarray` objects.
+                - `'jax'`: Return JAX `jnp.ndarray` objects.
+
+        Returns:
+            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
+
+            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
+            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
+              `None`).
+            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
+            - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
+            - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
+            - **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`.
+            - **second_per_grid_ts** -- List of video seconds per time grid. Returned when `videos` is not `None`.
+        """
+        output_kwargs = self._merge_kwargs(
+            OvisProcessorKwargs,
+            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
+            **kwargs,
+        )
+
+        # Process all images first
+        image_features = {}
+        if images is not None:
+            processed_images = []
+            image_placeholders_list = []
+
+            # Process each image
+            for image in images if isinstance(images, list) else [images]:
+                pixel_values, image_placeholders = self.preprocess_image(
+                    image=image, **output_kwargs["images_kwargs"]
+                )
+                processed_images.append(pixel_values)
+                image_placeholders_list.append(image_placeholders)
+
+            # assign all processed images
+            if processed_images:
+                image_features["image_placeholders"] = image_placeholders_list
+
+        # Process text input
+        if text is not None:
+            if not isinstance(text, list):
+                text = [text]
+
+            all_input_ids = torch.tensor([], dtype=torch.long)
+            all_attention_mask = torch.tensor([], dtype=torch.long)
+
+            for idx, txt in enumerate(text):
+                # Split text by IMAGE_TOKEN
+                text_parts = txt.split(IMAGE_TOKEN)
+
+                # Tokenize each text part
+                full_input_ids = torch.tensor([], dtype=torch.long)
+                full_attention_mask = torch.tensor([], dtype=torch.long)
+
+                for i, part in enumerate(text_parts):
+                    # Process text part
+                    text_tokens = self.tokenizer(part, **output_kwargs["text_kwargs"])
+                    full_input_ids = torch.cat([full_input_ids, torch.tensor(text_tokens.input_ids, dtype=full_input_ids.dtype, device=full_input_ids.device)], dim=-1)
+                    full_attention_mask = torch.cat([full_attention_mask, torch.tensor(text_tokens.attention_mask)], dim=-1)
+
+                    # Add image placeholder tokens after each text part (except the last one)
+                    if i < len(text_parts) - 1 and "image_placeholders" in image_features:
+                        if idx < len(image_features["image_placeholders"]):
+                            placeholder_ids = image_features["image_placeholders"][idx]
+                            full_input_ids = torch.cat([full_input_ids, torch.tensor(placeholder_ids).unsqueeze(0)], dim=-1)
+                            full_attention_mask = torch.cat([full_attention_mask, torch.tensor([1] * len(placeholder_ids)).unsqueeze(0)], dim=-1)
+                last_bigger_tensor_dim = all_input_ids.shape[-1]
+                if full_input_ids.shape[-1] > last_bigger_tensor_dim > 0:  # we skip the first
+                    # we pad the all_input_ids with pad tokens and we adjust the attn mask
+                    all_input_ids = torch.cat([all_input_ids,
+                                               torch.full((1, full_input_ids.shape[-1] - last_bigger_tensor_dim),
+                                                          self.tokenizer.pad_token_id, dtype=torch.long)], dim=-1)
+                    all_attention_mask = torch.cat([all_attention_mask,
+                                                    torch.zeros((1, full_input_ids.shape[-1] - last_bigger_tensor_dim),
+                                                                dtype=torch.long)], dim=-1)
+                    last_bigger_tensor_dim = full_input_ids.shape[-1]
+                all_input_ids = torch.cat([all_input_ids, full_input_ids], dim=0)
+                all_attention_mask = torch.cat([all_attention_mask, full_attention_mask], dim=0)
+
+            # Create the output with text features
+            output = BatchFeature(
+                data={
+                    "input_ids": all_input_ids,
+                    "attention_mask": all_attention_mask,
+                }
+            )
+
+            # Add image features if present
+            if image_features:
+                output["pixel_values"] = processed_images
+
+            return output
+
+        # If only images were provided
+        return BatchFeature(data=image_features)
+
+    def get_image_size(self):
+        height = self.image_processor.crop_size["height"]
+        width = self.image_processor.crop_size["width"]
+        return height, width
+
+    @staticmethod
+    def construct_image_placeholders(grid):
+        image_placeholders = [IMAGE_INDICATOR_IDS[0], IMAGE_ATOM_ID, IMAGE_INDICATOR_IDS[1]]
+        if grid[0] * grid[1] > 1:
+            for r in range(grid[0]):
+                for c in range(grid[1]):
+                    image_placeholders.append(IMAGE_ATOM_ID)
+                    if c < grid[1] - 1:
+                        image_placeholders.append(IMAGE_INDICATOR_IDS[2])
+                if r < grid[0] - 1:
+                    image_placeholders.append(IMAGE_INDICATOR_IDS[3])
+        image_placeholders.append(IMAGE_INDICATOR_IDS[4])
+        return image_placeholders
+
+    def preprocess_image(self, image: PIL.Image.Image, max_partition, covering_threshold, convert_to_rgb, return_tensors):
+        def _preprocess(img: PIL.Image.Image, side):
+            # first resize and preprocess
+            w, h = img.size
+            if w == h:
+                new_width = new_height = side
+            elif w > h:
+                new_width = side
+                new_height = int(h / w * new_width)
+            else:
+                new_height = side
+                new_width = int(w / h * new_height)
+            new_size = dict(height=new_height, width=new_width)
+            pixel_values = self.image_processor.preprocess(img, size=new_size, return_tensors=return_tensors)['pixel_values']
+
+            # then pad to square
+            square_values = torch.zeros([1, 3, side, side], dtype=pixel_values.dtype, device=pixel_values.device)
+            new_height, new_width = pixel_values.shape[2:]
+            if new_height == new_width:
+                square_values[:, :, :, :] = pixel_values
+            elif new_height > new_width:
+                from_index = (side - new_width) // 2
+                square_values[:, :, :, from_index:from_index + new_width] = pixel_values
+            else:
+                from_index = (side - new_height) // 2
+                square_values[:, :, from_index:from_index + new_height, :] = pixel_values
+
+            return square_values
+
+        def _partition(img, grid):
+            w, h = img.size
+            row_height = h // grid[0]
+            col_width = w // grid[1]
+
+            partition = []
+            for row in range(grid[0]):
+                for col in range(grid[1]):
+                    left = col * col_width
+                    upper = row * row_height
+                    right = w if col == grid[1] - 1 else (col + 1) * col_width
+                    lower = h if row == grid[0] - 1 else (row + 1) * row_height
+                    partition.append((left, upper, right, lower))
+
+            return partition
+
+        def _covering_area(left, upper, right, lower, side):
+            w = right - left
+            h = lower - upper
+            w, h = max(w, h), min(w, h)
+            if w > side:
+                h = h / w * side
+                w = side
+            return w * h
+
+        def _get_best_grid(img, side):
+            img_area = img.size[0] * img.size[1]
+
+            candidate_grids = []
+            for i in range(1, max_partition + 1):
+                for j in range(1, max_partition + 1):
+                    if i * j <= max_partition:
+                        candidate_grids.append((i, j))
+
+            all_grids = []
+            good_grids = []
+            for grid in candidate_grids:
+                partition = _partition(img, grid)
+                covering_ratio = sum([_covering_area(*p, side) for p in partition]) / img_area
+                assert covering_ratio <= 1.0
+                all_grids.append((grid, covering_ratio))
+                if covering_ratio > covering_threshold:
+                    good_grids.append((grid, covering_ratio))
+
+            if len(good_grids) > 0:
+                # pick the good partition with minimum #sub_images and break the tie using covering_ratio
+                return sorted(good_grids, key=lambda x: (x[0][0] * x[0][1], -x[1]))[0][0]
+            else:
+                # pick the partition with maximum covering_ratio and break the tie using #sub_images
+                return sorted(all_grids, key=lambda x: (-x[1], x[0][0] * x[0][1]))[0][0]
+
+        if convert_to_rgb and image.mode != 'RGB':
+            image = image.convert('RGB')
+
+        sides = self.get_image_size()
+        if sides[0] != sides[1]:
+            raise ValueError('get_image_size() returns non-square size')
+        side = sides[0]
+        grid = _get_best_grid(image, side)
+        partition = _partition(image, grid)
+        crops = [image.crop(p) for p in partition]
+        if len(crops) > 1:
+            crops.insert(0, image)
+        pixel_values = torch.cat([_preprocess(crop, side) for crop in crops], dim=0)
+        image_placeholders = self.construct_image_placeholders(grid)
+        return pixel_values, image_placeholders
+
+    def batch_decode(self, *args, **kwargs):
+        """
+        This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+        refer to the docstring of this method for more information.
+        """
+        return self.tokenizer.batch_decode(*args, **kwargs)
+
+    def decode(self, *args, **kwargs):
+        """
+        This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+        the docstring of this method for more information.
+        """
+        return self.tokenizer.decode(*args, **kwargs)
+
+    def post_process_image_text_to_text(self, generated_outputs):
+        """
+        Post-process the output of the model to decode the text.
+
+        Args:
+            generated_outputs (`torch.Tensor` or `np.ndarray`):
+                The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
+                or `(sequence_length,)`.
+
+        Returns:
+            `List[str]`: The decoded text.
+        """
+        return self.tokenizer.batch_decode(
+            generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
+        )
+
+    @property
+    def model_input_names(self):
+        tokenizer_input_names = self.tokenizer.model_input_names
+        image_processor_input_names = self.image_processor.model_input_names
+        names_from_processor = list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
+        return names_from_processor + ["second_per_grid_ts"]
+
+
+__all__ = ["OvisProcessor"]
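To make the placeholder scheme in `construct_image_placeholders` concrete, here is the sequence it produces for a 2x2 partition grid. The role names in the comments are my reading of the loop (the indicator ids are unnamed in the PR):

```python
# Worked example: placeholder layout for a 2x2 grid.
# IMAGE_ATOM_ID (-300) marks each sub-image slot; IMAGE_INDICATOR_IDS
# (-301..-305) appear to mark start, grid begin, column break, row break, end.
from processing_ovis import OvisProcessor

print(OvisProcessor.construct_image_placeholders((2, 2)))
# [-301, -300, -302,          start, overview atom, grid begin
#  -300, -303, -300, -304,    row 0: atom, column break, atom, row break
#  -300, -303, -300,          row 1: atom, column break, atom
#  -305]                      end
```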
tokenizer_config.json
CHANGED
@@ -195,7 +195,7 @@
     "<|video_pad|>"
   ],
   "bos_token": null,
-  "chat_template": "{
+  "chat_template": "{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}{% if message['content'] is string %}\n{{ message['content'] }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or (content is mapping and ('image' in content or 'image_url' in content)) %}\n<image>{% elif content['type'] == 'text' or 'text' in content %}\n{{ content['text'] }}{% endif %}{% endfor %}{% endif %}<|im_end|>\n{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
   "errors": "replace",
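For reference, the updated tokenizer-level template also handles plain-string message content. A sketch (placeholder repo id; the expected output follows the template above):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("<ovis-model-repo>")  # placeholder repo
messages = [{"role": "user", "content": "Hello!"}]
print(tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```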