from typing import List, Optional, Union
import numpy as np
from transformers import BatchFeature
from transformers.tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput
from transformers.image_utils import ImageInput, VideoInput
from transformers.processing_utils import ProcessingKwargs, ProcessorMixin, Unpack, VideosKwargs
class HithinkOmniVideosProcessorKwargs(VideosKwargs, total=False):
fps: Union[List[float], float]
class HithinkOmniProcessorKwargs(ProcessingKwargs, total=False):
videos_kwargs: HithinkOmniVideosProcessorKwargs
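    # Per-call kwargs are merged over these defaults via `ProcessorMixin._merge_kwargs` (see `__call__`).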
_defaults = {
"text_kwargs": {
"padding": False,
},
"videos_kwargs": {"fps": 2.0},
}
class HithinkOmniProcessor(ProcessorMixin):
r"""
    Constructs a HithinkOmni processor which wraps a Qwen2.5-VL image processor, a Whisper feature extractor, and a
    HithinkOmni tokenizer into a single processor.
    [`HithinkOmniProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`], [`WhisperFeatureExtractor`], and
    [`PreTrainedTokenizerFast`]. See the [`~HithinkOmniProcessor.__call__`] and [`~HithinkOmniProcessor.decode`] for more
    information.
Args:
image_processor ([`Qwen2VLImageProcessor`], *optional*):
The image processor is a required input.
feature_extractor ([`WhisperFeatureExtractor`], *optional*):
The feature extractor is a required input.
tokenizer ([`PreTrainedTokenizerFast`], *optional*):
The tokenizer is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
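
    Example (a minimal loading sketch; the checkpoint path is hypothetical):

    ```python
    >>> from transformers import AutoProcessor
    >>> processor = AutoProcessor.from_pretrained("path/to/hithink-omni", trust_remote_code=True)  # hypothetical path
    ```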
"""
attributes = ["image_processor", "feature_extractor", "tokenizer"]
valid_kwargs = ["chat_template"]
image_processor_class = "Qwen2VLImageProcessor"
feature_extractor_class = "WhisperFeatureExtractor"
tokenizer_class = "PreTrainedTokenizerFast"
def __init__(self, image_processor=None, feature_extractor=None, tokenizer=None, chat_template=None, **kwargs):
tokenizer.model_input_names = ["input_ids", "attention_mask"] # do not include token_type_ids
super().__init__(image_processor, feature_extractor, tokenizer, chat_template=chat_template)
        self.image_token = getattr(tokenizer, "image_token", "<|image_pad|>")
        self.video_token = getattr(tokenizer, "video_token", "<|video_pad|>")
self.chat_template = tokenizer.chat_template if chat_template is None else chat_template
def __call__(
self,
images: ImageInput = None,
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
videos: VideoInput = None,
audios: Union[np.ndarray, List[np.ndarray]] = None,
sampling_rate: Optional[int] = None,
**kwargs: Unpack[HithinkOmniProcessorKwargs],
) -> BatchFeature:
"""
        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
        and `kwargs` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwargs` arguments to
        Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `vision_infos` is not `None`.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The video or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
                tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
audios (`np.ndarray`, `List[np.ndarray]`):
The audio or batch of audios to be prepared. Each audio can be a NumPy array.
            sampling_rate (`int`, *optional*, defaults to 16000):
                The sampling rate, in hertz (Hz), at which the audio files were digitized.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
- `'jax'`: Return JAX `jnp.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
            - **image_grid_thw** -- List of 3D (temporal, height, width) grids of the images as seen by the LLM. Returned when `images` is not `None`.
            - **video_grid_thw** -- List of 3D (temporal, height, width) grids of the videos as seen by the LLM. Returned when `videos` is not `None`.
            - **second_per_grid_ts** -- List of seconds of video covered by each temporal grid step. Returned when `videos` is not `None`.
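
        Example (a minimal text+image call, assuming `processor` was loaded as in the class docstring; sketch only):

        ```python
        >>> import numpy as np
        >>> from PIL import Image
        >>> image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
        >>> inputs = processor(text=["Describe this image: <|image_pad|>"], images=[image], return_tensors="pt")
        >>> sorted(inputs.keys())  # doctest: +SKIP
        ```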
"""
output_kwargs = self._merge_kwargs(
HithinkOmniProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if images is not None:
image_inputs = self.image_processor(images=images, videos=None, **output_kwargs["images_kwargs"])
image_grid_thw = image_inputs["image_grid_thw"]
else:
image_inputs = {}
image_grid_thw = None
if videos is not None:
videos_inputs = self.image_processor(images=None, videos=videos, **output_kwargs["images_kwargs"])
video_grid_thw = videos_inputs["video_grid_thw"]
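            # Seconds of video covered by one temporal grid step: with the Qwen2-VL default
            # temporal_patch_size of 2 and the default fps of 2.0, each step spans 1.0 second.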
fps = output_kwargs["videos_kwargs"].pop("fps", 2.0)
if isinstance(fps, (int, float)):
second_per_grid_ts = [self.image_processor.temporal_patch_size / fps] * len(video_grid_thw)
elif hasattr(fps, "__len__") and len(fps) == len(video_grid_thw):
second_per_grid_ts = [self.image_processor.temporal_patch_size / tmp for tmp in fps]
else:
raise ValueError(
f"The length of fps ({len(fps) if hasattr(fps, '__len__') else fps}) must be equal to the length of video_grid_thw ({len(video_grid_thw)}) or fps should be a single number."
)
videos_inputs.update({"second_per_grid_ts": second_per_grid_ts})
else:
videos_inputs = {}
video_grid_thw = None
if not isinstance(text, list):
text = [text]
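        # Expand each image/video token into grid_t * grid_h * grid_w // merge_size**2 copies, one per merged
        # vision patch. The temporary <|placeholder|> prevents re-matching tokens that were already expanded.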
if image_grid_thw is not None:
merge_length = self.image_processor.merge_size**2
index = 0
for i in range(len(text)):
while self.image_token in text[i]:
text[i] = text[i].replace(
self.image_token,
"<|placeholder|>" * (image_grid_thw[index].prod() // merge_length),
1,
)
index += 1
text[i] = text[i].replace("<|placeholder|>", self.image_token)
if video_grid_thw is not None:
merge_length = self.image_processor.merge_size**2
index = 0
for i in range(len(text)):
while self.video_token in text[i]:
text[i] = text[i].replace(
self.video_token,
"<|placeholder|>" * (video_grid_thw[index].prod() // merge_length),
1,
)
index += 1
text[i] = text[i].replace("<|placeholder|>", self.video_token)
if audios is not None:
audio_inputs = self.feature_extractor(
audios, sampling_rate=sampling_rate, return_attention_mask=True, padding="max_length", **kwargs
)
audio_inputs["feature_attention_mask"] = audio_inputs.pop(
"attention_mask"
) # rename attention_mask to prevent conflicts later on
            audio_output_lengths = self.get_feat_extract_output_lengths(
                audio_inputs["feature_attention_mask"].sum(-1)
            )
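            # Expand each <|AUDIO|> marker into one placeholder per audio-encoder output frame,
            # mirroring the image/video token expansion above.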
index = 0
for i in range(len(text)):
while "<|AUDIO|>" in text[i]:
text[i] = text[i].replace(
"<|AUDIO|>", "<|placeholder|>" * audio_output_lengths[index], 1
)
index += 1
text[i] = text[i].replace("<|placeholder|>", "<|AUDIO|>")
else:
audio_inputs = {}
        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs, **audio_inputs})
@staticmethod
def get_feat_extract_input_length(audio_length):
"""
Computes the input length of the audio encoder (i.e. output of the feature extractor)
e.g. 30-second audio has 480,000 samples (sampling_rate = 16,000), the feature length will be 3,000
"""
return int(np.ceil((audio_length - 40) / 160)) # 第一帧需要200样本,后续每帧需要160样本
@staticmethod
def get_feat_extract_output_lengths(input_lengths):
"""
Computes the output length of the convolutional layers and the output length of the audio encoder
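        E.g. following the two downsampling steps below, 3,000 mel frames (a 30-second clip) become 1,500 after the
        first step and 750 audio-encoder tokens after the second.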
"""
input_lengths = (input_lengths - 1) // 2 + 1
output_lengths = (input_lengths - 2) // 2 + 1
return output_lengths
def featurize_audio_chunk(self, audio: np.ndarray, is_last: bool, n_extracted_frames: int = 0, **kwargs):
"""
Extract the features from the audio chunk during streaming inference
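
        A minimal cumulative-streaming loop (sketch; `audio_stream` is a hypothetical iterable of
        (np.ndarray, bool) chunk/is-last pairs):

            buf = np.zeros(0, dtype=np.float32)
            n_done = 0
            for chunk, is_last in audio_stream:
                buf = np.concatenate([buf, chunk])  # `audio` must be the cumulative waveform so far
                feats = processor.featurize_audio_chunk(buf, is_last, n_extracted_frames=n_done)
                if feats is not None:  # None means too few new samples for a full frame yet
                    n_done += feats.shape[-1]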
"""
        n_frames = (len(audio) - 40) / 160  # the first frame needs 200 samples; each later frame needs 160
n_frames = int(np.ceil(n_frames) if is_last else np.floor(n_frames))
n_new_frames = n_frames - n_extracted_frames
i_end = n_frames * 160 + 40
        i_start = max(0, (n_extracted_frames + 1 - 3) * 160)  # the sliding window needs 400 samples, i.e. at least 3 frames
if n_new_frames <= 0 or n_frames < 2:
return
        a = audio[i_start:i_end]  # slice out the span needed to compute the new frames
if is_last and (n_pad := int(np.ceil(len(a) / 160)) * 160 - len(a)): # pad to multiple of 160
a = np.pad(a, [0, n_pad])
        features = self.feature_extractor(
            a, sampling_rate=self.feature_extractor.sampling_rate, padding="do_not_pad", **kwargs
        )["input_features"]
return features[:, :, -n_new_frames:]
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
"""
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
"""
return self.tokenizer.decode(*args, **kwargs)
def post_process_image_text_to_text(self, generated_outputs):
"""
Post-process the output of the model to decode the text.
Args:
generated_outputs (`torch.Tensor` or `np.ndarray`):
The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
or `(sequence_length,)`.
Returns:
`List[str]`: The decoded text.
"""
return self.tokenizer.batch_decode(
generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
feature_extractor_input_names = self.feature_extractor.model_input_names
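        # "feature_attention_mask" is added explicitly because `__call__` renames the feature
        # extractor's `attention_mask` to it, to avoid clashing with the tokenizer's mask.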
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names
+ feature_extractor_input_names + ["feature_attention_mask"])) # audio