import re
import torch
from qwen_vl_utils import process_vision_info
from src.constants import (
DEFAULT_IMAGE_TOKEN,
DEFAULT_VIDEO_TOKEN,
LLAVA_IMAGE_TOKEN,
LLAVA_VIDEO_TOKEN,
VISION_START_TOKEN,
VISION_END_TOKEN,
)
def replace_image_tokens(input_string, is_video=False):
    """Swap LLaVA-style media placeholders for Qwen vision-token spans.

    Each placeholder (optionally surrounded by newlines, which are absorbed)
    is replaced by the default media token wrapped in vision start/end tokens.

    Args:
        input_string: Conversation text possibly containing LLaVA tokens.
        is_video: If True, translate the video placeholder; otherwise the
            image placeholder.

    Returns:
        The text with all matching placeholders rewritten.
    """
    if is_video:
        source_token, media_token = LLAVA_VIDEO_TOKEN, DEFAULT_VIDEO_TOKEN
    else:
        source_token, media_token = LLAVA_IMAGE_TOKEN, DEFAULT_IMAGE_TOKEN
    # \n? on both sides strips a single adjacent newline around the placeholder.
    pattern = r'\n?' + re.escape(source_token) + r'\n?'
    replacement = VISION_START_TOKEN + media_token + VISION_END_TOKEN
    return re.sub(pattern, replacement, input_string)
def llava_to_openai(conversations, is_video=False):
    """Convert LLaVA conversation turns into OpenAI chat-message format.

    Maps the "from" field ("human" -> "user", "gpt" -> "assistant"; anything
    else passes through unchanged) and rewrites media placeholder tokens in
    each turn's "value" text.

    Args:
        conversations: Iterable of dicts with "from" and "value" keys.
        is_video: Forwarded to replace_image_tokens.

    Returns:
        List of {"role": ..., "content": ...} dicts.
    """
    role_map = {"human": "user", "gpt": "assistant"}
    return [
        {
            "role": role_map.get(turn["from"], turn["from"]),
            "content": replace_image_tokens(turn["value"], is_video=is_video),
        }
        for turn in conversations
    ]
def truncate_sequence(input_ids, labels, max_length, eos_token_id):
    """Truncate input_ids/labels to at most max_length tokens, keeping EOS.

    When truncation occurs, the sequences are cut to max_length - 1 tokens and
    an EOS token is appended (total length == max_length). If eos_token_id is
    None, the truncated sequences are returned without an appended token
    (total length == max_length - 1). Untruncated inputs pass through as-is.

    Args:
        input_ids: 1-D token-id tensor.
        labels: 1-D label tensor aligned with input_ids.
        max_length: Maximum allowed sequence length.
        eos_token_id: Token id to re-append after truncation, or None.

    Returns:
        Tuple of (input_ids, labels), possibly truncated.
    """
    if input_ids.size(0) > max_length:
        input_ids = input_ids[:max_length - 1]
        labels = labels[:max_length - 1]
        if eos_token_id is not None:
            # new_tensor inherits device and dtype from the source tensor;
            # torch.tensor([...]) would allocate on CPU with the default dtype
            # and make torch.cat fail for CUDA or non-default-dtype inputs.
            input_ids = torch.cat([input_ids, input_ids.new_tensor([eos_token_id])])
            labels = torch.cat([labels, labels.new_tensor([eos_token_id])])
    return input_ids, labels
def pad_sequence(sequences, padding_side='right', padding_value=0):
    """Pad a list of variable-length tensors into one batched tensor.

    All tensors must share trailing dimensions (everything after dim 0);
    the trailing shape is taken from the first sequence.

    Args:
        sequences: Non-empty list of tensors shaped [seq_len, *].
        padding_side: 'right' (pad at the end) or 'left' (pad at the start).
        padding_value: Fill value for padded positions.

    Returns:
        Tensor of shape [batch, max_len, *] on the same device/dtype as
        sequences[0].
    """
    assert padding_side in ['right', 'left']
    trailing_dims = sequences[0].size()[1:]
    max_len = max(len(seq) for seq in sequences)
    batch_size = len(sequences)
    # new_full keeps device and dtype consistent with the input sequences.
    output = sequences[0].new_full((batch_size, max_len) + trailing_dims, padding_value)
    for i, seq in enumerate(sequences):
        length = seq.size(0)
        # Index the tensor directly; writing through the deprecated `.data`
        # attribute bypasses autograd's version tracking and is discouraged.
        if padding_side == 'right':
            output[i, :length] = seq
        else:
            output[i, -length:] = seq
    return output
def get_image_info(image_path, min_pixel, max_pixel, width, height, image_patch_size):
    """Load a single processed image via process_vision_info.

    Wraps the image spec in a one-turn chat message because
    process_vision_info only accepts message lists — a known workaround
    to be cleaned up later.

    Args:
        image_path: Path or URL of the image.
        min_pixel / max_pixel: Pixel-count bounds forwarded to the processor.
        width / height: Optional explicit resize target; both must be set.
        image_patch_size: Patch size forwarded to process_vision_info.

    Returns:
        The first (only) processed image from process_vision_info.
    """
    image_spec = {
        "type": "image",
        "image": image_path,
        "min_pixels": min_pixel,
        "max_pixels": max_pixel,
    }
    if width is not None and height is not None:
        image_spec["resized_width"] = width
        image_spec["resized_height"] = height

    wrapped = [{"role": "user", "content": [image_spec]}]
    image_input, _ = process_vision_info(wrapped, image_patch_size=image_patch_size)
    return image_input[0]
def get_video_info(video_path, min_pixels, max_pixels, width, height, fps, image_patch_size, return_video_metadata=False):
    """Load a single processed video (plus video kwargs) via process_vision_info.

    Wraps the video spec in a one-turn chat message because
    process_vision_info only accepts message lists — a known workaround
    to be cleaned up later.

    Args:
        video_path: Path or URL of the video.
        min_pixels / max_pixels: Pixel-count bounds forwarded to the processor.
        width / height: Optional explicit resize target; both must be set.
        fps: Sampling rate forwarded to the processor.
        image_patch_size: Patch size forwarded to process_vision_info.
        return_video_metadata: Forwarded to process_vision_info.

    Returns:
        Tuple of (first processed video, video kwargs dict).
    """
    video_spec = {
        "type": "video",
        "video": video_path,
        "min_pixels": min_pixels,
        "max_pixels": max_pixels,
        "fps": fps,
    }
    if width is not None and height is not None:
        video_spec["resized_width"] = width
        video_spec["resized_height"] = height

    wrapped = [{"role": "user", "content": [video_spec]}]
    _, video_input, video_kwargs = process_vision_info(
        wrapped,
        return_video_kwargs=True,
        image_patch_size=image_patch_size,
        return_video_metadata=return_video_metadata,
    )
    return video_input[0], video_kwargs
def samples_per_class_from_ids(label_ids, num_classes):
    """Count how many samples fall in each class.

    Args:
        label_ids: Sequence (or tensor) of non-negative integer class ids.
        num_classes: Minimum length of the returned count list; classes with
            no samples get a count of 0.

    Returns:
        List of per-class sample counts, length >= num_classes.
    """
    labels = torch.as_tensor(label_ids, dtype=torch.long)
    per_class = torch.bincount(labels, minlength=num_classes)
    return per_class.tolist()