Pu Miao commited on
Commit
4a192d8
·
1 Parent(s): baddae7
evaluation/internvl.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import torch
3
+ import torchvision.transforms as T
4
+ from PIL import Image
5
+ from torchvision.transforms.functional import InterpolationMode
6
+ from transformers import AutoConfig, AutoTokenizer, AutoModel
7
+ import os
8
+ from utils.utils import COUNTING_BASE_PROMPT, RELATION_BASE_PROMPT, parse_model_answer, eval_loop, eval_pipeline
9
+
10
# Directory containing this script and its parent (the repo root);
# dataset image paths are resolved relative to the parent directory.
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.split(current_dir)[0]


# Standard ImageNet normalization statistics used by build_transform.
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)
17
+
18
+
19
def build_transform(input_size):
    """Build the preprocessing pipeline for one image tile.

    Converts non-RGB images to RGB, resizes to a square of side
    ``input_size`` with bicubic interpolation, converts to a tensor and
    normalizes with ImageNet statistics.
    """
    steps = [
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
    ]
    return T.Compose(steps)
28
+
29
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    """Pick the tiling grid (cols, rows) whose aspect ratio best matches the image.

    Ties on ratio difference are broken by preferring the later candidate
    when the original image area exceeds half of that grid's pixel budget.
    """
    chosen = (1, 1)
    smallest_diff = float('inf')
    pixel_area = width * height
    for grid in target_ratios:
        candidate_ratio = grid[0] / grid[1]
        diff = abs(aspect_ratio - candidate_ratio)
        if diff < smallest_diff:
            smallest_diff = diff
            chosen = grid
        elif diff == smallest_diff and pixel_area > 0.5 * image_size * image_size * grid[0] * grid[1]:
            chosen = grid
    return chosen
43
+
44
def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
    """Split an image into a grid of square tiles matching its aspect ratio.

    :param image: PIL image to split.
    :param min_num: minimum number of tiles in the grid.
    :param max_num: maximum number of tiles in the grid.
    :param image_size: side length of each square tile.
    :param use_thumbnail: when True and more than one tile is produced,
        append a full-image thumbnail as an extra tile.
    :return: list of PIL image tiles.
    """
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # All (cols, rows) grids whose tile count lies in [min_num, max_num],
    # ordered by total tile count.
    target_ratios = set((i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)

    # Resize so the image maps exactly onto the chosen grid of tiles.
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        # Crop box for tile i, scanning the grid row-major (left-to-right).
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images
74
+
75
def load_image(image_file, input_size=448, max_num=12):
    """Load an image file and return its preprocessed tiles as one tensor.

    The image is split into up to ``max_num`` square tiles plus a thumbnail,
    each transformed and stacked into a tensor of shape (N, 3, H, W).
    """
    pil_image = Image.open(image_file).convert('RGB')
    preprocess = build_transform(input_size=input_size)
    tiles = dynamic_preprocess(pil_image, image_size=input_size, use_thumbnail=True, max_num=max_num)
    return torch.stack([preprocess(tile) for tile in tiles])
82
+
83
def split_model(path):
    """Build a HuggingFace ``device_map`` spreading an InternVL checkpoint's
    LLM layers across all visible GPUs.

    GPU 0 is charged only half a share of LLM layers because it also hosts
    the vision tower, projector, embeddings and output head.

    :param path: model name or path passed to AutoConfig.from_pretrained.
    :return: dict mapping module names to GPU indices.
    """
    device_map = {}
    world_size = torch.cuda.device_count()
    config = AutoConfig.from_pretrained(path, trust_remote_code=True)
    num_layers = config.llm_config.num_hidden_layers
    # Size the per-GPU share treating GPU 0 as half a device.
    num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
    num_layers_per_gpu = [num_layers_per_gpu] * world_size
    num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
    layer_cnt = 0
    for i, num_layer in enumerate(num_layers_per_gpu):
        for j in range(num_layer):
            device_map[f'language_model.model.layers.{layer_cnt}'] = i
            layer_cnt += 1
    # Non-layer modules all live on GPU 0 alongside the vision tower.
    device_map['vision_model'] = 0
    device_map['mlp1'] = 0
    device_map['language_model.model.tok_embeddings'] = 0
    device_map['language_model.model.embed_tokens'] = 0
    device_map['language_model.output'] = 0
    device_map['language_model.model.norm'] = 0
    device_map['language_model.model.rotary_emb'] = 0
    device_map['language_model.lm_head'] = 0
    # Pin the final layer to GPU 0 so it is co-located with the LM head.
    device_map[f'language_model.model.layers.{num_layers - 1}'] = 0
    return device_map
106
+
107
+
108
+
109
+
110
+
111
def process(model, item, task_type="counting", **kwargs):
    """Run one InternVL inference on a single dataset item.

    :param model: loaded InternVL model exposing ``chat``.
    :param item: dataset record with 'question' and 'image_path' keys.
    :param task_type: "counting" or a relation task; selects the prompt prefix.
    :param kwargs: must supply 'tokenizer' and 'generation_config'.
    :return: dict with the raw 'model_answer' and the 'parsed_answer'.
    """
    gen_cfg = kwargs.get('generation_config')
    tok = kwargs.get('tokenizer')
    prefix = COUNTING_BASE_PROMPT if task_type == "counting" else RELATION_BASE_PROMPT
    formatted_question = prefix + item['question']
    image_path = os.path.join(parent_dir, item['image_path'])
    # Image tiles are cast to bf16 and moved to GPU 0 for the vision tower.
    pixel_values = load_image(image_path, max_num=12).to(torch.bfloat16).cuda()
    model_answer = model.chat(tok, pixel_values, formatted_question, gen_cfg)
    return {
        'model_answer': model_answer,
        'parsed_answer': parse_model_answer(model_answer, task_type),
    }
132
+
133
+
134
+
135
+
136
def main():
    """
    InternVL3

    Load InternVL3-2B spread across all visible GPUs and run the shared
    evaluation pipeline (counting / relation / combination datasets).
    """
    model_name_or_path = 'OpenGVLab/InternVL3-2B'
    model_name = model_name_or_path.split('/')[-1]
    cache_dir = './cache/'
    # Map LLM layers across GPUs; vision tower and embeddings stay on GPU 0.
    device_map = split_model(model_name_or_path)
    model = AutoModel.from_pretrained(
        model_name_or_path,
        torch_dtype=torch.bfloat16,
        load_in_8bit=False,
        low_cpu_mem_usage=True,
        use_flash_attn=True,
        trust_remote_code=True,
        cache_dir=cache_dir,
        device_map=device_map).eval()

    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True, use_fast=False)
    # NOTE(review): do_sample=True makes generations non-deterministic
    # across runs — confirm this is intended for evaluation.
    generation_config = dict(max_new_tokens=1024, do_sample=True, pad_token_id=tokenizer.convert_tokens_to_ids(tokenizer.pad_token))
    params = {
        'model': model,
        'tokenizer': tokenizer,
        'generation_config': generation_config,
        'process_fn': process,
    }
    eval_pipeline(model_name, current_dir, params)


if __name__ == "__main__":
    main()
evaluation/qwen2.5_vl.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
3
+ from utils.utils import COUNTING_BASE_PROMPT, RELATION_BASE_PROMPT, eval_loop, parse_model_answer, eval_pipeline
4
+ from utils.vision_process import process_vision_info
5
+
6
# Directory of this script and the repo root; dataset image paths are
# stored relative to the repository root (parent_dir).
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.split(current_dir)[0]
8
+
9
+
10
+
11
def process(model, item, task_type="counting", **kwargs):
    """Run one Qwen2.5-VL inference on a single dataset item.

    :param model: loaded Qwen2.5-VL generation model.
    :param item: dataset record with 'question' and 'image_path' keys.
    :param task_type: "counting" or a relation task; selects the prompt prefix.
    :param kwargs: must supply 'processor'.
    :return: dict with the raw 'model_answer' and the 'parsed_answer'.
    """
    processor = kwargs.get('processor')
    prefix = COUNTING_BASE_PROMPT if task_type == "counting" else RELATION_BASE_PROMPT
    formatted_question = prefix + item['question']
    image_path = os.path.join(parent_dir, item['image_path'])

    messages = [{
        "role": "user",
        "content": [
            {"type": "image", "image": image_path},
            {"type": "text", "text": formatted_question},
        ],
    }]

    # Render the chat template, then pair the text with the fetched vision inputs.
    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    ).to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=1000)
    # Drop the prompt tokens so only the newly generated answer is decoded.
    trimmed_ids = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    decoded = processor.batch_decode(
        trimmed_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    model_answer = decoded[0]
    return {
        'model_answer': model_answer,
        'parsed_answer': parse_model_answer(model_answer, task_type),
    }
57
+
58
+
59
def main():
    """Evaluate Qwen2.5-VL-3B-Instruct on the shared evaluation pipeline."""
    model_name_or_path = 'Qwen/Qwen2.5-VL-3B-Instruct'
    model_name = model_name_or_path.split('/')[-1]
    cache_dir = './cache/'

    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        model_name_or_path, torch_dtype="auto", device_map="auto", cache_dir=cache_dir)
    processor = AutoProcessor.from_pretrained(model_name_or_path)

    eval_pipeline(model_name, current_dir, {
        'model': model,
        'processor': processor,
        'process_fn': process,
    })


if __name__ == "__main__":
    main()
evaluation/utils/utils.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import logging
3
+ import os
4
+ import re
5
+ from tqdm import tqdm
6
+
7
# Module-level logger writing to the console at INFO level.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
logger.addHandler(handler)
logger.setLevel(logging.INFO)

# Prompt prefixes that instruct the model to answer inside a fenced
# ```json ... ``` block so parse_model_answer can extract it with a regex.
COUNTING_BASE_PROMPT = 'You should output a json string with format {"answer": a int number}.Your output should be directly parsed by json.loads function. eg.```json{"answer": 1}```.\nNow the question is:'
RELATION_BASE_PROMPT = 'You should output a json string with format {"answer": "str"}, where str must be one of ["up", "under", "back", "front", "left", "right"]. Your output should be directly parsed by json.loads function. eg.```json{"answer": "left"}```.\nNow the question is:'
# Closed set of spatial relations accepted as valid relation answers.
VALID_RELATIONS = ["up", "under", "back", "front", "left", "right"]
SPLIT_SYMBOL = "="*50
16
+
17
def parse_model_answer(model_output, task_type="counting"):
    """
    Parse the JSON-formatted answer out of the model output.

    Looks for a ```json ... ``` fenced block first (the format the prompts
    request); if no fence is present, falls back to the first bare JSON
    object in the output, since the prompt also allows a directly
    json.loads-parseable answer and models often omit the fence.

    :param model_output: raw text generated by the model.
    :param task_type: "counting" expects an integer answer; any other value
        is treated as a relation task expecting one of VALID_RELATIONS.
    :return: parsed answer (int for counting, str for relation), or None
        when no valid answer can be extracted.
    """
    pattern = r'```json\s*(\{.*?\})\s*```'
    match = re.search(pattern, model_output, re.DOTALL)
    if match is None:
        # Fallback: accept a plain JSON object without the markdown fence.
        match = re.search(r'(\{.*?\})', model_output, re.DOTALL)
    parsed_answer = None
    if match:
        try:
            json_str = match.group(1)
            result = json.loads(json_str)
            answer = result.get('answer')
            if task_type == "counting":
                if isinstance(answer, str) and answer.isdigit():
                    parsed_answer = int(answer)
                # bool is a subclass of int — exclude it so true/false is
                # not silently accepted as a count.
                elif isinstance(answer, int) and not isinstance(answer, bool):
                    parsed_answer = answer
                else:
                    parsed_answer = None
            else:  # relation task
                parsed_answer = answer if answer in VALID_RELATIONS else None
        except Exception as e:
            logger.error(f"{model_output};\n{str(e)}")
    return parsed_answer
44
+
45
def eval_loop(model, dataset, process_fn, task_type="counting", **kwargs):
    """
    Run ``process_fn`` over every item in ``dataset`` and collect results.

    :param model: model object forwarded to ``process_fn``.
    :param dataset: iterable of dataset records (dicts with 'id', 'question',
        'image_path' and 'answer' keys).
    :param process_fn: callable(model, item, task_type=..., **kwargs) returning
        a dict with 'model_answer' and 'parsed_answer'.
    :param task_type: forwarded to ``process_fn``.
    :param kwargs: extra keyword arguments forwarded to ``process_fn``.
    :return: list of per-item result dicts including the ground truth.
    """
    collected = []
    for record in tqdm(dataset):
        outcome = process_fn(model, record, task_type=task_type, **kwargs)
        entry = {
            "id": record["id"],
            "question": record["question"],
            "image_path": record['image_path'],
            "model_answer": outcome['model_answer'],
            "parsed_answer": outcome['parsed_answer'],
            "ground_truth": record['answer']
        }
        # Log every item so progress can be inspected mid-run.
        logger.info(f"""\n{json.dumps(entry, indent=4)}\n""")
        collected.append(entry)
    return collected
69
+
70
+
71
def eval_pipeline(model_name, current_dir, params):
    """
    Run the full evaluation over the Counting, Relation and Combination
    datasets and write the collected results under current_dir/result/.

    :param model_name: used to name the output results JSON file.
    :param current_dir: directory of the calling evaluation script; its
        parent is expected to contain the three dataset JSON files.
    :param params: keyword arguments forwarded to eval_loop (model,
        process_fn, plus model-specific extras such as tokenizer/processor).
    """
    results = {}
    parent_dir = os.path.split(current_dir)[0]
    # Counting
    print("process counting dataset...")
    counting_data_path = os.path.join(parent_dir, 'Counting.json')
    relations_data_path = os.path.join(parent_dir, 'Relation.json')
    combination_data_path = os.path.join(parent_dir, 'Combination.json')

    counting_data = json.load(open(counting_data_path, 'r', encoding='utf-8'))
    counting_results = eval_loop(dataset=counting_data, **params)
    results["counting_results"] = counting_results

    # Relation
    print("process relations dataset...")
    relations_data = json.load(open(relations_data_path, 'r', encoding='utf-8'))
    relations_results = eval_loop(dataset=relations_data, task_type="relation", **params)
    results["relations_results"] = relations_results

    # Combination
    print("process combination dataset...")
    # NOTE(review): combination items run with the default
    # task_type="counting" — confirm this matches the dataset's answer format.
    combination_data = json.load(open(combination_data_path, 'r', encoding='utf-8'))
    combination_results = eval_loop(dataset=combination_data, **params)
    results["combination_results"] = combination_results

    result_parent_path = os.path.join(current_dir, './result/')
    if not os.path.exists(result_parent_path):
        os.makedirs(result_parent_path)

    result_path = os.path.join(result_parent_path, f'{model_name}_results.json')
    with open(result_path, 'w', encoding='utf-8') as f:
        json.dump(results, f, ensure_ascii=False, indent=2)

    print(f"The process has finished. The evaluation results are saved to ./result/{model_name}_results.json")
    print(f"The number of counting samples processed successfully: {len(results['counting_results'])}")
    print(f"The number of relationship samples processed successfully: {len(results['relations_results'])}")
    print(f"The number of combination samples processed successfully: {len(results['combination_results'])}")
evaluation/utils/vision_process.py ADDED
@@ -0,0 +1,493 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import base64
4
+ import copy
5
+ import logging
6
+ import math
7
+ import os
8
+ import sys
9
+ import time
10
+ import warnings
11
+ from functools import lru_cache
12
+ from io import BytesIO
13
+ from typing import Optional
14
+
15
+ import requests
16
+ import torch
17
+ import torchvision
18
+ from packaging import version
19
+ from PIL import Image
20
+ from torchvision import io, transforms
21
+ from torchvision.transforms import InterpolationMode
22
+
23
+
24
logger = logging.getLogger(__name__)

# Image sizing: sides are rounded to multiples of IMAGE_FACTOR and total
# pixel counts are clamped to [MIN_PIXELS, MAX_PIXELS] by smart_resize.
IMAGE_FACTOR = 28
MIN_PIXELS = 4 * 28 * 28
MAX_PIXELS = 16384 * 28 * 28
# Images whose long/short side ratio exceeds this are rejected.
MAX_RATIO = 200

# Per-frame pixel bounds and frame-sampling defaults for video inputs.
VIDEO_MIN_PIXELS = 128 * 28 * 28
VIDEO_MAX_PIXELS = 768 * 28 * 28
# Sampled frame counts are rounded to multiples of FRAME_FACTOR.
FRAME_FACTOR = 2
FPS = 2.0
FPS_MIN_FRAMES = 4
FPS_MAX_FRAMES = 768

# Set the maximum number of video token inputs.
# Here, 128K represents the maximum number of input tokens for the VLLM model.
# Remember to adjust it according to your own configuration.
VIDEO_TOTAL_PIXELS = int(float(os.environ.get('VIDEO_MAX_PIXELS', 128000 * 28 * 28 * 0.9)))
logger.info(f"set VIDEO_TOTAL_PIXELS: {VIDEO_TOTAL_PIXELS}")
43
+
44
+
45
def round_by_factor(number: int, factor: int) -> int:
    """Round ``number`` to the nearest multiple of ``factor``."""
    quotient = round(number / factor)
    return factor * quotient
48
+
49
+
50
def ceil_by_factor(number: int, factor: int) -> int:
    """Round ``number`` up to the nearest multiple of ``factor``."""
    return factor * math.ceil(number / factor)
53
+
54
+
55
def floor_by_factor(number: int, factor: int) -> int:
    """Round ``number`` down to the nearest multiple of ``factor``."""
    return factor * math.floor(number / factor)
58
+
59
+
60
def smart_resize(
    height: int, width: int, factor: int = IMAGE_FACTOR, min_pixels: int = MIN_PIXELS, max_pixels: int = MAX_PIXELS
) -> tuple[int, int]:
    """Compute target dimensions for an image such that:

    1. Both sides are divisible by ``factor``.
    2. The total pixel count lies within [``min_pixels``, ``max_pixels``].
    3. The original aspect ratio is preserved as closely as possible.

    Raises ValueError when the long/short side ratio exceeds MAX_RATIO.
    """
    if max(height, width) / min(height, width) > MAX_RATIO:
        raise ValueError(
            f"absolute aspect ratio must be smaller than {MAX_RATIO}, got {max(height, width) / min(height, width)}"
        )
    # Start from the nearest factor-aligned dimensions (never below one factor).
    h_bar = max(factor, round_by_factor(height, factor))
    w_bar = max(factor, round_by_factor(width, factor))
    if h_bar * w_bar > max_pixels:
        # Too large: shrink both sides by the same scale, rounding down.
        shrink = math.sqrt((height * width) / max_pixels)
        h_bar = floor_by_factor(height / shrink, factor)
        w_bar = floor_by_factor(width / shrink, factor)
    elif h_bar * w_bar < min_pixels:
        # Too small: grow both sides by the same scale, rounding up.
        grow = math.sqrt(min_pixels / (height * width))
        h_bar = ceil_by_factor(height * grow, factor)
        w_bar = ceil_by_factor(width * grow, factor)
    return h_bar, w_bar
87
+
88
+
89
def to_rgb(pil_image: Image.Image) -> Image.Image:
    """Convert a PIL image to RGB, compositing RGBA images onto white."""
    if pil_image.mode != 'RGBA':
        return pil_image.convert("RGB")
    # Paste onto an opaque white canvas using the alpha channel as the mask
    # so transparent regions become white instead of black.
    canvas = Image.new("RGB", pil_image.size, (255, 255, 255))
    canvas.paste(pil_image, mask=pil_image.split()[3])
    return canvas
96
+
97
+
98
def fetch_image(ele: dict[str, str | Image.Image], size_factor: int = IMAGE_FACTOR) -> Image.Image:
    """Load the image referenced by a content element and resize it.

    Accepts a PIL.Image, an http(s) URL, a ``file://`` path, a base64
    ``data:image`` URI, or a plain local path (under the "image" or
    "image_url" key). The result is RGB and resized so both sides are
    multiples of ``size_factor``.
    """
    if "image" in ele:
        image = ele["image"]
    else:
        image = ele["image_url"]
    image_obj = None
    if isinstance(image, Image.Image):
        image_obj = image
    elif image.startswith("http://") or image.startswith("https://"):
        # fix memory leak issue while using BytesIO
        with requests.get(image, stream=True) as response:
            response.raise_for_status()
            with BytesIO(response.content) as bio:
                # deepcopy detaches the image from the soon-closed buffer
                image_obj = copy.deepcopy(Image.open(bio))
    elif image.startswith("file://"):
        image_obj = Image.open(image[7:])
    elif image.startswith("data:image"):
        if "base64," in image:
            _, base64_data = image.split("base64,", 1)
            data = base64.b64decode(base64_data)
            # fix memory leak issue while using BytesIO
            with BytesIO(data) as bio:
                image_obj = copy.deepcopy(Image.open(bio))
    else:
        image_obj = Image.open(image)
    if image_obj is None:
        raise ValueError(f"Unrecognized image input, support local path, http url, base64 and PIL.Image, got {image}")
    image = to_rgb(image_obj)
    ## resize
    if "resized_height" in ele and "resized_width" in ele:
        # Caller supplied explicit dimensions; only factor-align them.
        resized_height, resized_width = smart_resize(
            ele["resized_height"],
            ele["resized_width"],
            factor=size_factor,
        )
    else:
        width, height = image.size
        min_pixels = ele.get("min_pixels", MIN_PIXELS)
        max_pixels = ele.get("max_pixels", MAX_PIXELS)
        resized_height, resized_width = smart_resize(
            height,
            width,
            factor=size_factor,
            min_pixels=min_pixels,
            max_pixels=max_pixels,
        )
    image = image.resize((resized_width, resized_height))

    return image
147
+
148
+
149
def smart_nframes(
    ele: dict,
    total_frames: int,
    video_fps: int | float,
) -> int:
    """Decide how many frames to sample from a video for model input.

    Exactly one of ``ele["nframes"]`` (explicit count) or ``ele["fps"]``
    (sampling rate, optionally bounded by ``min_frames``/``max_frames``)
    may be supplied. The result is always a multiple of FRAME_FACTOR.

    Args:
        ele: video configuration dict.
        total_frames: the original total number of frames of the video.
        video_fps: the original fps of the video.

    Raises:
        ValueError: when the computed count falls outside
            [FRAME_FACTOR, total_frames].

    Returns:
        The number of frames to sample for model input.
    """
    assert not ("fps" in ele and "nframes" in ele), "Only accept either `fps` or `nframes`"
    if "nframes" in ele:
        sampled = round_by_factor(ele["nframes"], FRAME_FACTOR)
    else:
        fps = ele.get("fps", FPS)
        lower = ceil_by_factor(ele.get("min_frames", FPS_MIN_FRAMES), FRAME_FACTOR)
        upper = floor_by_factor(ele.get("max_frames", min(FPS_MAX_FRAMES, total_frames)), FRAME_FACTOR)
        sampled = total_frames / video_fps * fps
        if sampled > total_frames:
            logger.warning(f"smart_nframes: nframes[{sampled}] > total_frames[{total_frames}]")
        # Clamp into [lower, upper] but never above the available frames.
        sampled = min(max(sampled, lower), upper, total_frames)
        sampled = floor_by_factor(sampled, FRAME_FACTOR)
    if not (FRAME_FACTOR <= sampled and sampled <= total_frames):
        raise ValueError(f"nframes should in interval [{FRAME_FACTOR}, {total_frames}], but got {sampled}.")
    return sampled
187
+
188
+
189
def _read_video_torchvision(
    ele: dict,
) -> (torch.Tensor, float):
    """read video using torchvision.io.read_video

    Args:
        ele (dict): a dict contains the configuration of video.
        support keys:
            - video: the path of video. support "file://", "http://", "https://" and local path.
            - video_start: the start time of video.
            - video_end: the end time of video.
    Returns:
        torch.Tensor: the video tensor with shape (T, C, H, W).
        float: the effective sampling fps of the returned frames.
    """
    video_path = ele["video"]
    if version.parse(torchvision.__version__) < version.parse("0.19.0"):
        # Older torchvision cannot stream remote videos and needs the
        # file:// scheme stripped from local paths.
        if "http://" in video_path or "https://" in video_path:
            warnings.warn("torchvision < 0.19.0 does not support http/https video path, please upgrade to 0.19.0.")
        if "file://" in video_path:
            video_path = video_path[7:]
    st = time.time()
    video, audio, info = io.read_video(
        video_path,
        start_pts=ele.get("video_start", 0.0),
        end_pts=ele.get("video_end", None),
        pts_unit="sec",
        output_format="TCHW",
    )
    total_frames, video_fps = video.size(0), info["video_fps"]
    logger.info(f"torchvision: {video_path=}, {total_frames=}, {video_fps=}, time={time.time() - st:.3f}s")
    nframes = smart_nframes(ele, total_frames=total_frames, video_fps=video_fps)
    # Evenly spaced frame indices across the decoded clip.
    idx = torch.linspace(0, total_frames - 1, nframes).round().long()
    sample_fps = nframes / max(total_frames, 1e-6) * video_fps
    video = video[idx]
    return video, sample_fps
224
+
225
+
226
def is_decord_available() -> bool:
    """Return True when the optional ``decord`` package can be found."""
    from importlib.util import find_spec

    return find_spec("decord") is not None
230
+
231
+
232
def calculate_video_frame_range(
    ele: dict,
    total_frames: int,
    video_fps: float,
) -> tuple[int, int, int]:
    """
    Calculate the start and end frame indices based on the given time range.

    Args:
        ele (dict): A dictionary containing optional 'video_start' and 'video_end' keys (in seconds).
        total_frames (int): Total number of frames in the video.
        video_fps (float): Frames per second of the video.

    Returns:
        tuple: A tuple containing (start_frame, end_frame, frame_count).

    Raises:
        ValueError: If input parameters are invalid or the time range is inconsistent.
    """
    # Validate essential parameters
    if video_fps <= 0:
        raise ValueError("video_fps must be a positive number")
    if total_frames <= 0:
        raise ValueError("total_frames must be a positive integer")

    # Get start and end time in seconds
    video_start = ele.get("video_start", None)
    video_end = ele.get("video_end", None)
    if video_start is None and video_end is None:
        # No trimming requested: use the whole clip.
        return 0, total_frames - 1, total_frames

    max_duration = total_frames / video_fps
    # Process start frame
    if video_start is not None:
        # Clamp into [0, clip duration] before converting to a frame index.
        video_start_clamped = max(0.0, min(video_start, max_duration))
        start_frame = math.ceil(video_start_clamped * video_fps)
    else:
        start_frame = 0
    # Process end frame
    if video_end is not None:
        video_end_clamped = max(0.0, min(video_end, max_duration))
        end_frame = math.floor(video_end_clamped * video_fps)
        end_frame = min(end_frame, total_frames - 1)
    else:
        end_frame = total_frames - 1

    # Validate frame order
    if start_frame >= end_frame:
        raise ValueError(
            f"Invalid time range: Start frame {start_frame} (at {video_start_clamped if video_start is not None else 0}s) "
            f"exceeds end frame {end_frame} (at {video_end_clamped if video_end is not None else max_duration}s). "
            f"Video duration: {max_duration:.2f}s ({total_frames} frames @ {video_fps}fps)"
        )

    logger.info(f"calculate video frame range: {start_frame=}, {end_frame=}, {total_frames=} from {video_start=}, {video_end=}, {video_fps=:.3f}")
    return start_frame, end_frame, end_frame - start_frame + 1
288
+
289
+
290
def _read_video_decord(
    ele: dict,
) -> (torch.Tensor, float):
    """read video using decord.VideoReader

    Args:
        ele (dict): a dict contains the configuration of video.
        support keys:
            - video: the path of video. support "file://", "http://", "https://" and local path.
            - video_start: the start time of video.
            - video_end: the end time of video.
    Returns:
        torch.Tensor: the video tensor with shape (T, C, H, W).
        float: the effective sampling fps of the returned frames.
    """
    # decord is optional; imported lazily so the module loads without it.
    import decord
    video_path = ele["video"]
    st = time.time()
    vr = decord.VideoReader(video_path)
    total_frames, video_fps = len(vr), vr.get_avg_fps()
    start_frame, end_frame, total_frames = calculate_video_frame_range(
        ele,
        total_frames,
        video_fps,
    )
    nframes = smart_nframes(ele, total_frames=total_frames, video_fps=video_fps)
    # Evenly spaced indices within the requested [start, end] frame range.
    idx = torch.linspace(start_frame, end_frame, nframes).round().long().tolist()
    video = vr.get_batch(idx).asnumpy()
    video = torch.tensor(video).permute(0, 3, 1, 2)  # Convert to TCHW format
    logger.info(f"decord: {video_path=}, {total_frames=}, {video_fps=}, time={time.time() - st:.3f}s")
    sample_fps = nframes / max(total_frames, 1e-6) * video_fps
    return video, sample_fps
321
+
322
+
323
def is_torchcodec_available() -> bool:
    """Check if torchcodec is available and properly installed.

    Goes beyond a spec lookup: actually importing VideoDecoder verifies
    that native dependencies resolve, so a broken install reports False.
    """
    try:
        import importlib.util
        if importlib.util.find_spec("torchcodec") is None:
            return False
        from torchcodec.decoders import VideoDecoder  # noqa: F401
        return True
    # Exception already covers ImportError/AttributeError; any import-time
    # failure simply means the backend is unusable.
    except Exception:
        return False
333
+
334
+
335
def _read_video_torchcodec(
    ele: dict,
) -> (torch.Tensor, float):
    """read video using torchcodec.decoders.VideoDecoder

    Args:
        ele (dict): a dict contains the configuration of video.
        support keys:
            - video: the path of video. support "file://", "http://", "https://" and local path.
            - video_start: the start time of video.
            - video_end: the end time of video.
    Returns:
        torch.Tensor: the video tensor with shape (T, C, H, W).
        float: the effective sampling fps of the returned frames.
    """
    # torchcodec is optional; imported lazily so the module loads without it.
    from torchcodec.decoders import VideoDecoder
    # Decoder thread count is tunable via environment variable (default 8).
    TORCHCODEC_NUM_THREADS = int(os.environ.get('TORCHCODEC_NUM_THREADS', 8))
    logger.info(f"set TORCHCODEC_NUM_THREADS: {TORCHCODEC_NUM_THREADS}")
    video_path = ele["video"]
    st = time.time()
    decoder = VideoDecoder(video_path, num_ffmpeg_threads=TORCHCODEC_NUM_THREADS)
    video_fps = decoder.metadata.average_fps
    total_frames = decoder.metadata.num_frames
    start_frame, end_frame, total_frames = calculate_video_frame_range(
        ele,
        total_frames,
        video_fps,
    )
    nframes = smart_nframes(ele, total_frames=total_frames, video_fps=video_fps)
    # Evenly spaced indices within the requested [start, end] frame range.
    idx = torch.linspace(start_frame, end_frame, nframes).round().long().tolist()
    sample_fps = nframes / max(total_frames, 1e-6) * video_fps
    video = decoder.get_frames_at(indices=idx).data
    logger.info(f"torchcodec: {video_path=}, {total_frames=}, {video_fps=}, time={time.time() - st:.3f}s")
    return video, sample_fps
368
+
369
+
370
# Dispatch table of video decoding backends, keyed by backend name.
VIDEO_READER_BACKENDS = {
    "decord": _read_video_decord,
    "torchvision": _read_video_torchvision,
    "torchcodec": _read_video_torchcodec,
}

# Optional env override forcing a specific backend regardless of availability.
FORCE_QWENVL_VIDEO_READER = os.getenv("FORCE_QWENVL_VIDEO_READER", None)
377
+
378
+
379
@lru_cache(maxsize=1)
def get_video_reader_backend() -> str:
    """Choose the video-reading backend once and cache the decision.

    Honors the FORCE_QWENVL_VIDEO_READER override, otherwise prefers
    torchcodec, then decord, falling back to torchvision.
    """
    if FORCE_QWENVL_VIDEO_READER is not None:
        backend = FORCE_QWENVL_VIDEO_READER
    elif is_torchcodec_available():
        backend = "torchcodec"
    elif is_decord_available():
        backend = "decord"
    else:
        backend = "torchvision"
    print(f"qwen-vl-utils using {backend} to read video.", file=sys.stderr)
    return backend
391
+
392
+
393
def fetch_video(ele: dict, image_factor: int = IMAGE_FACTOR, return_video_sample_fps: bool = False) -> torch.Tensor | list[Image.Image]:
    """Load the video referenced by a content element.

    A string ``ele["video"]`` is decoded with the selected backend and
    returned as a resized float tensor (T, C, H, W); a list/tuple is treated
    as pre-extracted frames and returned as a list of PIL images.
    """
    if isinstance(ele["video"], str):
        video_reader_backend = get_video_reader_backend()
        try:
            video, sample_fps = VIDEO_READER_BACKENDS[video_reader_backend](ele)
        except Exception as e:
            # Fall back to torchvision if the preferred backend fails.
            logger.warning(f"video_reader_backend {video_reader_backend} error, use torchvision as default, msg: {e}")
            video, sample_fps = VIDEO_READER_BACKENDS["torchvision"](ele)

        nframes, _, height, width = video.shape
        min_pixels = ele.get("min_pixels", VIDEO_MIN_PIXELS)
        total_pixels = ele.get("total_pixels", VIDEO_TOTAL_PIXELS)
        # Per-frame pixel budget derived from the total token budget,
        # never below a small margin above min_pixels.
        max_pixels = max(min(VIDEO_MAX_PIXELS, total_pixels / nframes * FRAME_FACTOR), int(min_pixels * 1.05))
        max_pixels_supposed = ele.get("max_pixels", max_pixels)
        if max_pixels_supposed > max_pixels:
            logger.warning(f"The given max_pixels[{max_pixels_supposed}] exceeds limit[{max_pixels}].")
        max_pixels = min(max_pixels_supposed, max_pixels)
        if "resized_height" in ele and "resized_width" in ele:
            resized_height, resized_width = smart_resize(
                ele["resized_height"],
                ele["resized_width"],
                factor=image_factor,
            )
        else:
            resized_height, resized_width = smart_resize(
                height,
                width,
                factor=image_factor,
                min_pixels=min_pixels,
                max_pixels=max_pixels,
            )
        video = transforms.functional.resize(
            video,
            [resized_height, resized_width],
            interpolation=InterpolationMode.BICUBIC,
            antialias=True,
        ).float()
        if return_video_sample_fps:
            return video, sample_fps
        return video
    else:
        # Frame-list input: fetch each frame as an image with shared options.
        assert isinstance(ele["video"], (list, tuple))
        process_info = ele.copy()
        process_info.pop("type", None)
        process_info.pop("video", None)
        images = [
            fetch_image({"image": video_element, **process_info}, size_factor=image_factor)
            for video_element in ele["video"]
        ]
        # Pad by repeating the last frame so the count is a FRAME_FACTOR multiple.
        nframes = ceil_by_factor(len(images), FRAME_FACTOR)
        if len(images) < nframes:
            images.extend([images[-1]] * (nframes - len(images)))
        if return_video_sample_fps:
            return images, process_info.pop("fps", 2.0)
        return images
448
+
449
+
450
+ def extract_vision_info(conversations: list[dict] | list[list[dict]]) -> list[dict]:
451
+ vision_infos = []
452
+ if isinstance(conversations[0], dict):
453
+ conversations = [conversations]
454
+ for conversation in conversations:
455
+ for message in conversation:
456
+ if isinstance(message["content"], list):
457
+ for ele in message["content"]:
458
+ if (
459
+ "image" in ele
460
+ or "image_url" in ele
461
+ or "video" in ele
462
+ or ele.get("type","") in ("image", "image_url", "video")
463
+ ):
464
+ vision_infos.append(ele)
465
+ return vision_infos
466
+
467
+
468
def process_vision_info(
    conversations: list[dict] | list[list[dict]],
    return_video_kwargs: bool = False,
) -> tuple[list[Image.Image] | None, list[torch.Tensor | list[Image.Image]] | None, Optional[dict]]:
    """Fetch all image and video inputs referenced by the conversations.

    Returns (images, videos) — each None when empty — and, when
    ``return_video_kwargs`` is True, a dict with per-video sampling fps.
    """
    image_inputs = []
    video_inputs = []
    fps_list = []
    for info in extract_vision_info(conversations):
        if "image" in info or "image_url" in info:
            image_inputs.append(fetch_image(info))
        elif "video" in info:
            frames, sample_fps = fetch_video(info, return_video_sample_fps=True)
            fps_list.append(sample_fps)
            video_inputs.append(frames)
        else:
            raise ValueError("image, image_url or video should in content.")
    # Normalize empty collections to None, matching the processor's contract.
    image_inputs = image_inputs or None
    video_inputs = video_inputs or None
    if return_video_kwargs:
        return image_inputs, video_inputs, {'fps': fps_list}
    return image_inputs, video_inputs