import os

from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor

from utils.utils import COUNTING_BASE_PROMPT, RELATION_BASE_PROMPT, parse_model_answer, eval_pipeline
from utils.vision_process import process_vision_info
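# process_vision_info appears to be a vendored copy of the helper from
# Qwen's qwen-vl-utils package (qwen_vl_utils.process_vision_info).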


# Resolve paths relative to the repo root; image paths in the eval data
# are stored relative to the project directory.
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)


def process(model, item, task_type="counting", **kwargs):
    """Run one eval item through the model; return raw and parsed answers."""
    processor = kwargs.get('processor')
    question = item['question']
    image_path = os.path.join(parent_dir, item['image_path'])
    # Prepend the task-specific base prompt to the raw question.
    if task_type == "counting":
        formatted_question = COUNTING_BASE_PROMPT + question
    else:
        formatted_question = RELATION_BASE_PROMPT + question

    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": image_path,
                },
                {"type": "text", "text": formatted_question},
            ],
        }
    ]
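    # Standard Qwen2.5-VL preprocessing: render the chat template to a prompt
    # string, extract image/video inputs from the messages, then tokenize text
    # and vision inputs together.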
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to(model.device)
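    # Generate, then slice off the echoed prompt tokens so that only the
    # newly generated answer is decoded.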
    generated_ids = model.generate(**inputs, max_new_tokens=1000)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    model_answer = output_text[0]
    parsed_answer = parse_model_answer(model_answer, task_type)
    return {'model_answer': model_answer, 'parsed_answer': parsed_answer}


def main():
    model_name_or_path = 'Qwen/Qwen2.5-VL-3B-Instruct'
    model_name = model_name_or_path.split('/')[-1]
    cache_dir = './cache/'

    # device_map="auto" shards the model across available devices and
    # requires the accelerate package to be installed.
    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        model_name_or_path, torch_dtype="auto", device_map="auto", cache_dir=cache_dir
    )
    processor = AutoProcessor.from_pretrained(model_name_or_path, cache_dir=cache_dir)
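    # eval_pipeline (from utils.utils) presumably iterates the eval set,
    # calls process_fn on each item, and writes results under current_dir.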
    params = {
        'model': model,
        'processor': processor,
        'process_fn': process,
    }
    eval_pipeline(model_name, current_dir, params)


if __name__ == "__main__":
    main()