File size: 5,660 Bytes
ba1d61a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
import os
import json
import argparse
from tqdm import tqdm
import time
import torch
from transformers import AutoModelForCausalLM, AutoProcessor

# Benchmark difficulty tiers scanned (as subdirectories of the CWD) by main().
LEVEL_DIRS = ["level1", "level2", "level3"]
# Substring identifying generated result files; JSON files containing it are
# skipped as inputs and produced as outputs (see process_task).
GENERIC_RESULT_PATTERN = "_result.json"

def get_media_type(file_path: str) -> str:
    """Classify *file_path* as 'video' or 'image' from its extension.

    Raises:
        ValueError: if the extension is not a recognized media type.
    """
    video_extensions = {'.mp4', '.avi', '.mov', '.mkv', '.webm'}
    image_extensions = {'.jpg', '.jpeg', '.png', '.bmp', '.gif', '.webp'}

    ext = os.path.splitext(file_path)[1].lower()
    if ext in video_extensions:
        return 'video'
    if ext in image_extensions:
        return 'image'
    raise ValueError(f"Unsupported file extension: {ext} in file {file_path}")

def process_task(task_path: str, model, processor, result_suffix: str, device: str):
    """Run the model over every source dataset JSON in *task_path*.

    For each ``<name>.json`` in the directory (excluding already-generated
    result files), runs inference on every entry and writes
    ``<name><result_suffix>`` next to it. Existing result files are skipped,
    so interrupted runs can resume.

    Args:
        task_path: Directory containing dataset JSON files and media files.
        model: A loaded causal-LM model exposing ``generate``.
        processor: The matching processor (conversation -> tensors, decode).
        result_suffix: Suffix (including extension) for output files.
        device: Device string (e.g. "cuda:0") the input tensors are moved to.
    """
    source_json_files = [
        f for f in os.listdir(task_path)
        if f.endswith('.json') and GENERIC_RESULT_PATTERN not in f
    ]

    if not source_json_files:
        return

    for json_filename in source_json_files:
        dataset_json_path = os.path.join(task_path, json_filename)
        result_json_path = os.path.join(task_path, f"{os.path.splitext(json_filename)[0]}{result_suffix}")

        # Resume support: never overwrite an existing result file.
        if os.path.exists(result_json_path):
            continue

        try:
            with open(dataset_json_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
        except (json.JSONDecodeError, FileNotFoundError) as e:
            # Best-effort: skip unreadable datasets, but leave a trace.
            print(f"Skipping unreadable dataset {dataset_json_path}: {e}")
            continue

        all_results = []
        for item in tqdm(data, desc=f"  Processing {json_filename}"):
            start_time = time.time()
            model_output = "N/A"
            # Pre-initialize so the result record can be built even when the
            # per-item try block fails before these are assigned (previously
            # an UnboundLocalError crashed the whole task on such items).
            prompt_text = "N/A"
            ground_truth = "N/A"

            try:
                prompt_text = item['conversations'][0]['value']
                ground_truth = item['conversations'][1]['value']
                media_path_key = 'image' if 'image' in item else 'video'
                media_relative_path = item.get(media_path_key)

                if not media_relative_path:
                    raise ValueError("Missing 'image' or 'video' key in JSON entry.")

                media_full_path = os.path.join(task_path, media_relative_path)
                if not os.path.exists(media_full_path):
                    raise FileNotFoundError(f"Media file not found: {media_full_path}")

                media_type = get_media_type(media_full_path)
                # Placeholder tokens are re-inserted by the processor.
                clean_prompt = prompt_text.replace("<image>", "").replace("<video>", "").strip()

                if media_type == 'image':
                    media_content = {"type": "image", "image": {"image_path": media_full_path}}
                else:
                    media_content = {"type": "video", "video": {"video_path": media_full_path, "fps": 1, "max_frames": 128}}

                conversation = [
                    {"role": "system", "content": "You are a helpful assistant."},
                    {
                        "role": "user",
                        "content": [
                            media_content,
                            {"type": "text", "text": clean_prompt},
                        ]
                    },
                ]

                inputs = processor(conversation=conversation, return_tensors="pt")
                inputs = {k: v.to(device) if isinstance(v, torch.Tensor) else v for k, v in inputs.items()}
                if "pixel_values" in inputs:
                    inputs["pixel_values"] = inputs["pixel_values"].to(torch.bfloat16)

                # Inference only: disable autograd to avoid building a graph.
                with torch.no_grad():
                    output = model.generate(**inputs, max_new_tokens=1024, do_sample=False)
                response = processor.batch_decode(output, skip_special_tokens=True)[0].strip()

                # Strip everything up to and including the assistant-turn
                # marker. Search case-insensitively but slice the original
                # string (the old code split the original-case string with a
                # lowercase marker, so e.g. "Assistant\n" was detected but
                # never stripped).
                lowered = response.lower()
                for marker in ('assistant\n', 'assistant:'):
                    idx = lowered.find(marker)
                    if idx != -1:
                        response = response[idx + len(marker):].strip()
                        break
                model_output = response

            except Exception as e:
                # Record per-item failures instead of aborting the batch.
                model_output = f"ERROR: {str(e)}"

            end_time = time.time()
            all_results.append({
                "id": item.get('id', 'N/A'),
                "prompt": prompt_text,
                "model_output": model_output,
                "ground_truth": ground_truth,
                "processing_time_seconds": round(end_time - start_time, 2)
            })

        with open(result_json_path, 'w', encoding='utf-8') as f:
            json.dump(all_results, f, indent=4, ensure_ascii=False)


def main():
    """CLI entry point: load the model once, then sweep all level/task dirs.

    Walks ``level1``..``level3`` under the current working directory and
    calls :func:`process_task` on every task subdirectory found.
    """
    parser = argparse.ArgumentParser(description="Batch processing script for a local multimodal model using the Transformers library.")

    parser.add_argument("--model-path", required=True, help="Full path to the local model directory")
    parser.add_argument("--result-suffix", required=True, help="Suffix for the generated result files")
    parser.add_argument("--device", default="cuda:0", help="Device to run the model")

    args = parser.parse_args()
    try:
        model = AutoModelForCausalLM.from_pretrained(
            args.model_path,
            trust_remote_code=True,
            torch_dtype=torch.bfloat16,
            attn_implementation="flash_attention_2",
            device_map=args.device
        )
        processor = AutoProcessor.from_pretrained(args.model_path, trust_remote_code=True)
    except Exception as e:
        # Previously this failed silently; surface the cause before exiting.
        print(f"Failed to load model/processor from {args.model_path}: {e}")
        return

    for level_dir in LEVEL_DIRS:
        level_path = os.path.join(os.getcwd(), level_dir)
        if not os.path.isdir(level_path):
            continue

        # Deterministic processing order across runs.
        task_dirs = sorted([d.path for d in os.scandir(level_path) if d.is_dir()])
        for task_path in task_dirs:
            process_task(task_path, model, processor, args.result_suffix, args.device)


if __name__ == "__main__":
    main()