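"""Evaluate a Qwen2.5-VL checkpoint on an action-completion task.

For every entry in the input JSON, the script asks the model whether the
described action in the referenced video is completed ('yes'), not completed
('no'), or absent ('Not exists'), then appends the prediction to a results
JSON that doubles as a resume log. Model and file locations are configurable
via the QWEN_VL_MODEL_PATH, EVAL_INPUT_JSON, and EVAL_OUTPUT_JSON
environment variables.
"""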
import json
import os
from datetime import datetime

import torch
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration
from qwen_vl_utils import process_vision_info

MODEL_PATH = os.environ.get("QWEN_VL_MODEL_PATH", "./qwen2_5_vl_model")
DEFAULT_INPUT_JSON = os.environ.get("EVAL_INPUT_JSON", "test.json")
DEFAULT_OUTPUT_JSON = os.environ.get("EVAL_OUTPUT_JSON", "results/qwen_sft_eval_results.json")
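# Expected input JSON, inferred from the keys read in evaluate() below
# (the paths and action text here are illustrative, not from the dataset):
# [
#   {
#     "video": "/path/to/clip.mp4",
#     "action": "pick up the cup",
#     "if_finish": "yes"          # one of "yes" / "no" / "Not exists"
#   },
#   ...
# ]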
# Load the model and processor once at import time; both are shared by all calls.
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_PATH, torch_dtype="auto", device_map="auto"
)
processor = AutoProcessor.from_pretrained(MODEL_PATH)


def build_prompt(action):
    return (
        "Given a video and an action description, reply with one of the following options ONLY:\n"
        "- 'yes' if the action is completed,\n"
        "- 'no' if the action is not completed,\n"
        "- 'Not exists' if the action in the video does not match the given action.\n\n"
        f"Action: {action}"
    )


def call_qwen_vl_with_video(prompt, video_path, max_pixels=151200, fps=0.5):
    """Run one video plus prompt through the model and return its lowercased reply."""
    if not os.path.exists(video_path):
        print(f"[error] Video not found: {video_path}")
        return "Error: Video not found"
    try:
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "video",
                        "video": f"file://{video_path}",
                        "max_pixels": max_pixels,
                        "fps": fps,
                    },
                    {"type": "text", "text": prompt},
                ],
            }
        ]
        text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        image_inputs, video_inputs = process_vision_info(messages)
        inputs = processor(
            text=[text],
            images=image_inputs,
            videos=video_inputs,
            fps=fps,
            padding=True,
            return_tensors="pt",
        ).to(model.device)
        generated_ids = model.generate(**inputs, max_new_tokens=20)
        # Drop the prompt tokens so only the newly generated answer is decoded.
        generated_ids_trimmed = [
            out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
        ]
        output_text = processor.batch_decode(
            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        return output_text[0].strip().lower()
    except Exception as exc:
        print(f"[error] Failed to process {video_path}: {exc}")
        return "Error: Video processing failed"


def get_processed_videos(output_json_path):
    """Return the set of video paths already recorded in the results file."""
    if not os.path.exists(output_json_path):
        return set()
    try:
        with open(output_json_path, "r", encoding="utf-8") as file:
            data = json.load(file)
        return {item["video"] for item in data}
    except (json.JSONDecodeError, FileNotFoundError):
        return set()


def save_result_json_list(output_path, result_entry):
    """Append one result entry to the JSON list on disk, rewriting the whole file."""
    result_entry["timestamp"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    if os.path.exists(output_path):
        try:
            with open(output_path, "r", encoding="utf-8") as file:
                data = json.load(file)
        except json.JSONDecodeError:
            data = []
    else:
        data = []
    data.append(result_entry)
    with open(output_path, "w", encoding="utf-8") as file:
        json.dump(data, file, indent=2, ensure_ascii=False)


def evaluate(json_path, output_json_path):
    processed_videos = get_processed_videos(output_json_path)
    print(f"[info] Previously processed videos: {len(processed_videos)}")
    with open(json_path, "r", encoding="utf-8") as file:
        data = json.load(file)
    total = 0
    correct = 0
    skipped = 0
    for idx, item in enumerate(data):
        video_path = item["video"]
        action = item["action"]
        ground_truth = item["if_finish"].strip().lower()
        # Resume support: skip videos that already have a recorded result.
        if video_path in processed_videos:
            print(
                f"[{idx + 1}/{len(data)}] Skip {os.path.basename(video_path)} (already processed)"
            )
            skipped += 1
            continue
        print(f"[{idx + 1}/{len(data)}] Process {os.path.basename(video_path)}")
        if not os.path.exists(video_path):
            print(f"[warning] Missing file: {video_path}")
            continue
        prompt = build_prompt(action)
        prediction = call_qwen_vl_with_video(prompt, video_path)
        if prediction.startswith("Error:"):
            print(f"[warning] Skipping {video_path}: {prediction}")
            continue
        # Normalize the free-form reply to one of the three expected labels.
        # Check "not exists" first so its "no" substring is not misread as "no".
        if "not exists" in prediction:
            predicted_label = "Not exists"
        elif "yes" in prediction:
            predicted_label = "yes"
        elif "no" in prediction:
            predicted_label = "no"
        else:
            print(f"[warning] Unrecognized output: {prediction}")
            predicted_label = "Unknown"
        is_correct = predicted_label.lower() == ground_truth
        result_entry = {
            "video": video_path,
            "action": action,
            "ground_truth": ground_truth,
            "prediction": predicted_label,
            "raw_output": prediction,
            "correct": is_correct,
        }
        # Persist immediately so an interrupted run can resume where it stopped.
        save_result_json_list(output_json_path, result_entry)
        total += 1
        if is_correct:
            correct += 1
    if total > 0:
        current_accuracy = correct / total
        print(f"[session] Accuracy: {current_accuracy * 100:.2f}% ({correct}/{total})")
    else:
        print("[session] No new videos processed.")
    # Overall accuracy across every entry in the results file, not just this run.
    # Count entries directly rather than the set of unique video paths, so the
    # denominator matches the numerator even if a video appears more than once.
    if os.path.exists(output_json_path):
        with open(output_json_path, "r", encoding="utf-8") as file:
            results_data = json.load(file)
        all_total = len(results_data)
        if all_total > 0:
            all_correct = sum(1 for item in results_data if item.get("correct", False))
            overall_accuracy = all_correct / all_total
            print(f"[overall] Accuracy: {overall_accuracy * 100:.2f}% ({all_correct}/{all_total})")
    print(f"[overall] Skipped videos: {skipped}")


def main():
    json_path = DEFAULT_INPUT_JSON
    output_json_path = DEFAULT_OUTPUT_JSON
    # Make sure the results directory exists before the first write.
    output_dir = os.path.dirname(output_json_path)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    print("Starting Qwen2.5-VL evaluation...")
    evaluate(json_path, output_json_path)


if __name__ == "__main__":
    main()
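# Example invocation (the script filename here is illustrative):
#   QWEN_VL_MODEL_PATH=./qwen2_5_vl_model EVAL_INPUT_JSON=test.json \
#       python eval_qwen_vl.py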