# eval.py — uploaded by qiukingballball (revision ff15a6c, verified)
import json
import os
from datetime import datetime
import torch
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration
from qwen_vl_utils import process_vision_info
# Configuration: model checkpoint and input/output paths, each overridable
# via an environment variable so runs can be redirected without code edits.
MODEL_PATH = os.environ.get("QWEN_VL_MODEL_PATH", "./qwen2_5_vl_model")
DEFAULT_INPUT_JSON = os.environ.get("EVAL_INPUT_JSON", "test.json")
DEFAULT_OUTPUT_JSON = os.environ.get("EVAL_OUTPUT_JSON", "results/qwen_sft_eval_results.json")
# Load model and processor once at import time. device_map="auto" lets
# transformers place the weights on available devices; torch_dtype="auto"
# keeps the checkpoint's native precision.
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_PATH, torch_dtype="auto", device_map="auto"
)
processor = AutoProcessor.from_pretrained(MODEL_PATH)
def build_prompt(action):
    """Build the yes / no / 'Not exists' classification prompt for one action.

    The model is instructed to answer with exactly one of the three labels;
    the caller matches these substrings in the (lower-cased) reply.
    """
    instruction_lines = [
        "Given a video and an action description, reply with one of the following options ONLY:",
        "- 'yes' if the action is completed,",
        "- 'no' if the action is not completed,",
        "- 'Not exists' if the action in the video does not match the given action.",
        "",
        f"Action: {action}",
    ]
    return "\n".join(instruction_lines)
def call_qwen_vl_with_video(prompt, video_path, max_pixels=151200, fps=0.5):
    """Run one video+text query through the model and return the decoded reply.

    Parameters:
        prompt: text instruction placed after the video in the user message.
        video_path: local path to the video file.
        max_pixels: per-frame pixel budget forwarded to the vision preprocessor.
        fps: frame sampling rate (0.5 presumably means one frame every 2 s —
            confirm against qwen_vl_utils semantics).

    Returns the model reply stripped and lower-cased, or an "Error: ..."
    sentinel string (the caller detects failures via startswith("Error:")).
    """
    if not os.path.exists(video_path):
        print(f"[error] Video not found: {video_path}")
        return "Error: Video not found"
    try:
        # Chat-format message: video content first, then the text prompt.
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "video",
                        "video": f"file://{video_path}",
                        "max_pixels": max_pixels,
                        "fps": fps,
                    },
                    {"type": "text", "text": prompt},
                ],
            }
        ]
        # Render the chat template, then let qwen_vl_utils extract/decode the
        # visual inputs the processor expects.
        text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        image_inputs, video_inputs = process_vision_info(messages)
        inputs = processor(
            text=[text],
            images=image_inputs,
            videos=video_inputs,
            fps=fps,
            padding=True,
            return_tensors="pt",
        ).to(model.device)
        # Small generation budget: the answer is one of three short labels.
        generated_ids = model.generate(**inputs, max_new_tokens=20)
        # Drop the echoed prompt tokens, keeping only the newly generated tail.
        generated_ids_trimmed = [
            out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
        ]
        output_text = processor.batch_decode(
            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        # Lower-case so the caller's substring matching is case-insensitive.
        return output_text[0].strip().lower()
    except Exception as exc:
        # Best-effort: log and return a sentinel instead of aborting the run.
        print(f"[error] Failed to process {video_path}: {exc}")
        return "Error: Video processing failed"
def get_processed_videos(output_json_path):
    """Return the set of video paths already recorded in the results file.

    Used to resume an interrupted evaluation: any video in this set is
    skipped. Returns an empty set when the file is missing, unreadable, or
    not a JSON list, and silently skips malformed entries, so a corrupt
    results file can never abort a fresh run (previously a single entry
    without a "video" key raised KeyError).
    """
    if not os.path.exists(output_json_path):
        return set()
    try:
        with open(output_json_path, "r", encoding="utf-8") as file:
            data = json.load(file)
    except (json.JSONDecodeError, OSError):
        # OSError also covers FileNotFoundError (file deleted between the
        # existence check and the open) plus permission/read failures.
        return set()
    if not isinstance(data, list):
        return set()
    # Tolerate malformed rows: keep only dict entries that carry "video".
    return {item["video"] for item in data if isinstance(item, dict) and "video" in item}
def save_result_json_list(output_path, result_entry):
    """Append one result entry to the JSON list stored at *output_path*.

    Stamps the entry with the current wall-clock time (note: this mutates
    the caller's dict), then rewrites the whole file with the entry
    appended. A missing, unreadable, or non-list results file is replaced
    by a fresh list (previously valid-but-non-list JSON such as "{}"
    crashed on data.append, and read errors other than decode errors were
    unhandled).
    """
    result_entry["timestamp"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    data = []
    if os.path.exists(output_path):
        try:
            with open(output_path, "r", encoding="utf-8") as file:
                loaded = json.load(file)
            # Only reuse the existing content when it really is a list.
            if isinstance(loaded, list):
                data = loaded
        except (json.JSONDecodeError, OSError):
            # Corrupt or unreadable file: start a fresh list rather than abort.
            pass
    data.append(result_entry)
    with open(output_path, "w", encoding="utf-8") as file:
        json.dump(data, file, indent=2, ensure_ascii=False)
def evaluate(json_path, output_json_path):
    """Score the model on every item in *json_path*, appending per-video
    results to *output_json_path* and printing session and overall accuracy.

    Each input item is read for "video", "action", and "if_finish" keys
    (assumption from the accesses below — confirm against the dataset file).
    Videos already present in the results file are skipped, so interrupted
    runs resume where they left off.
    """
    processed_videos = get_processed_videos(output_json_path)
    print(f"[info] Previously processed videos: {len(processed_videos)}")
    with open(json_path, "r", encoding="utf-8") as file:
        data = json.load(file)
    total = 0    # videos scored in THIS session
    correct = 0  # of those, predictions matching ground truth
    skipped = 0  # already present in the results file
    for idx, item in enumerate(data):
        video_path = item["video"]
        action = item["action"]
        ground_truth = item["if_finish"].strip().lower()
        if video_path in processed_videos:
            print(
                f"[{idx + 1}/{len(data)}] Skip {os.path.basename(video_path)} (already processed)"
            )
            skipped += 1
            continue
        print(f"[{idx + 1}/{len(data)}] Process {os.path.basename(video_path)}")
        # Missing files are skipped without being recorded, so they will be
        # retried (and re-reported) on the next run.
        if not os.path.exists(video_path):
            print(f"[warning] Missing file: {video_path}")
            continue
        prompt = build_prompt(action)
        prediction = call_qwen_vl_with_video(prompt, video_path)
        # Sentinel errors from the model call: skip without recording.
        if prediction.startswith("Error:"):
            print(f"[warning] Skipping {video_path}: {prediction}")
            continue
        # Label extraction by substring. Order matters: "not exists" must be
        # checked before "no", because "no" is a substring of "not exists".
        # (prediction is already lower-cased by call_qwen_vl_with_video.)
        if "not exists" in prediction:
            predicted_label = "Not exists"
        elif "yes" in prediction:
            predicted_label = "yes"
        elif "no" in prediction:
            predicted_label = "no"
        else:
            print(f"[warning] Unrecognized output: {prediction}")
            predicted_label = "Unknown"
        # Case-insensitive compare (ground_truth is already lower-cased;
        # predicted_label may be "Not exists"/"Unknown").
        is_correct = predicted_label.lower() == ground_truth.lower()
        result_entry = {
            "video": video_path,
            "action": action,
            "ground_truth": ground_truth,
            "prediction": predicted_label,
            "raw_output": prediction,
            "correct": is_correct,
        }
        # Persist after every video so progress survives interruption.
        save_result_json_list(output_json_path, result_entry)
        total += 1
        if is_correct:
            correct += 1
    if total > 0:
        current_accuracy = correct / total
        print(f"[session] Accuracy: {current_accuracy * 100:.2f}% ({correct}/{total})")
    else:
        print("[session] No new videos processed.")
    # Overall accuracy across ALL runs, recomputed from the results file.
    # NOTE(review): all_total counts UNIQUE video paths while all_correct
    # counts entries; if the results file ever held duplicate videos the
    # ratio could exceed 1 — confirm entries are unique per video.
    processed_entries = get_processed_videos(output_json_path)
    all_total = len(processed_entries)
    if all_total > 0:
        with open(output_json_path, "r", encoding="utf-8") as file:
            results_data = json.load(file)
        all_correct = sum(1 for item in results_data if item.get("correct", False))
        overall_accuracy = all_correct / all_total
        print(f"[overall] Accuracy: {overall_accuracy * 100:.2f}% ({all_correct}/{all_total})")
    print(f"[overall] Skipped videos: {skipped}")
def main():
    """Entry point: ensure the results directory exists, then run evaluation."""
    input_path = DEFAULT_INPUT_JSON
    results_path = DEFAULT_OUTPUT_JSON
    parent_dir = os.path.dirname(results_path)
    # A bare filename has no parent directory; only create one when present.
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    print("Starting Qwen2.5-VL evaluation...")
    evaluate(input_path, results_path)


if __name__ == "__main__":
    main()