Upload miomio.py with huggingface_hub
Browse files
miomio.py
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Evaluate Qwen2.5-VL-7B on MCQA-style fire dataset using multiple GPUs for model loading.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import json
|
| 6 |
+
import re
|
| 7 |
+
import torch
|
| 8 |
+
import os
|
| 9 |
+
import csv
|
| 10 |
+
from PIL import Image
|
| 11 |
+
from tqdm import tqdm
|
| 12 |
+
from transformers import AutoProcessor, AutoModelForVision2Seq
|
| 13 |
+
import logging
|
| 14 |
+
from collections import defaultdict
|
| 15 |
+
from qwen_vl_utils import process_vision_info
|
| 16 |
+
|
| 17 |
+
# --- configuration ---------------------------------------------------------
# Hugging Face model identifier of the vision-language model under evaluation.
MODEL_ID = "Qwen/Qwen2.5-VL-7B-Instruct"
# JSON dataset: maps a relative image path to a scene dict with "QA_pairs"
# (and a "scenario" label) — see the main evaluation loop below.
DATA_FILE = "/home/muzammal/Projects/Qwen-2.5-VL/qa_dataset_concat_cleaned.json"
# Root directory that the relative image paths in DATA_FILE are joined onto.
IMAGE_ROOT = "/home/muzammal/Projects/Qwen-2.5-VL/UniFire_11k/UniFire"
# Generation budget; small on purpose — only a single option letter is expected.
MAX_TOKENS = 12
# Missing/unreadable images are recorded here (see logging setup below).
LOG_FILE = "missing_images_qwen.log"
# Output CSVs: accuracy broken down by question dimension and by fire scenario.
CSV_RESULT_DIM = "vqa_qwen_accuracy_by_dimension.csv"
CSV_RESULT_SCEN = "vqa_qwen_accuracy_by_scenario.csv"

# --- logging setup --------------------------------------------------------
# filemode="w" truncates the log on each run; only WARNING and above are kept.
logging.basicConfig(filename=LOG_FILE, filemode="w", level=logging.WARNING)
| 28 |
+
|
| 29 |
+
# --- helper functions ------------------------------------------------------
def build_prompt(question, options):
    """Assemble the MCQA prompt: <|image|> tag, instruction, question,
    the four options, then a trailing 'Answer:' cue."""
    instruction = (
        "You are given the picture of fire. Answer with the option letter "
        "(A, B, C, or D) only from the given choices directly."
    )
    pieces = [
        "<|image|>",
        instruction,
        "",
        f"Question: {question}",
        options[0],
        options[1],
        options[2],
        options[3],
        "",
        "Answer:",
    ]
    return "\n".join(pieces)
| 41 |
+
|
| 42 |
+
def extract_letter(text):
    """Return the first standalone option letter (A-D, either case) found in
    *text*, uppercased; return '' when no such letter is present."""
    found = re.search(r"\b([A-D])\b", text, re.IGNORECASE)
    if found is None:
        return ""
    return found.group(1).upper()
| 45 |
+
|
| 46 |
+
# --- load model & processor with multi-GPU --------------------------------
# NOTE(review): CUDA_VISIBLE_DEVICES is set *after* `import torch`. This only
# works because torch initializes CUDA lazily — it must stay ahead of the
# first CUDA call, or the restriction is silently ignored. Exporting the
# variable before launching Python would be the safer pattern; confirm intent.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,3"  # Use GPU 0,1,3

processor = AutoProcessor.from_pretrained(MODEL_ID)
# device_map="auto" lets accelerate shard the 7B model across the visible
# GPUs; fp16 when CUDA is available, fp32 CPU fallback otherwise.
model = AutoModelForVision2Seq.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto"
)
| 55 |
+
|
| 56 |
+
# --- main evaluation loop --------------------------------------------------
# Dataset shape: {relative_image_path: {"scenario": str, "QA_pairs": [...]}}.
# encoding="utf-8" is explicit so the (partly non-ASCII) dataset loads
# identically regardless of the platform's default locale encoding.
with open(DATA_FILE, encoding="utf-8") as f:
    dataset = json.load(f)

correct = 0    # running count of correctly answered questions
results = []   # one record per question: id, question, scenario, gt, pred, is_correct
missing = []   # scene paths whose image could not be opened

# Accuracy tallies bucketed by question dimension and by fire scenario.
# defaultdict auto-initializes unseen keys to zeroed counters, so no
# explicit setup is needed when a new question/scenario first appears.
dimension_stats = defaultdict(lambda: {"correct": 0, "total": 0})
scenario_stats = defaultdict(lambda: {"correct": 0, "total": 0})

total_qa = 0   # number of QA pairs actually evaluated
| 69 |
+
|
| 70 |
+
# Outer evaluation loop: one iteration per scene image in the dataset.
for scene_path, scene in tqdm(dataset.items(), desc="Evaluating MCQA VQA"):
    print(f"Processing scene: {scene_path}")
    image_path = os.path.join(IMAGE_ROOT, scene_path)
    print(f"Image path: {image_path}")

    # Skip (and log) scenes whose image is missing or unreadable.
    try:
        image = Image.open(image_path).convert("RGB")
    except Exception as e:
        logging.warning(f"Missing or unreadable image: {scene_path} - {e}")
        missing.append(scene_path)
        continue

    scenario = scene.get("scenario", "Unknown")

    # Inner loop: every QA pair attached to this scene shares the same image.
    for i, qa in enumerate(scene.get("QA_pairs", [])):
        qid = f"{scene_path}_{i}"
        options = qa["options"]  # expected: exactly four "A. ..." style strings

        # Build prompt without <image> tag — the image is passed separately
        # through the chat message content, not inline in the text.
        text_prompt = (
            "You are given the picture of fire. Answer with the option letter (A, B, C, or D) only from the given choices directly.\n\n"
            f"Question: {qa['question']}\n"
            f"{options[0]}\n{options[1]}\n{options[2]}\n{options[3]}\n\nAnswer:"
        )

        # Build messages with image and text in the Qwen-VL chat format.
        messages = [{
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": text_prompt},
            ]
        }]

        # Render the chat template to plain text (tokenization happens in the
        # processor call below) and extract the vision inputs.
        chat_prompt = processor.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        image_inputs, video_inputs = process_vision_info(messages)

        inputs = processor(
            text=[chat_prompt],
            images=image_inputs,
            videos=video_inputs,
            return_tensors="pt"
        ).to(model.device)

        output = model.generate(**inputs, max_new_tokens=MAX_TOKENS)
        decoded = processor.batch_decode(output, skip_special_tokens=True)[0]
        # Keep only the assistant turn, dropping the echoed system/user text.
        # NOTE(review): relies on the decoded template containing a literal
        # "assistant\n" separator — confirm against the processor's template.
        assistant_response = decoded.split("assistant\n")[-1].strip()

        # Extract the first standalone A-D letter from the assistant response
        # (parsing the assistant turn only keeps the echoed options from
        # influencing extraction).
        pred = extract_letter(assistant_response)

        # Assumes the ground-truth "answer" string starts with the option
        # letter (e.g. "A. forest") — TODO confirm against the dataset.
        correct_option = qa["answer"][0].upper()

        print("="*50)
        print("[Full decoded output]\n", decoded)
        print("[Assistant-only response]\n", assistant_response)
        print("[Extracted Letter]:", pred)
        print("[Ground Truth]:", correct_option)
        print("="*50)


        is_correct = pred == correct_option
        correct += is_correct  # bool adds as 0/1
        total_qa += 1

        # Update the per-dimension (question text) and per-scenario tallies.
        question_text = qa["question"]
        dimension_stats[question_text]["total"] += 1
        scenario_stats[scenario]["total"] += 1
        if is_correct:
            dimension_stats[question_text]["correct"] += 1
            scenario_stats[scenario]["correct"] += 1

        results.append({
            "id": qid,
            "question": question_text,
            "scenario": scenario,
            "gt": correct_option,
            "pred": pred,
            "is_correct": is_correct
        })
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def _write_accuracy_csv(path, key_header, stats):
    """Write one accuracy-breakdown CSV with columns:
    key_header, Correct, Total, Accuracy (percentage string)."""
    with open(path, mode="w", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow([key_header, "Correct", "Total", "Accuracy"])
        for key, s in stats.items():
            # Guard against empty buckets to avoid ZeroDivisionError.
            acc = s["correct"] / s["total"] if s["total"] > 0 else 0
            writer.writerow([key, s["correct"], s["total"], f"{acc:.2%}"])

# --- summary ---------------------------------------------------------------
accuracy = correct / total_qa if total_qa > 0 else 0
print(f"\nEvaluated {total_qa} QA pairs.")
print(f"Missing images: {len(missing)} logged in {LOG_FILE}")
print(f"Overall accuracy: {accuracy:.2%}\n")

# --- write per-dimension and per-scenario accuracy to CSV ------------------
# Both breakdowns share the same format, so one helper writes both files.
_write_accuracy_csv(CSV_RESULT_DIM, "Question Dimension", dimension_stats)
_write_accuracy_csv(CSV_RESULT_SCEN, "Fire Scenario", scenario_stats)

print(f"Per-dimension accuracy written to {CSV_RESULT_DIM}")
print(f"Per-scenario accuracy written to {CSV_RESULT_SCEN}")
|