# SitongGong's picture
# Upload folder: ESTP-Bench
# 7f3f41b verified
import json,os
import numpy as np
def load_multiple_json(file_path):
    """Parse a file containing multiple concatenated JSON documents.

    Reads the whole file, then repeatedly decodes one JSON value at a time,
    skipping whitespace between documents.

    Args:
        file_path: path to a UTF-8 text file holding one or more JSON values.

    Returns:
        A list with one Python object per successfully decoded document.
        Parsing stops at the first malformed document (the error is printed,
        not raised), returning everything decoded up to that point.
    """
    with open(file_path, 'r', encoding='utf-8') as handle:
        text = handle.read()
    decoder = json.JSONDecoder()
    objects = []
    cursor = 0
    end = len(text)
    while cursor < end:
        # Advance past inter-document whitespace.
        while cursor < end and text[cursor].isspace():
            cursor += 1
        if cursor == end:
            break
        try:
            parsed, cursor = decoder.raw_decode(text, cursor)
        except json.JSONDecodeError as e:
            # Bail out on the first malformed document.
            print(f"JSON 解析错误: {e}")
            break
        objects.append(parsed)
    return objects
import argparse
def parse_args(argv=None):
    """Parse command-line options for the evaluation script.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``,
            so existing ``parse_args()`` call sites behave exactly as before.

    Returns:
        argparse.Namespace with:
            eval_file: path to an evaluator-output JSON file (default '').
            eval_mode: which score to use — 'all', 'text', or 'response'.
                Restricted to these three values because any other mode
                would leave the per-sample score undefined downstream.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--eval_file', type=str, default='')
    parser.add_argument('--eval_mode', type=str, default='all',
                        choices=['all', 'text', 'response'])
    return parser.parse_args(argv)
args = parse_args()
# ---------------------------------------------------------------------------
# Evaluation-target selection.
# The commented-out lines below are previously evaluated result files kept
# for reference; the LAST uncommented `eval_file` / `eval_model` assignment
# wins (here: the 'EWO' model file assigned furthest down).
# NOTE(review): `args.eval_file` is parsed but never used — the active file
# is hard-coded here. Confirm whether the CLI flag should override it.
# ---------------------------------------------------------------------------
# eval_file = '/root/videollm-online/data/estp_dataset/result/estp_bench_sq_MiniCPMV_passiveevaluator_llama_5_5.json' # tmp_predict_VideollmOnline_v2_correctness
eval_file = '/root/videollm-online/data/estp_dataset/result/estp_bench_sq_MiniCPMV_passiveevaluator_deepseek_1_2.json'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline_5cases/MiniCPMV_fbf_5casesevaluator_deepseek_5_5.json'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline_5cases/MiniCPMV_fbf_5cases_0.175evaluator_deepseek_5_5.json'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline_5cases/MiniCPMV_fbf_5cases_0.175evaluator_deepseek_1_2.json'
eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175_v2evaluator_deepseek_1_2.json'
eval_model = 'MiniCPMV'
# eval_file = '/root/videollm-online/data/estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_1_2.json'
# eval_file = '/root/videollm-online/data/estp_dataset/estp_bench_sq_VideollmOnline0.8evaluator_deepseek_1_2.json'
# # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours/LIVE_IT0.95.json'
# eval_model = 'VideollmOnline' # VideollmOnline MiniCPMV
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/MMDuetevaluator_deepseek_1_2.json'
# eval_model = 'MMDuet'
# eval_file = '/root/videollm-online/data/estp_dataset/result/estp_bench_sq_EWO_frame_by_frame_v6_fusion_dinov2evaluator_llama_5_5.json'
# eval_file = '/root/videollm-online/data/estp_dataset/result/estp_bench_sq_EWO_frame_by_frame_fitVal_5_cases_v2evaluator_llama_5_5.json'
# eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3Firstevaluator_llama_5_5.json'
# eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOInDomainITstage2evaluator_llama_5_5.json'
# eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2evaluator_llama_5_5.json'
# eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3First0.6evaluator_llama_5_5.json'
# eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3First0.7evaluator_llama_5_5.json'
# eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3highv2evaluator_llama_5_5.json'
# eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3firstv2evaluator_llama_5_5.json'
# eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3HighRegion_evaluator_llama_5_5.json'
# eval_file = '/root/videollm-online/data/estp_dataset/train_/estpBenchSq5Cases_fbf_beaconlivel_h_stage2_v2evaluator_deepseek_5_5.json'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours_5cases/LivebaseStage3_high11_evaluator_deepseek_1_2.json'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours_5cases/LivebaseStage2evaluator_deepseek_5_5.json'
eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours_5cases/LivebaseStage3_high0.31_11evaluator_deepseek_1_2.json'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours_5cases/LivebaseStage3_high11_evaluator_deepseek_1_2.json'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours/LivebaseStage3_high11_evaluator_deepseek_1_2.json'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours/LivebaseStage2evaluator_deepseek_1_2.json'
eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours/LivebaseStage3_high0.31_1_lowevaluator_deepseek_1_2.json'
eval_model = 'EWO'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVAOneVision_passiveevaluator_deepseek_1_2.json'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVAOneVision_fbf_singleQA_0.175evaluator_deepseek_1_2.json'
# eval_model = 'LLaVAOneVision'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVANextVideo7B_passiveevaluator_deepseek_1_2.json'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVANextVideo7B_fbf_singleQA_0.175evaluator_deepseek_1_2.json'
# eval_model = 'LLaVANextVideo7B'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline_5cases/Qwen2VL_fbf_5casesevaluator_deepseek_5_5.json'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/Qwen2VL_fbf_singleQA_0.175evaluator_deepseek_1_2.json'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/Qwen2VL_passiveevaluator_deepseek_1_2.json'
# eval_model = 'Qwen2VL'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/InternVLV28_passiveevaluator_deepseek_1_2.json'
# eval_model = 'InternVLV28'
# eval_model = 'Lavila'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/Lavila_streaming_v2evaluator_deepseek_1_2.json'
# eval_model = 'EgoVLP'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json'
# eval_model = 'CLIP'
# eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json'
# Collect every file in the same directory whose name starts with the chosen
# file's basename (presumably sharded evaluator outputs — confirm naming
# convention), then concatenate all their JSON documents into one list.
parent_dir = os.path.dirname(eval_file)
eval_files = [os.path.join(parent_dir, f) for f in os.listdir(parent_dir) if f.startswith(eval_file.split('/')[-1])]
eval_result = []
for eval_file in eval_files:
    eval_result += load_multiple_json(eval_file)
# The 12 ESTP-Bench task categories. The reporting code below averages the
# first 8 and the last 4 separately, so the order of this list matters.
TASK_TYPES = [
    "Object Recognition",
    "Attribute Perception",
    "Text-Rich Understanding",
    "Object Localization",
    "Object State Change Recognition",
    "Ego Object Localization",
    "Ego Object State Change Recognition",
    "Action Recognition",
    "Object Function",
    "Information Function",
    "Action Reasoning",
    "Task Understanding",
]

# Per-task accumulators, all zero-initialized with identical key sets.
# (Built from one list instead of six hand-copied dict literals.)
task2number = dict.fromkeys(TASK_TYPES, 0)          # questions seen per task
task2score = dict.fromkeys(TASK_TYPES, 0)           # summed F1-style scores
task2recall = dict.fromkeys(TASK_TYPES, 0)          # summed recall fractions
task2recall_score = dict.fromkeys(TASK_TYPES, 0)    # summed recall scores
task2nonrecall_pred = dict.fromkeys(TASK_TYPES, 0)  # summed false-positive counts
task2precision = dict.fromkeys(TASK_TYPES, 0)       # summed precision values
def scoreMean(score):
    """Mean over the per-column maxima of a 2-D score matrix."""
    best_per_column = score.max(axis=0)
    return best_per_column.mean()
def validScoreMean(score):
    """Score a (num_gt x num_pred) match matrix with an FP penalty.

    Args:
        score: 2-D array; rows are ground-truth turns, columns are model
            predictions, entries are match scores (0 means no match).

    Returns:
        Tuple of (mean penalized score, recalled fraction of rows,
        mean score over recalled rows, false-positive count).

    Note: the original had a dead assignment (length-based denominator)
    immediately overwritten by the sum-based one; only the surviving
    formula is kept here.
    """
    num_rows = score.shape[0]
    # False positives: prediction columns that match no ground-truth row.
    nonrecall_pred = (score.max(axis=0) == 0).sum()
    # Spread the FP cost evenly across the ground-truth rows.
    fp_penalty = nonrecall_pred / num_rows
    valid_score = np.zeros(num_rows)
    valid_recall = np.zeros(num_rows)
    valid_recall_score = np.zeros(num_rows)
    for i, row in enumerate(score):
        matched = row[row > 0]
        if len(matched) > 0:
            # Penalize this row's summed score by its FP share.
            valid_score[i] = matched.sum() / (matched.sum() + fp_penalty)
            valid_recall[i] = 1
            valid_recall_score[i] = matched.sum() / len(matched)
    return valid_score.mean(), valid_recall.mean(), valid_recall_score.mean(), nonrecall_pred
# F-beta weight; kept for backward compatibility with earlier weighted-F
# experiments (the shipped F1 below uses the fixed 2*TP formula).
BETA = 1

def validScoreF1(score):
    """Compute an F1-style metric over a (num_gt x num_pred) match matrix.

    Args:
        score: 2-D array; rows are ground-truth turns, columns are model
            predictions, entries are match scores (0 means no match).

    Returns:
        Tuple of (F1, recalled fraction of rows, mean score over recalled
        rows, false-positive count, precision). All zeros when there are
        no predictions (zero columns).

    Note: the original computed an F-beta value that was unconditionally
    overwritten by the 2*TP formula; that dead branch (the only user of
    BETA) has been removed without changing behavior.
    """
    if score.shape[1] == 0:
        # No predictions at all: nothing recalled, nothing false-positive.
        return 0, 0, 0, 0, 0
    # False positives: prediction columns that match no ground-truth row.
    FP = (score.max(axis=0) == 0).sum()
    num_rows = score.shape[0]
    valid_score = np.zeros(num_rows)
    valid_recall = np.zeros(num_rows)
    valid_recall_score = np.zeros(num_rows)
    for i, row in enumerate(score):
        matched = row[row > 0]
        if len(matched) > 0:
            # Each recalled row contributes its mean match score.
            mean_match = matched.sum() / len(matched)
            valid_score[i] = mean_match
            valid_recall[i] = 1
            valid_recall_score[i] = mean_match
    # Soft true positives. If TP == 0 then every column is a false positive
    # (FP > 0), so the precision division below cannot be 0/0.
    TP = valid_score.sum()
    precision = TP / (TP + FP)
    # False negatives: ground-truth rows never recalled.
    FN = num_rows - valid_recall.sum()
    F1 = 2 * TP / (2 * TP + FP + FN)
    return F1, valid_recall.mean(), valid_recall_score.mean(), FP, precision
def topkValidScoreMean(score, k=10):
    """Mean of the strictly positive per-row maxima among the first `k` rows."""
    best_per_row = score.max(axis=1)[:k]
    positives = best_per_row[best_per_row > 0]
    return positives.mean()
# Aggregate per-task metrics over every scored sample.
total_fps = 0              # summed per-response FPS values (when reported)
total_kv_cache_size = 0    # summed per-response KV-cache sizes (when reported)
total_response_number = 0  # assistant responses across all samples
total_answer_number = 0    # ground-truth conversation turns across all samples
total_precision = 0        # summed per-sample false-positive ratios
for ll in eval_result:
    # Only samples carrying a prediction for the chosen model are scored.
    if eval_model in ll.keys():
        total_answer_number += len(ll['conversation'])
        this_turn_response_number = 0
        for response in ll[eval_model]:
            if response['role'].lower() == 'assistant':
                if 'fps' in response:
                    total_fps+=response['fps']
                total_response_number += 1
                this_turn_response_number += 1
                if 'kv_cache_size' in response:
                    total_kv_cache_size += response['kv_cache_size']
        task2number[ll['Task Type'].strip()] += 1
        # breakpoint()
        # Evaluator outputs are divided by 10 — presumably a 0-10 rating
        # scale normalized to 0-1; confirm against the evaluator prompt.
        text_score = np.array(ll['evaluator_output_text']) / 10
        reponse_score = np.array(ll['evaluator_output_reponse']) / 10
        # if ll['Task Type'] == 'Ego Object State Change Recognition':
        # EgoVLP/CLIP/Lavila are always scored on the response score alone,
        # regardless of eval_mode.
        if args.eval_mode == 'all' and eval_model not in ['EgoVLP', 'CLIP', 'Lavila']:
            score = (text_score+reponse_score)
        elif args.eval_mode == 'text':
            score = text_score
        elif args.eval_mode == 'response' or eval_model in ['EgoVLP', 'CLIP', 'Lavila']:
            score = reponse_score
        # NOTE(review): if none of the branches above matches, `score` is
        # unbound (or stale from the previous sample) — eval_mode must be
        # one of 'all'/'text'/'response'.
        score_mean, recall_mean, recall_score_mean, nonrecall_pred, precision = validScoreF1(score)
        task2score[ll['Task Type'].strip()] += score_mean
        task2recall[ll['Task Type'].strip()] += recall_mean
        task2recall_score[ll['Task Type'].strip()] += recall_score_mean
        task2nonrecall_pred[ll['Task Type'].strip()] += nonrecall_pred
        task2precision[ll['Task Type'].strip()] += precision
        # Per-sample FP ratio: false positives over this sample's responses.
        total_precision += nonrecall_pred / this_turn_response_number if this_turn_response_number > 0 else 0
# Convert per-task sums into averages (scores/recalls as percentages),
# zeroing tasks with no evaluated questions. Reassigning existing keys
# while iterating .items() is safe — the dict's size never changes.
for k,v in task2score.items():
    if task2number[k] == 0:
        task2score[k] = 0
        task2recall[k] = 0
        task2recall_score[k] = 0
        task2nonrecall_pred[k] = 0
        task2precision[k] = 0
    else:
        # `v` is the summed score for task `k` captured before mutation.
        task2score[k] = v / task2number[k] * 100
        task2recall[k] = task2recall[k] / task2number[k] * 100
        task2recall_score[k] = task2recall_score[k] / task2number[k] * 100
        # Kept as an average count per question, not a percentage.
        task2nonrecall_pred[k] = task2nonrecall_pred[k] / task2number[k]
        task2precision[k] = task2precision[k] / task2number[k] * 100
# ---------------------------------------------------------------------------
# Report: dump the per-task tables and the aggregate metrics.
# The bare `except:` clauses around the averages were narrowed to
# ZeroDivisionError — the only expected failure is an empty valid-score list,
# and a bare except would also swallow SystemExit/KeyboardInterrupt.
# ---------------------------------------------------------------------------
print(json.dumps(task2number, indent=4))
print(json.dumps({k: round(v, 2) for k,v in task2score.items()}, indent=4))
print(json.dumps(task2recall, indent=4))
# print(json.dumps(task2recall_score, indent=4))
# print(json.dumps(task2nonrecall_pred, indent=4))
print("Total question number: ", sum(task2number.values()))
print(f"Average Score: {sum(task2score.values())/len(task2number.values())}")
# Calculate average for first 8 task types
first_8_tasks = list(task2number.keys())[:8]
first_8_scores = [task2score[task] for task in first_8_tasks]
first_8_valid_scores = [score for score in first_8_scores if score > 0]
first_8_recall_scores = [task2recall[task] for task in first_8_tasks]
try:
    print(f"Average Score (First 8 Tasks): {sum(first_8_valid_scores)/len(first_8_valid_scores):.2f}")
except ZeroDivisionError:
    # No task among the first 8 has a positive score.
    print(f"Average Score (First 8 Tasks): 0.0")
# Calculate average for last 4 task types
last_4_tasks = list(task2number.keys())[-4:]
last_4_scores = [task2score[task] for task in last_4_tasks]
last_4_valid_scores = [score for score in last_4_scores if score > 0]
last_4_recall_scores = [task2recall[task] for task in last_4_tasks]
try:
    print(f"Average Score (Last 4 Tasks): {sum(last_4_valid_scores)/len(last_4_valid_scores):.2f}")
except ZeroDivisionError:
    # No task among the last 4 has a positive score.
    print(f"Average Score (Last 4 Tasks): 0.0")
print(f"Average Recall: {sum(task2recall.values())/len(task2number.values())}")
print(f"Average Recall Score: {sum(task2recall_score.values())/len(task2number.values())}")
# NOTE(review): two different "Average Precision" lines are printed — this
# one averages per-task precision; the one further down averages the
# per-sample FP ratio. Confirm which one the report should use.
print(f"Average Precision: {sum(task2precision.values())/len(task2number.values())}")
print(f"Total Question Number: {sum(task2number.values())}")
print(f"Average FPS: {total_fps/total_response_number}")
print(f"Average KV Cache: {total_kv_cache_size/total_response_number}")
print(f"Average Non-Recall Pred: {sum(task2nonrecall_pred.values())/sum(task2number.values())}")
print(f"Average Response Number: {total_response_number/sum(task2number.values())}")
print(f"Average Precision: {total_precision/sum(task2number.values()) * 100}")
print(f"[{sum(task2recall.values())/len(task2number.values()):.1f}, {sum(task2precision.values())/len(task2number.values()):.1f}]")
def generate_latex_table_row():
    """Print the per-task scores as one LaTeX table row (reads module globals)."""
    print("\n# LaTeX format table row")
    task_names = list(task2number.keys())
    cells = [f"& {eval_model} "]
    # First 8 task scores followed by their (positive-only) average.
    cells.extend(f"& {task2score[task]:.1f} " for task in task_names[:8])
    cells.append(f"& {sum(first_8_valid_scores)/len(first_8_valid_scores):.1f} ")
    # Last 4 task scores followed by their (positive-only) average.
    cells.extend(f"& {task2score[task]:.1f} " for task in task_names[-4:])
    cells.append(f"& {sum(last_4_valid_scores)/len(last_4_valid_scores):.1f} ")
    # Overall average (kept disabled, as in the original):
    # print(f"& {sum(task2score.values())/len(task2number.values()):.1f} \\\\")
    print("".join(cells), end="")

generate_latex_table_row()
# def generate_latex_table_row():
# # Generate LaTeX format table row
# print("\n# LaTeX format table row")
# print(f"& {eval_model} ", end="")
# # First 8 tasks
# for task in list(task2number.keys())[:8]:
# print(f"& {task2recall[task]:.1f} ", end="")
# # Average of first 8 tasks
# print(f"& {sum(first_8_recall_scores)/len(first_8_recall_scores):.1f} ", end="")
# # Last 4 tasks
# for task in list(task2number.keys())[-4:]:
# print(f"& {task2recall[task]:.1f} ", end="")
# # Average of last 4 tasks
# print(f"& {sum(last_4_recall_scores)/len(last_4_recall_scores):.1f} ", end="")
# # Overall average
# # print(f"& {sum(task2score.values())/len(task2number.values()):.1f} \\\\")
# generate_latex_table_row()