import json
import os
import numpy as np
def load_multiple_json(file_path):
"""读取包含多个 JSON 对象的文件,并将每个 JSON 对象解析成 Python 对象,存放在列表中。"""
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
decoder = json.JSONDecoder()
pos = 0
results = []
content_length = len(content)
while pos < content_length:
        # Skip whitespace between JSON objects
while pos < content_length and content[pos].isspace():
pos += 1
if pos >= content_length:
break
try:
obj, new_pos = decoder.raw_decode(content, pos)
results.append(obj)
pos = new_pos
except json.JSONDecodeError as e:
            # Stop at the first parse error
            print(f"JSON decode error: {e}")
break
return results
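# Note: the evaluator output files are assumed to contain several JSON objects written
# back to back (e.g. '{...}{...}' or one object per line), which a single json.load()
# call cannot parse; raw_decode() above therefore walks through them one by one.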
# import argparse
# def parse_args():
# parser = argparse.ArgumentParser()
# parser.add_argument('--eval_model', type=str, default='InternVLV28', help='Model to evaluate')
# parser.add_argument('--inference_mode', type=str, default='default', help='Evaluation mode: passive, fbf, streaming')
# return parser.parse_args()
# args = parse_args()
# # Use the command-line argument or the default value
# eval_model = args.eval_model
# # Map each model to its evaluation file path(s)
# model_file_mapping = {
# 'MiniCPMV': {
# 'passive': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/MiniCPMV_passive_v2evaluator_deepseek_1_2.json',
# 'fbf': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/MiniCPMV_fbf_0.175evaluator_deepseek_1_2.json'
# },
# 'Qwen2VL': {
# 'passive': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/Qwen2VL_passive_v2evaluator_deepseek_1_2.json',
# 'fbf': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/Qwen2VL_fbf_0.175evaluator_deepseek_1_2.json'
# },
# 'LLaVAOneVision': {
# 'passive': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/LLaVAOneVision_passive_v2evaluator_deepseek_1_2.json',
# 'fbf': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/LLaVAOneVision_fbf_0.175evaluator_deepseek_1_2.json'
# },
# 'Lavila': {
# 'passive': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/Lavila_streaming_v2evaluator_deepseek_1_2.json'
# },
# 'EgoVLP': {
# 'passive': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json'
# },
# 'CLIP': {
# 'passive': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json'
# },
# 'MMDuet': {
# 'default': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/MMDuetevaluator_deepseek_1_2.json'
# },
# 'LLaVANextVideo7B': {
# 'passive': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/LLaVANextVideo7B_passive_v2evaluator_deepseek_1_2.json',
# 'fbf': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/LLaVANextVideo7B_fbf_0.175evaluator_deepseek_1_2.json'
# },
# 'InternVLV28': {
# 'passive': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/InternVLV28_passive_v2evaluator_deepseek_1_2.json',
# 'fbf': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json'
# },
# 'VideollmOnline': {
# 'default': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/VideollmOnline0.8evaluator_deepseek_1_2.json'
# }
# }
# # Prefer the fbf variant; fall back to passive, then default/streaming
# if eval_model in model_file_mapping:
# # Show the available inference_mode options
# available_modes = list(model_file_mapping[eval_model].keys())
# print("Available inference_mode options:")
# for i, mode in enumerate(available_modes, 1):
# print(f"{i}. {mode}")
# # If the requested inference_mode is not available, ask the user to choose one
# if args.inference_mode not in available_modes:
# choice = input(f"Select an inference_mode (1-{len(available_modes)}): ")
# try:
# choice_idx = int(choice) - 1
# if 0 <= choice_idx < len(available_modes):
# args.inference_mode = available_modes[choice_idx]
# else:
# args.inference_mode = available_modes[0] # default to the first option
# print(f"Invalid choice, using default mode: {args.inference_mode}")
# except ValueError:
# args.inference_mode = available_modes[0] # default to the first option
# print(f"Invalid input, using default mode: {args.inference_mode}")
# eval_file = model_file_mapping[eval_model][args.inference_mode]
# else:
# raise ValueError(f"No evaluation file found for model {eval_model}")
# print(f"Evaluation model: {eval_model}")
# print(f"Evaluation file: {eval_file}")
# Earlier eval_file / eval_model settings are kept below (commented out); only the final pair takes effect.
# eval_file = '/root/videollm-online/data/estp_dataset/estpCqa_baseline/MiniCPMV_passiveevaluator_deepseek_1_2.json' # tmp_predict_VideollmOnline_v2_correctness
# eval_model = 'MiniCPMV'
# eval_file = '/root/videollm-online/data/estp_dataset/estpCqa_baseline/Qwen2VL_passiveevaluator_deepseek_1_2.json' # tmp_predict_VideollmOnline_v2_correctness
# eval_model = 'Qwen2VL'
# eval_file = '/2022233235/videollm-online/data/estp_dataset/estpCqa_baseline/VideollmOnline0.9evaluator_deepseek_1_2.json'
# eval_file = '/2022233235/videollm-online/data/estp_dataset/estpCqa_ours/LIVE_IT0.95evaluator_deepseek_1_2.json'
# eval_file = '/2022233235/videollm-online/data/estp_dataset/estpCqa_ours/LIVE_IT_smoothing_v2evaluator_deepseek_1_2.json'
# eval_model = 'VideollmOnline' # VideollmOnline MiniCPMV
# eval_file = '/root/videollm-online/data/estp_dataset/estpCqa_baseline/LLaVAOneVision_passiveevaluator_deepseek_1_2.json' # tmp_predict_VideollmOnline_v2_correctness
# eval_model = 'LLaVAOneVision'
# eval_file = '/2022233235/videollm-online/data/estp_dataset/estpCqa_ours/LivebaseStage2_v3evaluator_deepseek_1_2.json'
# eval_file = '/2022233235/videollm-online/data/estp_dataset/estpCqa_ours/LivebaseStage2.5evaluator_deepseek_1_2.json'
# eval_file = '/2022233235/videollm-online/data/estp_dataset/estpCqa_ours/LivebaseStage3.5_high0.31_11evaluator_deepseek_1_2.json'
# eval_file = '/2022233235/videollm-online/data/estp_dataset/estpCqa_ours/LivebaseStage3_v3evaluator_deepseek_1_2.json'
# eval_file = '/2022233235/videollm-online/data/estp_dataset/estpCqa_ours/LivebaseStage3.5_v3evaluator_deepseek_1_2.json'
eval_file = '/root/videollm-online/data/estp_dataset/estpCqa_ours/LivebaseStage2_lowevaluator_deepseek_1_2.json'
eval_model = 'EWO'
parent_dir = os.path.dirname(eval_file)
eval_files = [os.path.join(parent_dir, f) for f in os.listdir(parent_dir) if f.startswith(eval_file.split('/')[-1])]
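# Gather every file in the same directory whose name starts with the chosen file's
# basename (presumably shards of the same evaluation run) and merge their records.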
eval_result = []
for eval_file in eval_files:
eval_result += load_multiple_json(eval_file)
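# Per-task accumulators for the two ESTP task types. Each is summed over questions in
# the main loop and (except the raw counts) converted to a percentage average at the end:
#   task2number         - number of questions per task type
#   task2score          - F1-style score returned by validScoreF1
#   task2recall         - fraction of reference answers matched by at least one prediction
#   task2recall_score   - mean evaluator score over the matched answers
#   task2nonrecall_pred - predictions matched by no reference answer (false positives)
#   task2precision      - precision returned by validScoreF1
#   task2bug            - declared but not updated in this script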
task2number = {
"Object Relative Context": 0,
"Task Relative Context": 0,
}
task2recall = {
"Object Relative Context": 0,
"Task Relative Context": 0,
}
task2recall_score = {
"Object Relative Context": 0,
"Task Relative Context": 0,
}
task2score = {
"Object Relative Context": 0,
"Task Relative Context": 0,
}
task2nonrecall_pred = {
"Object Relative Context": 0,
"Task Relative Context": 0,
}
task2precision = {
"Object Relative Context": 0,
"Task Relative Context": 0,
}
task2bug = {
"Object Relative Context": 0,
"Task Relative Context": 0,
}
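# NOTE (inferred from usage below): the evaluator score matrices appear to be shaped
# (num_reference_answers, num_predictions), with entries in [0, 2] after the 0-10
# text and response scores are each divided by 10 and summed.
# scoreMean: best matching answer score per prediction, averaged over predictions.
# Kept for reference; the main loop only calls validScoreF1.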
def scoreMean(score):
score = score.max(axis=0)
return score.mean()
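# validScoreMean: per-answer matched-score ratio penalised by the share of unmatched
# predictions, plus recall and the raw matched-score mean. Unused in the active code
# path; the main loop calls validScoreF1 instead.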
def validScoreMean(score):
# FP
if len(score) == 0:
return 0, 0, 0, 0
nonrecall_pred = (score.max(axis=0) == 0).sum()
# valid_score
valid_score = np.zeros(score.shape[0])
valid_recall = np.zeros(score.shape[0])
valid_recall_score = np.zeros(score.shape[0])
for i,s in enumerate(score):
valid_s = s[s > 0]
if len(valid_s) > 0:
            # valid_score[i] = valid_s.sum() / (len(valid_s) + (nonrecall_pred / score.shape[0]))  # superseded by the line below
            valid_score[i] = valid_s.sum() / (valid_s.sum() + (nonrecall_pred / score.shape[0]))
# valid_score[i] = valid_s.max() / (1 + (nonrecall_pred / score.shape[0]))
# valid_score[i] = valid_s.max() * len(valid_s) / (len(valid_s) + (nonrecall_pred / score.shape[0]))
valid_recall[i] = 1
valid_recall_score[i] = valid_s.sum() / len(valid_s)
return valid_score.mean(), valid_recall.mean(), valid_recall_score.mean(), nonrecall_pred
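# validScoreF1: counts predictions matched by no reference answer as false positives and
# unmatched reference answers as false negatives, returning
# (F1, recall, matched-score mean, FP count, precision). BETA is the F-beta weight used
# only by the superseded F-beta branch inside the function.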
BETA = 1
def validScoreF1(score):
if len(score) == 0:
return 0, 0, 0, 0, 0
# FP
FP = (score.max(axis=0) == 0).sum()
# TP
TP = 0
# valid_score
valid_score = np.zeros(score.shape[0])
valid_recall = np.zeros(score.shape[0])
valid_recall_score = np.zeros(score.shape[0])
for i,s in enumerate(score):
valid_s = s[s > 0]
if len(valid_s) > 0:
            # per-answer mean score over matched predictions (soft true-positive contribution)
valid_score[i] = valid_s.sum() / len(valid_s)
# valid_score[i] = valid_s.max()
valid_recall[i] = 1
valid_recall_score[i] = valid_s.sum() / len(valid_s)
# valid_recall_score[i] = valid_s.max()
TP = valid_score.sum()
precision = TP / (TP + FP)
if np.isnan(precision):
precision = 0
recall = valid_recall_score.mean()
    # Superseded by the direct F1 computation below; kept for reference:
    # if precision == 0 or recall == 0:
    #     F1 = 0
    # else:
    #     F1 = (1 + BETA**2) * precision * recall / ((BETA**2 * precision) + recall)
    # breakpoint()
    F1 = 2*TP / (2*TP + FP + score.shape[0] - valid_recall.sum())
return F1, valid_recall.mean(), valid_recall_score.mean(), FP, precision
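# topkValidScoreMean: best prediction score for each of the first k reference answers,
# averaged over those with a non-zero match. Also unused in the main loop.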
def topkValidScoreMean(score, k=10):
score = score.max(axis=1)
score = score[:k]
valid_score = score[score > 0]
return valid_score.mean()
total_fps = 0
total_kv_cache_size = 0
total_response_number = 0
total_answer_number = 0
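# Each record in eval_result is expected (judging by the fields accessed below) to hold:
#   'conversation'               - the reference dialogue turns
#   <eval_model>                 - the model's turns, each with 'role' and optional
#                                  'fps' / 'kv_cache_size' profiling fields
#   'Task Type'                  - one of the two task-type keys above
#   'evaluator_output_text' and 'evaluator_output_reponse' - evaluator score matrices
#                                  on a 0-10 scale (note the 'reponse' spelling of the key)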
for ll in eval_result:
if eval_model in ll.keys():
total_answer_number += len(ll['conversation'])
for response in ll[eval_model]:
if response['role'].lower() == 'assistant':
if 'fps' in response:
total_fps+=response['fps']
total_response_number += 1
if 'kv_cache_size' in response:
total_kv_cache_size += response['kv_cache_size']
task2number[ll['Task Type'].strip()] += 1
# breakpoint()
text_score = np.array(ll['evaluator_output_text']) / 10
        response_score = np.array(ll['evaluator_output_reponse']) / 10  # the JSON key itself is spelled 'reponse'
        # if ll['Task Type'] == 'Ego Object State Change Recognition':
        score = (text_score + response_score)
try:
score_mean, recall_mean, recall_score_mean, nonrecall_pred, precision = validScoreF1(score)
except:
breakpoint()
task2score[ll['Task Type'].strip()] += score_mean
task2recall[ll['Task Type'].strip()] += recall_mean
task2recall_score[ll['Task Type'].strip()] += recall_score_mean
task2nonrecall_pred[ll['Task Type'].strip()] += nonrecall_pred
task2precision[ll['Task Type'].strip()] += precision
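# Convert the per-task sums into averages expressed as percentages; task types with no
# questions are reported as 0.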
for k,v in task2score.items():
if task2number[k] == 0:
task2score[k] = 0
task2recall[k] = 0
task2recall_score[k] = 0
task2nonrecall_pred[k] = 0
task2precision[k] = 0
else:
task2score[k] = v / task2number[k] * 100
task2recall[k] = task2recall[k] / task2number[k] * 100
task2recall_score[k] = task2recall_score[k] / task2number[k] * 100
# task2nonrecall_pred[k] = task2nonrecall_pred[k] / task2number[k]
task2precision[k] = task2precision[k] / task2number[k] * 100
print(json.dumps(task2number, indent=4))
print(json.dumps({k: round(v, 2) for k,v in task2score.items()}, indent=4))
print(json.dumps(task2recall, indent=4))
print(json.dumps(task2recall_score, indent=4))
print("Total question number: ", sum(task2number.values()))
print(f"Average Score: {sum(task2score.values())/len(task2number.values())}")
print(f"Average Recall: {sum(task2recall.values())/len(task2number.values())}")
print(f"Average Recall Score: {sum(task2recall_score.values())/len(task2number.values())}")
print(f"Average Precision: {sum(task2precision.values())/len(task2number.values())}")
print(f"Total Question Number: {sum(task2number.values())}")
print(f"Average FPS: {total_fps/total_response_number}")
print(f"Average KV Cache: {total_kv_cache_size/total_response_number}")
print(f"Average Non-Recall Pred: {sum(task2nonrecall_pred.values())/sum(task2number.values())}")
print(f"Average Response Number: {total_response_number/sum(task2number.values())}")
print(f"Average Answer Number: {total_answer_number/sum(task2number.values())}")
def generate_latex_table_row():
    # Generate a LaTeX-format table row of per-task scores
    print("\n# LaTeX format table row (score)")
    print(f"& {eval_model} ", end="")
    # Per-task columns (at most the first 8 task types)
for task in list(task2number.keys())[:8]:
print(f"& {task2score[task]:.1f} ", end="")
# Overall average
print(f"& {sum(task2score.values())/len(task2number.values()):.1f}")
generate_latex_table_row()
def generate_latex_table_row():
    # Generate a LaTeX-format table row of per-task recall (redefines the function above)
    print("\n# LaTeX format table row (recall)")
    print(f"& {eval_model} ", end="")
    # Per-task columns (at most the first 8 task types)
for task in list(task2number.keys())[:8]:
print(f"& {task2recall[task]:.1f} ", end="")
# Overall average
print(f"& {sum(task2recall.values())/len(task2number.values()):.1f}")
generate_latex_table_row()