| import json,os |
| import numpy as np |
|
|
def load_multiple_json(file_path):
    """Read a file containing multiple concatenated JSON documents.

    Decodes every top-level JSON value in the file (separated by arbitrary
    whitespace) and returns them as a list of Python objects. Decoding stops
    at the first malformed document: an error message is printed and whatever
    was successfully parsed so far is returned.
    """
    with open(file_path, 'r', encoding='utf-8') as fh:
        text = fh.read()

    decoder = json.JSONDecoder()
    objects = []
    cursor = 0
    end = len(text)

    while cursor < end:
        # Skip inter-document whitespace before attempting the next decode.
        while cursor < end and text[cursor].isspace():
            cursor += 1
        if cursor >= end:
            break
        try:
            parsed, cursor = decoder.raw_decode(text, cursor)
        except json.JSONDecodeError as e:
            print(f"JSON 解析错误: {e}")
            break
        objects.append(parsed)
    return objects
|
|
|
|
import argparse


def parse_args():
    """Parse command-line options for this evaluation script.

    Returns:
        argparse.Namespace with:
            eval_file (str): path to an evaluator output file (default '').
            eval_mode (str): which evaluator score to aggregate — 'all',
                'text' or 'response' (default 'all').
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('--eval_file', type=str, default='')
    ap.add_argument('--eval_mode', type=str, default='all')
    return ap.parse_args()
|
|
args = parse_args()

# ---------------------------------------------------------------------------
# Evaluation-target selection.
# Earlier experiment outputs (previously live reassignments, all of which were
# immediately overwritten), kept here for reference:
#   eval_file = '/root/videollm-online/data/estp_dataset/result/estp_bench_sq_MiniCPMV_passiveevaluator_deepseek_1_2.json'
#   eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175_v2evaluator_deepseek_1_2.json'
#   eval_model = 'MiniCPMV'
#   eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours_5cases/LivebaseStage3_high0.31_11evaluator_deepseek_1_2.json'
# ---------------------------------------------------------------------------
eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours/LivebaseStage3_high0.31_1_lowevaluator_deepseek_1_2.json'
eval_model = 'EWO'

# Honor the --eval_file flag when given; previously it was parsed but ignored.
# The default ('') keeps the hard-coded path above, so behavior is unchanged
# for existing invocations.
if args.eval_file:
    eval_file = args.eval_file

# Evaluator output may be sharded across several files: collect every file in
# the same directory whose name starts with the chosen file's basename and
# concatenate their parsed contents.
parent_dir = os.path.dirname(eval_file)
prefix = os.path.basename(eval_file)
eval_files = [os.path.join(parent_dir, f) for f in os.listdir(parent_dir) if f.startswith(prefix)]
eval_result = []
for shard in eval_files:
    eval_result += load_multiple_json(shard)
|
|
|
|
# The 12 benchmark task types, in canonical order. Downstream code slices the
# first 8 and the last 4 of this ordering when reporting averages, so the
# order matters (dicts preserve insertion order in Python 3.7+).
TASK_TYPES = [
    "Object Recognition",
    "Attribute Perception",
    "Text-Rich Understanding",
    "Object Localization",
    "Object State Change Recognition",
    "Ego Object Localization",
    "Ego Object State Change Recognition",
    "Action Recognition",
    "Object Function",
    "Information Function",
    "Action Reasoning",
    "Task Understanding",
]

# Per-task accumulators, all zero-initialised. Previously six identical
# hand-written dict literals; dict.fromkeys builds a fresh dict each call,
# so the accumulators are independent objects.
task2number = dict.fromkeys(TASK_TYPES, 0)         # samples seen per task
task2score = dict.fromkeys(TASK_TYPES, 0)          # summed per-sample F1
task2recall = dict.fromkeys(TASK_TYPES, 0)         # summed recall
task2recall_score = dict.fromkeys(TASK_TYPES, 0)   # summed recall score
task2nonrecall_pred = dict.fromkeys(TASK_TYPES, 0) # summed unmatched preds
task2precision = dict.fromkeys(TASK_TYPES, 0)      # summed precision
|
|
def scoreMean(score):
    """Mean of the per-column maxima of a 2-D score matrix.

    Reduces with max over axis 0 first, then averages the result.
    (Axis semantics of *score* are not documented here — presumably rows are
    answers and columns are predictions; confirm against callers.)
    """
    best_per_column = score.max(axis=0)
    return best_per_column.mean()
|
|
| |
def validScoreMean(score):
    """Aggregate a 2-D score matrix into (score, recall, recall-score, FP).

    Rows are presumably ground-truth answers and columns predictions (scores
    already normalised to [0, 1]) — confirm against callers.

    Returns:
        tuple of (mean discounted per-row score, mean recall indicator,
        mean score over matched rows only, number of "non-recall" predictions
        — columns whose best match against every row is 0).
    """
    # Predictions that matched no answer at all (column max == 0).
    nonrecall_pred = (score.max(axis=0) == 0).sum()

    valid_score = np.zeros(score.shape[0])
    valid_recall = np.zeros(score.shape[0])
    valid_recall_score = np.zeros(score.shape[0])
    for i, row in enumerate(score):
        matched = row[row > 0]
        if len(matched) > 0:
            # Row score discounted by the unmatched-prediction count spread
            # evenly across rows. (The original had a second, dead assignment
            # using len(matched) in the denominator that was immediately
            # overwritten by this formula; it has been removed.)
            valid_score[i] = matched.sum() / (matched.sum() + (nonrecall_pred / score.shape[0]))
            valid_recall[i] = 1
            valid_recall_score[i] = matched.sum() / len(matched)
    return valid_score.mean(), valid_recall.mean(), valid_recall_score.mean(), nonrecall_pred
|
|
BETA = 1  # F-beta weight; currently unused (see note inside validScoreF1).


def validScoreF1(score):
    """Compute a soft F1 over a 2-D score matrix.

    Rows are presumably ground-truth answers and columns predictions (scores
    already normalised to [0, 1]) — confirm against callers.

    Returns:
        tuple of (F1, mean recall indicator, mean score over matched rows,
        FP count, precision). All zeros when there are no predictions.
    """
    # No predictions at all: every metric degenerates to zero.
    if score.shape[1] == 0:
        return 0, 0, 0, 0, 0

    # False positives: predictions that matched no answer (column max == 0).
    FP = (score.max(axis=0) == 0).sum()

    valid_score = np.zeros(score.shape[0])
    valid_recall = np.zeros(score.shape[0])
    valid_recall_score = np.zeros(score.shape[0])
    for i, row in enumerate(score):
        matched = row[row > 0]
        if len(matched) > 0:
            valid_score[i] = matched.sum() / len(matched)
            valid_recall[i] = 1
            valid_recall_score[i] = matched.sum() / len(matched)

    # Soft true positives: each recalled row contributes its mean match score.
    # FP > 0 whenever TP == 0 (shape[1] > 0 guarantees it), so no div-by-zero.
    TP = valid_score.sum()
    precision = TP / (TP + FP)

    # NOTE: an F-beta computation using BETA previously lived here, but its
    # result was unconditionally overwritten by the formula below; the dead
    # code has been removed. FN = rows that were never recalled.
    FN = score.shape[0] - valid_recall.sum()
    F1 = 2 * TP / (2 * TP + FP + FN)
    return F1, valid_recall.mean(), valid_recall_score.mean(), FP, precision
|
|
def topkValidScoreMean(score, k=10):
    """Mean of the positive per-row maxima among the first *k* rows.

    Note the asymmetry with scoreMean: this reduces over axis 1 (per-row max).
    Returns NaN (with a NumPy RuntimeWarning) when no positive score exists
    in the top-k slice — same as the original behaviour.
    """
    best_per_row = score.max(axis=1)[:k]
    positives = best_per_row[best_per_row > 0]
    return positives.mean()
|
|
# Accumulate per-run aggregates over every evaluated example in eval_result.
total_fps = 0
total_kv_cache_size = 0
total_response_number = 0
total_answer_number = 0
total_precision = 0
for ll in eval_result:
    # Only process examples that contain output from the evaluated model.
    if eval_model in ll.keys():
        total_answer_number += len(ll['conversation'])
        this_turn_response_number = 0
        # Count assistant turns and collect runtime stats; 'fps' and
        # 'kv_cache_size' are only present for some responses.
        for response in ll[eval_model]:
            if response['role'].lower() == 'assistant':
                if 'fps' in response:
                    total_fps+=response['fps']
                total_response_number += 1
                this_turn_response_number += 1
                if 'kv_cache_size' in response:
                    total_kv_cache_size += response['kv_cache_size']
        task2number[ll['Task Type'].strip()] += 1

        # Evaluator scores appear to be on a 0-10 scale; normalise to 0-1.
        # ('reponse' is a typo baked into the data keys — must stay as-is.)
        text_score = np.array(ll['evaluator_output_text']) / 10
        reponse_score = np.array(ll['evaluator_output_reponse']) / 10

        # Pick the score matrix per eval_mode; retrieval-style baselines
        # (EgoVLP/CLIP/Lavila) only ever use the response score.
        # NOTE(review): 'all' mode SUMS the two matrices (range 0-2) rather
        # than averaging — confirm intended. Also, if eval_mode matches no
        # branch, 'score' silently carries over from the previous iteration
        # (NameError on the first) — confirm only 'all'/'text'/'response'
        # are ever passed.
        if args.eval_mode == 'all' and eval_model not in ['EgoVLP', 'CLIP', 'Lavila']:
            score = (text_score+reponse_score)
        elif args.eval_mode == 'text':
            score = text_score
        elif args.eval_mode == 'response' or eval_model in ['EgoVLP', 'CLIP', 'Lavila']:
            score = reponse_score
        score_mean, recall_mean, recall_score_mean, nonrecall_pred, precision = validScoreF1(score)
        task2score[ll['Task Type'].strip()] += score_mean
        task2recall[ll['Task Type'].strip()] += recall_mean
        task2recall_score[ll['Task Type'].strip()] += recall_score_mean
        task2nonrecall_pred[ll['Task Type'].strip()] += nonrecall_pred
        task2precision[ll['Task Type'].strip()] += precision
        # NOTE(review): despite the name, this accumulates an unmatched-
        # prediction RATE (nonrecall_pred / responses), not a precision.
        total_precision += nonrecall_pred / this_turn_response_number if this_turn_response_number > 0 else 0
|
|
|
|
|
|
# Normalise each per-task accumulator into a mean; scores/recalls/precision
# are scaled x100 into percentages. Tasks with no samples report 0.
for k, v in task2score.items():
    if task2number[k] == 0:
        task2score[k] = 0
        task2recall[k] = 0
        task2recall_score[k] = 0
        task2nonrecall_pred[k] = 0
        task2precision[k] = 0
    else:
        task2score[k] = v / task2number[k] * 100
        task2recall[k] = task2recall[k] / task2number[k] * 100
        task2recall_score[k] = task2recall_score[k] / task2number[k] * 100
        task2nonrecall_pred[k] = task2nonrecall_pred[k] / task2number[k]
        task2precision[k] = task2precision[k] / task2number[k] * 100

print(json.dumps(task2number, indent=4))
print(json.dumps({k: round(v, 2) for k, v in task2score.items()}, indent=4))
print(json.dumps(task2recall, indent=4))


print("Total question number: ", sum(task2number.values()))

# NOTE(review): these averages divide by the total number of task types,
# including tasks with zero samples — confirm that is intended.
print(f"Average Score: {sum(task2score.values())/len(task2number.values())}")

# First 8 task types, averaged over those with a strictly positive score.
first_8_tasks = list(task2number.keys())[:8]
first_8_scores = [task2score[task] for task in first_8_tasks]
first_8_valid_scores = [score for score in first_8_scores if score > 0]
first_8_recall_scores = [task2recall[task] for task in first_8_tasks]
try:
    print(f"Average Score (First 8 Tasks): {sum(first_8_valid_scores)/len(first_8_valid_scores):.2f}")
except ZeroDivisionError:  # was a bare except; only an empty list can fail here
    print(f"Average Score (First 8 Tasks): 0.0")

# Last 4 task types, same positive-only averaging.
last_4_tasks = list(task2number.keys())[-4:]
last_4_scores = [task2score[task] for task in last_4_tasks]
last_4_valid_scores = [score for score in last_4_scores if score > 0]
last_4_recall_scores = [task2recall[task] for task in last_4_tasks]
try:
    print(f"Average Score (Last 4 Tasks): {sum(last_4_valid_scores)/len(last_4_valid_scores):.2f}")
except ZeroDivisionError:  # was a bare except; only an empty list can fail here
    print(f"Average Score (Last 4 Tasks): 0.0")


print(f"Average Recall: {sum(task2recall.values())/len(task2number.values())}")
print(f"Average Recall Score: {sum(task2recall_score.values())/len(task2number.values())}")
print(f"Average Precision: {sum(task2precision.values())/len(task2number.values())}")


print(f"Total Question Number: {sum(task2number.values())}")
print(f"Average FPS: {total_fps/total_response_number}")
print(f"Average KV Cache: {total_kv_cache_size/total_response_number}")
print(f"Average Non-Recall Pred: {sum(task2nonrecall_pred.values())/sum(task2number.values())}")
print(f"Average Response Number: {total_response_number/sum(task2number.values())}")
print(f"Average Precision: {total_precision/sum(task2number.values()) * 100}")


# Compact [recall, precision] pair for quick copy-paste.
print(f"[{sum(task2recall.values())/len(task2number.values()):.1f}, {sum(task2precision.values())/len(task2number.values()):.1f}]")
|
|
def generate_latex_table_row():
    """Print a LaTeX-formatted results row for the evaluated model.

    Emits: model name, the 8 individual task scores plus their valid-only
    average, then the 4 remaining task scores plus their valid-only average.
    Relies on the module-level task2number / task2score / first_8_valid_scores
    / last_4_valid_scores computed above.
    """
    print("\n# LaTeX format table row")

    cells = [f"& {eval_model} "]

    # First 8 task columns followed by their average.
    for task in list(task2number.keys())[:8]:
        cells.append(f"& {task2score[task]:.1f} ")
    cells.append(f"& {sum(first_8_valid_scores)/len(first_8_valid_scores):.1f} ")

    # Last 4 task columns followed by their average.
    for task in list(task2number.keys())[-4:]:
        cells.append(f"& {task2score[task]:.1f} ")
    cells.append(f"& {sum(last_4_valid_scores)/len(last_4_valid_scores):.1f} ")

    print("".join(cells), end="")


generate_latex_table_row()
|
|
|
|
| |
| |
| |
| |
|
|
| |
| |
| |
|
|
| |
| |
|
|
| |
| |
| |
|
|
| |
| |
|
|
| |
| |
|
|
| |