Pu Miao committed on
Commit
e6bf090
·
1 Parent(s): c8dc238
Files changed (1) hide show
  1. evaluation/calculate_scores.py +137 -0
evaluation/calculate_scores.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import os
4
+
5
# Function to initialize command-line arguments
def init_arguments():
    """
    Build the CLI parser and return the parsed arguments.

    Returns:
        argparse.Namespace: Parsed command-line arguments with the
        required `input_path` attribute.
    """
    arg_parser = argparse.ArgumentParser(description='Input JSON file path')
    arg_parser.add_argument(
        '--input_path', '-i',
        type=str,
        required=True,
        help='Path to the eval result JSON file.',
    )
    return arg_parser.parse_args()
17
+
18
+
19
def format_data(gt_type, data):
    """
    Safely cast *data* to *gt_type*, returning None on failure.

    Args:
        gt_type (type): Target type to convert to (e.g. int or str).
        data: Raw value to convert; None is passed through unchanged.

    Returns:
        The converted value, or None if data is None or the cast fails.
    """
    if data is None:
        return None
    try:
        return gt_type(data)
    except (TypeError, ValueError) as e:
        # A failed cast marks the answer as unparseable instead of crashing
        # the whole evaluation run.
        print(f"Data formatting error: {e} data: {data}")
        return None
27
+
28
# Function to process task results
def process_task_results(results, gt_type):
    """
    Score a single task's result list against its ground truth.

    Args:
        results (list): List of result records, each with 'parsed_answer',
            'ground_truth', 'id', 'question' and 'image_path' keys.
        gt_type (type): Type both answer and ground truth are cast to
            before comparison.

    Returns:
        dict: {'correct': int, 'total': int, 'wrong_cases': list} where
        wrong_cases holds the mismatched (or unparseable) records.
    """
    correct = 0
    total = 0
    wrong_cases = []
    for entry in results:
        total += 1
        answer = format_data(gt_type, entry['parsed_answer'])
        truth = format_data(gt_type, entry['ground_truth'])
        # An unparseable answer (None) never counts as correct.
        if answer is not None and answer == truth:
            correct += 1
            continue
        wrong_cases.append({
            'id': entry['id'],
            'question': entry['question'],
            'image_path': entry['image_path'],
            'parsed_answer': answer,
            'ground_truth': truth,
        })
    return {'correct': correct, 'total': total, 'wrong_cases': wrong_cases}
60
+
61
+
62
# Function to print task evaluation results
def print_task_results(task_name, scores, accuracy):
    """
    Print a short evaluation summary for one task to stdout.

    Args:
        task_name (str): Name of the task.
        scores (dict): Dictionary with 'total', 'correct' and 'wrong_cases'.
        accuracy (float): Pre-computed accuracy for the task.
    """
    summary_lines = (
        f"\n{task_name} Task:",
        f"Total Samples: {scores['total']}",
        f"Correct Count: {scores['correct']}",
        f"Accuracy: {accuracy:.2%}",
        f"Wrong Cases: {len(scores['wrong_cases'])}",
    )
    for line in summary_lines:
        print(line)
77
+
78
+
79
# Function to get output file path
def get_output_path(input_path):
    """
    Derive the evaluation-output path from the input result path.

    Replaces the '_results.json' suffix with '_evaluation.json'. If the
    input file name does not carry that suffix, an '_evaluation' marker is
    inserted before the extension instead — previously the unchanged name
    was returned, which made the output silently overwrite the input file.

    Args:
        input_path (str): Path to the eval result JSON file.

    Returns:
        str: Path of the evaluation JSON file in the same directory.
    """
    file_name = os.path.basename(input_path)
    if file_name.endswith('_results.json'):
        eval_file_name = file_name.replace('_results.json', '_evaluation.json')
    else:
        # Fallback so the output path can never equal the input path.
        root, ext = os.path.splitext(file_name)
        eval_file_name = f"{root}_evaluation{ext or '.json'}"
    return os.path.join(os.path.dirname(input_path), eval_file_name)
84
+
85
+
86
# Function to calculate all scores and save results
def calculate_scores(input_path, output_path):
    """
    Calculate scores for different tasks and save the detailed results.

    Args:
        input_path (str): Path to the input JSON file.
        output_path (str): Path to the output JSON file.
    """
    with open(input_path, 'r', encoding='utf-8') as f:
        results = json.load(f)

    # (task name, key in the results JSON, ground-truth type)
    task_specs = (
        ('counting', 'counting_results', int),
        ('relations', 'relations_results', str),
        ('combination', 'combination_results', int),
    )

    detailed_results = {}

    print("\n=== Evaluation Results ===")
    for task_name, result_key, gt_type in task_specs:
        scores = process_task_results(results.get(result_key, []), gt_type)
        total = scores['total']
        accuracy = scores['correct'] / total if total > 0 else 0
        print_task_results(task_name.capitalize(), scores, accuracy)
        detailed_results[f'{task_name}_task'] = {
            'total_samples': total,
            'correct_samples': scores['correct'],
            'accuracy': accuracy,
            'wrong_cases': scores['wrong_cases'],
        }

    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(detailed_results, f, indent=4, ensure_ascii=False)
    print(f"\nDetailed results have been saved to {output_path}")
121
+
122
+
123
# Main function
def main():
    """
    Main function to run the evaluation process.

    Example:
        python calculate_scores.py -i ./result/InternVL3-2B_results.json
    """
    args = init_arguments()
    calculate_scores(args.input_path, get_output_path(args.input_path))


if __name__ == "__main__":
    main()