ddwang2000 committed
Commit a5d8cca · verified · 1 Parent(s): 717a171

Upload code/mmsu_evaluation.py with huggingface_hub

Files changed (1)
  1. code/mmsu_evaluation.py +136 -0
code/mmsu_evaluation.py ADDED
@@ -0,0 +1,136 @@
+ import json
+ import argparse
+ from collections import defaultdict
+
+ def load_jsonl_data(jsonl_path):
+     """Load records from the provided JSONL file, skipping malformed lines."""
+     records = []
+     with open(jsonl_path, 'r', encoding='utf-8') as f:
+         for line_number, line in enumerate(f, start=1):  # 1-based line numbers in error messages
+             try:
+                 record = json.loads(line.strip())
+                 records.append(record)
+             except json.JSONDecodeError as e:
+                 print(f"Error decoding JSON on line {line_number}: {line.strip()}")
+                 print(e)
+     return records
+
+ def calculate_accuracy_per_task_and_category(data):
+     """Calculate accuracy for each unique task and sub-category."""
+     task_category_accuracy = defaultdict(lambda: defaultdict(lambda: {"correct": 0, "total": 0}))
+     task_average_accuracy = defaultdict(lambda: {"total_correct": 0, "total_count": 0})
+
+     # Initialize overall counters
+     total_correct = 0
+     total_count = 0
+
+     for record in data:
+         task = record.get('category', '')
+         category = record.get('sub-category', '')
+
+         # Extract and normalize the model response
+         response = record.get('response', '')
+         try:
+             predict = response.strip().replace('\n', '')
+         except AttributeError:
+             print('Error prediction!')
+             continue
+
+         # Skip empty or "None" responses, so a prediction left over from a
+         # previous record is never reused by mistake
+         if not predict or predict == 'None':
+             print(f'Wrong format response: {predict}')
+             continue
+
+         if predict[0] in ('A', 'B', 'C', 'D'):
+             model_predict = predict[0]
+         # Handles answers such as "The answer is A.", where the letter is
+         # the second-to-last character
+         elif len(predict) > 1 and predict[-2] in ('A', 'B', 'C', 'D'):
+             model_predict = predict[-2]
+         else:
+             print(f'Wrong format response: {predict}')
+             continue
+
+         # Get the correct answer and the choice texts
+         answer_gt = record.get('answer_gt', '')
+         choices = {
+             'A': record.get('choice_a', ''),
+             'B': record.get('choice_b', ''),
+             'C': record.get('choice_c', ''),
+             'D': record.get('choice_d', '')
+         }
+
+         # The prediction is correct when the predicted letter's choice text
+         # matches the ground-truth answer
+         if choices[model_predict] == answer_gt:
+             task_category_accuracy[task][category]["correct"] += 1
+             total_correct += 1
+
+         # Increase the total count for the task and category
+         task_category_accuracy[task][category]["total"] += 1
+         total_count += 1
+
+     # Calculate accuracy per task and category
+     for task, categories in task_category_accuracy.items():
+         total_correct_for_task = 0
+         total_count_for_task = 0
+         for category, counts in categories.items():
+             total = counts["total"]
+             correct = counts["correct"]
+             accuracy = correct / total if total > 0 else 0
+             task_category_accuracy[task][category] = accuracy
+
+             # Accumulate counts for the task-level average
+             total_correct_for_task += correct
+             total_count_for_task += total
+
+         # Calculate average accuracy for each task
+         task_average_accuracy[task]["total_correct"] = total_correct_for_task
+         task_average_accuracy[task]["total_count"] = total_count_for_task
+         task_average_accuracy[task]["average_accuracy"] = total_correct_for_task / total_count_for_task if total_count_for_task > 0 else 0
+
+     # Calculate overall accuracy
+     overall_accuracy = total_correct / total_count if total_count > 0 else 0
+
+     return task_category_accuracy, task_average_accuracy, overall_accuracy, total_count
+
+ def main():
+     """Main function to load data, calculate accuracy, and print results."""
+     # Parse command-line arguments
+     parser = argparse.ArgumentParser(description="Process a JSONL file and calculate accuracy.")
+     parser.add_argument('jsonl_path', type=str, help="Path to the input JSONL file")
+     args = parser.parse_args()
+
+     # Load data
+     data = load_jsonl_data(args.jsonl_path)
+
+     # Calculate accuracy
+     task_category_accuracies, task_average_accuracies, overall_accuracy, total_count = calculate_accuracy_per_task_and_category(data)
+
+     # Print accuracy for each category and sub-category
+     for task, categories in task_category_accuracies.items():
+         for category, accuracy in categories.items():
+             print(f'Category: {task}, Sub-category: {category}, Accuracy: {accuracy:.4f}')
+
+     # Print average accuracy for each category
+     for task, accuracy_info in task_average_accuracies.items():
+         average_accuracy = accuracy_info["average_accuracy"]
+         print(f'Category: {task}, Average Accuracy: {average_accuracy:.4f}')
+
+     # Print overall accuracy
+     print(f'Overall Accuracy: {overall_accuracy:.4f}')
+     print(f'Total count: {total_count}')
+
+ if __name__ == "__main__":
+     # bash:
+     # python mmsu_evaluation.py /path/to/your/input.jsonl
+     main()
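
For reference, the script assumes one JSON object per line, carrying the fields read above: category, sub-category, choice_a through choice_d, answer_gt, and response. The sketch below builds one such record; the field names come from the code, while every value is a placeholder invented for illustration.

import json

# Hypothetical record: field names are the ones mmsu_evaluation.py reads;
# all values are placeholders.
sample = {
    "category": "example-task",         # reported as the top-level category
    "sub-category": "example-subtask",  # reported as the sub-category
    "choice_a": "option text A",
    "choice_b": "option text B",
    "choice_c": "option text C",
    "choice_d": "option text D",
    "answer_gt": "option text B",       # the text of the correct choice, not its letter
    "response": "The answer is B."      # model output to be scored
}

# One record per line, as load_jsonl_data expects
with open("input.jsonl", "a", encoding="utf-8") as f:
    f.write(json.dumps(sample) + "\n")

Note that correctness is decided by comparing the chosen option's text against answer_gt, so answer_gt must hold the choice text rather than the letter.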
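
One caveat in the letter extraction: only the first and the second-to-last characters are inspected, so a response like "Answer: B" is scored as choice A, because 'A' is its first character. If broader phrasings needed handling, a regex that picks the first standalone A-D letter is one possible swap-in. A minimal sketch, with extract_choice as a hypothetical helper that is not part of the committed script:

import re

def extract_choice(text):
    """Return the first standalone A-D letter in text, or None (hypothetical helper)."""
    match = re.search(r'\b([ABCD])\b', text)
    return match.group(1) if match else None

print(extract_choice("Answer: B"))         # -> B
print(extract_choice("The answer is A."))  # -> A
print(extract_choice("no letter here"))    # -> None

Whether that looser matching is desirable depends on how the responses were generated; it is offered only as an option.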