ddwang2000 committed
Commit 2048137 · verified · 1 Parent(s): 45a46ca

Delete folder code with huggingface_hub

Files changed (2)
  1. code/mmsu_evaluation.py +0 -136
  2. code/mmsu_inference.py +0 -80
code/mmsu_evaluation.py DELETED
@@ -1,136 +0,0 @@
import json
import argparse
from collections import defaultdict

def load_jsonl_data(jsonl_path):
    """Load records from a JSONL file, skipping lines that fail to parse."""
    records = []
    with open(jsonl_path, 'r', encoding='utf-8') as f:
        for line_number, line in enumerate(f, start=1):
            try:
                records.append(json.loads(line.strip()))
            except json.JSONDecodeError as e:
                print(f"Error decoding JSON on line {line_number}: {line.strip()}")
                print(e)
    return records

def calculate_accuracy_per_task_and_category(data):
    """Calculate accuracy for each unique task (category) and sub-category."""
    task_category_accuracy = defaultdict(lambda: defaultdict(lambda: {"correct": 0, "total": 0}))
    task_average_accuracy = defaultdict(lambda: {"total_correct": 0, "total_count": 0})

    total_correct = 0
    total_count = 0

    for record in data:
        task = record.get('category', '')
        category = record.get('sub-category', '')

        # Extract the predicted answer letter from the model response.
        response = record.get('response', '')
        predict = str(response).strip().replace('\n', '')

        if not predict or predict == 'None':
            print('Error prediction!')
            continue

        if predict[0] in ('A', 'B', 'C', 'D'):
            model_predict = predict[0]
        # Handles responses such as "The answer is A.", where the letter
        # is the second-to-last character.
        elif len(predict) > 1 and predict[-2] in ('A', 'B', 'C', 'D'):
            model_predict = predict[-2]
        else:
            print(f'Wrong format response: {predict}')
            continue

        # Look up the ground-truth answer and the choice texts.
        answer_gt = record.get('answer_gt', '')
        choices = {
            'A': record.get('choice_a', ''),
            'B': record.get('choice_b', ''),
            'C': record.get('choice_c', ''),
            'D': record.get('choice_d', '')
        }

        # The prediction is correct when the chosen option's text matches
        # the ground-truth answer.
        if choices[model_predict] == answer_gt:
            task_category_accuracy[task][category]["correct"] += 1
            total_correct += 1

        task_category_accuracy[task][category]["total"] += 1
        total_count += 1

    # Turn per-(task, sub-category) counts into accuracies and aggregate per task.
    for task, categories in task_category_accuracy.items():
        total_correct_for_task = 0
        total_count_for_task = 0
        for category, counts in categories.items():
            total = counts["total"]
            correct = counts["correct"]
            task_category_accuracy[task][category] = correct / total if total > 0 else 0
            total_correct_for_task += correct
            total_count_for_task += total

        task_average_accuracy[task]["total_correct"] = total_correct_for_task
        task_average_accuracy[task]["total_count"] = total_count_for_task
        task_average_accuracy[task]["average_accuracy"] = (
            total_correct_for_task / total_count_for_task if total_count_for_task > 0 else 0
        )

    overall_accuracy = total_correct / total_count if total_count > 0 else 0

    return task_category_accuracy, task_average_accuracy, overall_accuracy, total_count

def main():
    """Load data, calculate accuracy, and print results."""
    parser = argparse.ArgumentParser(description="Process a JSONL file and calculate accuracy.")
    parser.add_argument('jsonl_path', type=str, help="Path to the input JSONL file")
    args = parser.parse_args()

    data = load_jsonl_data(args.jsonl_path)

    (task_category_accuracies, task_average_accuracies,
     overall_accuracy, total_count) = calculate_accuracy_per_task_and_category(data)

    # Accuracy for each category / sub-category pair.
    for task, categories in task_category_accuracies.items():
        for category, accuracy in categories.items():
            print(f'Category: {task}, Sub-category: {category}, Accuracy: {accuracy:.4f}')

    # Average accuracy for each category.
    for task, accuracy_info in task_average_accuracies.items():
        print(f'Category: {task}, Average Accuracy: {accuracy_info["average_accuracy"]:.4f}')

    print(f'Overall Accuracy: {overall_accuracy:.4f}')
    print(f'Total count: {total_count}')

if __name__ == "__main__":
    # Usage:
    # python mmsu_evaluation.py /path/to/your/input.jsonl
    main()
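For reference, here is a minimal sketch of the record format mmsu_evaluation.py expects, inferred from the fields the script reads. The field names come from the code above; every example value is hypothetical.

import json

# Hypothetical record illustrating the fields mmsu_evaluation.py reads;
# all values below are made up for illustration.
sample_record = {
    "category": "Perception",          # task-level grouping
    "sub-category": "Tone",            # finer-grained grouping
    "choice_a": "rising tone",
    "choice_b": "falling tone",
    "choice_c": "level tone",
    "choice_d": "dipping tone",
    "answer_gt": "falling tone",       # ground truth is the choice text, not the letter
    "response": "B"                    # model output; its letter is matched against the choices
}

# One JSON object per line, as load_jsonl_data expects.
with open("input.jsonl", "w", encoding="utf-8") as f:
    f.write(json.dumps(sample_record, ensure_ascii=False) + "\n")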
code/mmsu_inference.py DELETED
@@ -1,80 +0,0 @@
import os
import argparse
import json
from tqdm import tqdm


def main():
    # Parse command-line arguments.
    parser = argparse.ArgumentParser(description="Easy inference for your model.")
    parser.add_argument('--input_jsonl', type=str, required=True, help="Path to the input JSONL file")
    parser.add_argument('--output_jsonl', type=str, required=True, help="Path to the output JSONL file")
    args = parser.parse_args()

    input_file = args.input_jsonl
    output_file = args.output_jsonl

    # Step 1: Build your model; implement according to your own model.
    # model = model.cuda()
    # model.eval()

    # Step 2: Single-step inference over each JSONL record.
    with open(input_file, "r", encoding="utf-8") as fin, open(output_file, "w", encoding="utf-8") as fout:
        items = [json.loads(line) for line in fin if line.strip()]
        for item in tqdm(items):
            audio_path = item['audio_path']
            task_name = item['task_name']
            if not os.path.exists(audio_path):
                print(f"lack wav {audio_path}")
                continue

            # Construct the multiple-choice prompt.
            question = item['question']
            question_prompts = 'Choose the most suitable answer from options A, B, C, and D to respond the question in next line, **you should only choose A or B or C or D.** Do not provide any additional explanations or content.'
            choice_a = item['choice_a']
            choice_b = item['choice_b']
            # Some items may lack choices C/D; missing choices render as 'None'.
            choice_c = item.get('choice_c', None)
            choice_d = item.get('choice_d', None)
            choices = f'A. {choice_a}\nB. {choice_b}\nC. {choice_c}\nD. {choice_d}'
            instruction = f"{question_prompts}\n\nQuestion: {question}\n\n{choices}"

            # Step 3: Run model inference.
            # output = model.infer(
            #     Prompts=instruction,
            #     Audio_path=audio_path,
            #     ...
            # )
            output = "Model response here"  # Placeholder response

            # Step 4: Save the result, one JSON object per line.
            json_string = json.dumps(
                {
                    "id": item["id"],
                    "audio_path": item["audio_path"],
                    "question": question,
                    "choice_a": choice_a,
                    "choice_b": choice_b,
                    "choice_c": choice_c,
                    "choice_d": choice_d,
                    "answer_gt": item["answer_gt"],
                    "response": output,
                    "task_name": task_name,
                    "category": item["category"],
                    "sub-category": item["sub-category"],
                    "sub-sub-category": item["sub-sub-category"],
                    "linguistics_sub_discipline": item["linguistics_sub_discipline"],
                },
                ensure_ascii=False
            )
            fout.write(json_string + "\n")


if __name__ == "__main__":
    # Usage:
    # python mmsu_inference.py --input_jsonl /path/to/input.jsonl --output_jsonl /path/to/output.jsonl
    main()
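The model hooks in Steps 1 and 3 are intentionally left open. Below is a minimal sketch of one way to wire in a model, assuming a wrapper class of your own; the MyAudioModel name and its infer() signature are hypothetical, not part of this script.

# Hypothetical stand-in showing how Steps 1 and 3 could be filled in;
# MyAudioModel and its infer() signature are illustrative, not a real API.
class MyAudioModel:
    def infer(self, prompts: str, audio_path: str) -> str:
        # Replace with your model's actual forward pass over the audio and prompt.
        return "A"

# Step 1 replacement:
# model = MyAudioModel()

# Step 3 replacement, inside the loop:
# output = model.infer(prompts=instruction, audio_path=audio_path)

Returning a single letter keeps the response field in the output JSONL in the format that mmsu_evaluation.py parses.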