import argparse
import json
import os
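# Expected layout of the input results file, inferred from how the keys are
# read in calculate_scores() and process_task_results() below (a sketch of
# the assumed schema, not an authoritative spec):
#
# {
#     "counting_results": [
#         {
#             "id": ...,
#             "question": "...",
#             "image_path": "...",
#             "parsed_answer": ...,
#             "ground_truth": ...
#         }
#     ],
#     "relations_results": [...],
#     "combination_results": [...]
# }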


def init_arguments():
    """
    Initialize and parse command-line arguments.

    Returns:
        argparse.Namespace: Parsed command-line arguments.
    """
    parser = argparse.ArgumentParser(
        description='Calculate evaluation scores from an eval result JSON file.')
    parser.add_argument('--input_path', '-i', type=str, required=True,
                        help='Path to the eval result JSON file.')
    return parser.parse_args()


def format_data(gt_type, data):
    """Cast data to gt_type; return None if data is None or the cast fails."""
    try:
        r = gt_type(data) if data is not None else None
    except Exception as e:
        print(f"Data formatting error: {e} data: {data}")
        r = None
    return r
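# Illustrative behaviour (assuming int as the ground-truth type):
#   format_data(int, "3")   -> 3
#   format_data(int, None)  -> None
#   format_data(int, "n/a") -> None (the conversion error is printed)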


def process_task_results(results, gt_type):
    """
    Process the results of a single task.

    Args:
        results (list): List of task results.
        gt_type (type): Type of the ground truth value.

    Returns:
        dict: A dictionary containing scores and wrong cases.
    """
    scores = {
        'correct': 0,
        'total': 0,
        'wrong_cases': []
    }
    for item in results:
        scores['total'] += 1
        parsed_answer = format_data(gt_type, item['parsed_answer'])
        ground_truth = format_data(gt_type, item['ground_truth'])
        if parsed_answer == ground_truth and parsed_answer is not None:
            scores['correct'] += 1
        else:
            scores['wrong_cases'].append({
                'id': item['id'],
                'question': item['question'],
                'image_path': item['image_path'],
                'parsed_answer': parsed_answer,
                'ground_truth': ground_truth
            })
    return scores
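# Minimal usage sketch (hypothetical data, for illustration only):
#
#     demo = [{'id': 0, 'question': 'How many cats?', 'image_path': 'img.png',
#              'parsed_answer': '3', 'ground_truth': 3}]
#     process_task_results(demo, int)
#     # -> {'correct': 1, 'total': 1, 'wrong_cases': []}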


def print_task_results(task_name, scores, accuracy):
    """
    Print the evaluation results of a single task.

    Args:
        task_name (str): Name of the task.
        scores (dict): Dictionary containing scores and wrong cases.
        accuracy (float): Calculated accuracy for the task.
    """
    print(f"\n{task_name} Task:")
    print(f"Total Samples: {scores['total']}")
    print(f"Correct Count: {scores['correct']}")
    print(f"Accuracy: {accuracy:.2%}")
    print(f"Wrong Cases: {len(scores['wrong_cases'])}")


def get_output_path(input_path):
    """Return the input path with its '_results.json' suffix replaced by '_evaluation.json'."""
    file_name = os.path.basename(input_path)
    eval_file_name = file_name.replace('_results.json', '_evaluation.json')
    return os.path.join(os.path.dirname(input_path), eval_file_name)


def calculate_scores(input_path, output_path):
    """
    Calculate scores for different tasks and save the detailed results.

    Args:
        input_path (str): Path to the input JSON file.
        output_path (str): Path to the output JSON file.
    """
    with open(input_path, 'r', encoding='utf-8') as f:
        results = json.load(f)

    tasks = {
        'counting': (results.get('counting_results', []), int),
        'relations': (results.get('relations_results', []), str),
        'combination': (results.get('combination_results', []), int)
    }

    detailed_results = {}

    print("\n=== Evaluation Results ===")
    for task_name, (task_results, gt_type) in tasks.items():
        scores = process_task_results(task_results, gt_type)
        accuracy = scores['correct'] / scores['total'] if scores['total'] > 0 else 0
        print_task_results(task_name.capitalize(), scores, accuracy)
        detailed_results[f'{task_name}_task'] = {
            'total_samples': scores['total'],
            'correct_samples': scores['correct'],
            'accuracy': accuracy,
            'wrong_cases': scores['wrong_cases']
        }

    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(detailed_results, f, indent=4, ensure_ascii=False)
    print(f"\nDetailed results have been saved to {output_path}")
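# Shape of the evaluation file written above, one entry per task (a sketch
# derived from how detailed_results is built; values are placeholders):
#
# {
#     "counting_task": {
#         "total_samples": ...,
#         "correct_samples": ...,
#         "accuracy": ...,
#         "wrong_cases": [...]
#     },
#     "relations_task": {...},
#     "combination_task": {...}
# }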


def main():
    """
    Main function to run the evaluation process.

    Example:
        python calculate_scores.py -i ./result/InternVL3-2B_results.json
    """
    args = init_arguments()
    output_path = get_output_path(args.input_path)
    calculate_scores(args.input_path, output_path)


if __name__ == "__main__":
    main()