File size: 2,769 Bytes
1350de4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
import os
import argparse
import json
import re
import sys

# NOTE(review): debug leftover — prints the interpreter's module search path
# every time the script starts; consider removing before release.
print(sys.path)

# from mova.eval.m4c_evaluator import ChartVQAEvaluator
# Project-local evaluators; STVQAAccuracyEvaluator is the one actually used below.
from llava.eval.m4c_evaluator import TextVQAAccuracyEvaluator, STVQAAccuracyEvaluator

def get_args():
    """Parse and return the command-line options for this evaluation script.

    All options are plain string flags; any not supplied default to None.
    """
    parser = argparse.ArgumentParser()
    # Every option shares the same shape (string, optional), so register
    # them uniformly.
    for flag in (
        '--annotation-file',
        '--result-file',
        '--result-dir',
        '--mid_result',
        '--output_result',
    ):
        parser.add_argument(flag, type=str)
    return parser.parse_args()


def eval_single(annotation_file, result_file):
    """Score one JSONL result file against a JSONL annotation file.

    Args:
        annotation_file: path (``~`` expanded) to a JSONL file, one object per
            line, each with at least 'question_id', 'question', and 'answer'.
        result_file: path to a JSONL file of model outputs, each line an
            object with 'question_id', 'prompt', and 'text'.

    Returns:
        Tuple ``(num_samples, accuracy_percent, mid_list)`` where ``mid_list``
        is each result dict augmented with its ground-truth 'annotation'.

    Raises:
        KeyError: if a result has no annotation matching its
            (question_id, lowercased prompt) pair.
    """
    experiment_name = os.path.splitext(os.path.basename(result_file))[0]
    print(experiment_name)
    # Annotations are JSONL (one object per line), not a single JSON document.
    # Use `with` so the file handles are closed deterministically (the
    # original left them open).
    with open(os.path.expanduser(annotation_file), "r") as f:
        annotations = [json.loads(q) for q in f]
    # Key on (question_id, lowercased question) so prompts from the results
    # can be matched case-insensitively.
    annotations = {
        (annotation['question_id'], annotation['question'].lower()): annotation
        for annotation in annotations
    }
    with open(result_file) as f:
        results = [json.loads(line) for line in f]

    pred_list = []
    mid_list = []
    for result in results:
        annotation = annotations[(result['question_id'], result['prompt'].lower())]
        pred_list.append({
            "pred_answer": result['text'].lower(),
            "gt_answers": [annotation['answer'].lower()],
        })
        # Keep the raw result plus its ground truth for downstream inspection.
        mid_list.append(result)
        mid_list[-1]["annotation"] = annotation['answer']

    # ANLS-style scoring from the project's m4c evaluator.
    evaluator = STVQAAccuracyEvaluator()
    acc = 100. * evaluator.eval_pred_list(pred_list)
    print('Samples: {}\nAccuracy: {:.2f}%\n'.format(len(pred_list), acc))
    return len(pred_list), acc, mid_list


if __name__ == "__main__":
    args = get_args()

    if args.result_file is not None:
        samples, acc, mid_result = eval_single(args.annotation_file, args.result_file)

    if args.result_dir is not None:
        for result_file in sorted(os.listdir(args.result_dir)):
            if not result_file.endswith('.jsonl'):
                print(f'Skipping {result_file}')
                continue
            samples, acc, mid_result = eval_single(args.annotation_file, os.path.join(args.result_dir, result_file))

    # with open(args.mid_result, 'w') as f:
    #     json.dump(mid_result, f, indent=2)
    # output_folder = os.path.dirname(args.output_result)
    # print(output_folder)
    # os.makedirs(os.path.dirname(output_folder), exist_ok=True)
    # with open(args.output_result, 'w') as f:
    #     json.dump({'samples': samples, 'acc': acc}, f, indent=2)