ZzzHelloWorld committed on
Commit
2f26016
·
verified ·
1 Parent(s): bcec3ca

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. VLMEvalKit-sudoku/llava/__pycache__/constants.cpython-310.pyc +0 -0
  2. VLMEvalKit-sudoku/llava/eval/eval_ai2d.py +76 -0
  3. VLMEvalKit-sudoku/llava/eval/eval_rec.py +171 -0
  4. VLMEvalKit-sudoku/llava/eval/generate_webpage_data_from_table.py +111 -0
  5. VLMEvalKit-sudoku/llava/model/language_model/__pycache__/llava_llama.cpython-310.pyc +0 -0
  6. VLMEvalKit-sudoku/llava/model/language_model/__pycache__/llava_qwen3.cpython-310.pyc +0 -0
  7. VLMEvalKit-sudoku/llava/model/language_model/llava_qwen.py +165 -0
  8. VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA01-CLIP-B-16.json +19 -0
  9. VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA02-CLIP-L-14.json +29 -0
  10. VLMEvalKit-sudoku/llava/model/multimodal_encoder/modeling_qwen2_5vl.py +207 -0
  11. VLMEvalKit-sudoku/llava/model/multimodal_resampler/__pycache__/masked_drop.cpython-310.pyc +0 -0
  12. VLMEvalKit-sudoku/llava/serve/gradio_web_server.py +442 -0
  13. VLMEvalKit-sudoku/llava/train/llava_trainer_eval.py +76 -0
  14. VLMEvalKit-sudoku/llava/utils.py +198 -0
  15. VLMEvalKit-sudoku/vlmeval/__pycache__/inference.cpython-310.pyc +0 -0
  16. VLMEvalKit-sudoku/vlmeval/__pycache__/tools.cpython-310.pyc +0 -0
  17. VLMEvalKit-sudoku/vlmeval/api/__pycache__/reka.cpython-310.pyc +0 -0
  18. VLMEvalKit-sudoku/vlmeval/api/bailingmm.py +90 -0
  19. VLMEvalKit-sudoku/vlmeval/api/base.py +296 -0
  20. VLMEvalKit-sudoku/vlmeval/api/claude.py +147 -0
  21. VLMEvalKit-sudoku/vlmeval/api/hunyuan.py +183 -0
  22. VLMEvalKit-sudoku/vlmeval/api/qwen_api.py +74 -0
  23. VLMEvalKit-sudoku/vlmeval/api/reka.py +59 -0
  24. VLMEvalKit-sudoku/vlmeval/dataset/__init__.py +317 -0
  25. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/chartmimic.cpython-310.pyc +0 -0
  26. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/m4bench.cpython-310.pyc +0 -0
  27. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/miabench.cpython-310.pyc +0 -0
  28. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/ost_bench.cpython-310.pyc +0 -0
  29. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/qbench_video.cpython-310.pyc +0 -0
  30. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/video_dataset_config.cpython-310.pyc +0 -0
  31. VLMEvalKit-sudoku/vlmeval/dataset/emma.py +56 -0
  32. VLMEvalKit-sudoku/vlmeval/dataset/image_mt.py +128 -0
  33. VLMEvalKit-sudoku/vlmeval/dataset/image_shortqa.py +163 -0
  34. VLMEvalKit-sudoku/vlmeval/dataset/longvideobench.py +335 -0
  35. VLMEvalKit-sudoku/vlmeval/dataset/mmalignbench.py +298 -0
  36. VLMEvalKit-sudoku/vlmeval/dataset/mmlongbench.py +584 -0
  37. VLMEvalKit-sudoku/vlmeval/dataset/moviechat1k.py +264 -0
  38. VLMEvalKit-sudoku/vlmeval/dataset/mvbench.py +675 -0
  39. VLMEvalKit-sudoku/vlmeval/dataset/sfebench.py +223 -0
  40. VLMEvalKit-sudoku/vlmeval/dataset/slidevqa.py +189 -0
  41. VLMEvalKit-sudoku/vlmeval/dataset/tamperbench.py +537 -0
  42. VLMEvalKit-sudoku/vlmeval/dataset/tempcompass.py +646 -0
  43. VLMEvalKit-sudoku/vlmeval/dataset/text_mcq.py +123 -0
  44. VLMEvalKit-sudoku/vlmeval/dataset/utils/ccocr_evaluator/__init__.py +12 -0
  45. VLMEvalKit-sudoku/vlmeval/dataset/utils/ccocr_evaluator/common.py +222 -0
  46. VLMEvalKit-sudoku/vlmeval/dataset/utils/ccocr_evaluator/kie_evaluator.py +385 -0
  47. VLMEvalKit-sudoku/vlmeval/dataset/utils/judge_util.py +53 -0
  48. VLMEvalKit-sudoku/vlmeval/dataset/utils/llavabench.py +88 -0
  49. VLMEvalKit-sudoku/vlmeval/dataset/utils/mathv.py +179 -0
  50. VLMEvalKit-sudoku/vlmeval/dataset/utils/mlvu.py +189 -0
VLMEvalKit-sudoku/llava/__pycache__/constants.cpython-310.pyc ADDED
Binary file (451 Bytes). View file
 
VLMEvalKit-sudoku/llava/eval/eval_ai2d.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import argparse
import json
import re
import sys
8
def get_args():
    """Parse the command-line options for the AI2D evaluation script."""
    parser = argparse.ArgumentParser()
    for flag in ('--annotation-file', '--result-file', '--result-dir',
                 '--mid_result', '--output_result'):
        parser.add_argument(flag, type=str)
    return parser.parse_args()
16
+
17
+
18
def evaluate_exact_match_accuracy(entries):
    """Return the mean exact-match accuracy over *entries*.

    Each entry is a dict with:
      - 'answer': predicted answer string
      - 'annotation': gold answer, either one string or a list of
        acceptable strings.

    Matching is case-insensitive after stripping surrounding whitespace;
    an entry scores 1.0 if the prediction matches any annotation.

    Fixes over the original: returns 0.0 for an empty entry list instead
    of raising ZeroDivisionError, tolerates an empty annotation list
    (original `max([])` raised ValueError), and no longer mutates the
    caller's entry dicts.
    """
    if not entries:
        return 0.0
    scores = []
    for elem in entries:
        annotations = elem['annotation']
        if isinstance(annotations, str):
            annotations = [annotations]
        answer = elem['answer'].strip().lower()
        # default=0.0 guards against an entry with no annotations.
        score = max(
            (1.0 if answer == ann.strip().lower() else 0.0 for ann in annotations),
            default=0.0,
        )
        scores.append(score)
    return sum(scores) / len(scores)
30
+
31
+
32
def eval_single(annotation_file, result_file):
    """Score one JSONL result file against the annotation file.

    Annotations are matched to predictions by the pair
    (question_id, lowercased question/prompt text). Prints the experiment
    name and the accuracy, then returns
    (num_samples, accuracy_percent, per_sample_records).
    """
    exp_name = os.path.splitext(os.path.basename(result_file))[0]
    print(exp_name)

    # Index gold annotations by (question_id, lowercased question text).
    with open(os.path.expanduser(annotation_file), "r") as f:
        ann_index = {
            (a['question_id'], a['question'].lower()): a
            for a in map(json.loads, f)
        }

    with open(result_file) as f:
        predictions = [json.loads(line) for line in f]

    pred_list = []
    mid_list = []
    for pred in predictions:
        gold = ann_index[(pred['question_id'], pred['prompt'].lower())]
        pred_list.append({
            "answer": pred['text'],
            "annotation": gold['answer'],
        })
        # Keep the full prediction record, augmented with the gold answer.
        pred["annotation"] = gold['answer']
        mid_list.append(pred)

    acc = 100. * evaluate_exact_match_accuracy(pred_list)
    print('Samples: {}\nAccuracy: {:.2f}%\n'.format(len(pred_list), acc))
    return len(pred_list), acc, mid_list
57
+
58
+
59
+ if __name__ == "__main__":
60
+ args = get_args()
61
+
62
+ if args.result_file is not None:
63
+ samples, acc, mid_result = eval_single(args.annotation_file, args.result_file)
64
+
65
+ if args.result_dir is not None:
66
+ for result_file in sorted(os.listdir(args.result_dir)):
67
+ if not result_file.endswith('.jsonl'):
68
+ print(f'Skipping {result_file}')
69
+ continue
70
+ samples, acc, mid_result = eval_single(args.annotation_file, os.path.join(args.result_dir, result_file))
71
+
72
+ with open(args.mid_result, 'w') as f:
73
+ json.dump(mid_result, f, indent=2)
74
+
75
+ with open(args.output_result, 'w') as f:
76
+ json.dump({'samples': samples, 'acc': acc}, f, indent=2)
VLMEvalKit-sudoku/llava/eval/eval_rec.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import argparse
4
+ import torch
5
+ from torchvision.ops import box_iou
6
+ import sys
7
+ import logging
8
+ import warnings
9
+ from typing import Dict, Any, Sequence
10
+ from PIL import Image
11
+ from tqdm import tqdm
12
+
13
def expand2square(pil_img, background_color):
    """Pad *pil_img* onto a centered square canvas of its longer side.

    Returns the image unchanged when it is already square; otherwise pastes
    it, centered along the short axis, onto a new image filled with
    *background_color*.
    """
    width, height = pil_img.size
    if width == height:
        return pil_img
    side = max(width, height)
    canvas = Image.new(pil_img.mode, (side, side), background_color)
    if width > height:
        canvas.paste(pil_img, (0, (side - height) // 2))
    else:
        canvas.paste(pil_img, ((side - width) // 2, 0))
    return canvas
25
+
26
+
27
def eval_rec(answers, labels):
    """Compute REC (referring-expression comprehension) accuracy and mean IoU.

    *answers* and *labels* are parallel lists: each answer dict carries the
    model output in 'text' (e.g. "[0.09, 0.29, 0.37, 0.98]\\n\\n..."), each
    label dict carries the gold box string in 'label' and the image filename
    in 'image'. Boxes are normalized [xmin, ymin, xmax, ymax].

    Returns a dict with 'accuracy' (IoU > 0.5 rate), 'iou' (mean IoU), and
    'warning'. NOTE: unparseable predictions are skipped entirely (the
    `continue` below fires before the target is appended, so preds/targets
    stay aligned), meaning accuracy is computed only over parseable outputs.
    """
    preds = []
    targets = []
    for answer, annotation in zip(answers, labels):
        text = answer['text']
        label = annotation['label']

        # Model output looks like:
        #   "[0.09, 0.29, 0.37, 0.98]\n\nThe woman is wearing black pants."
        # Keep only the leading box, dropping the free-text suffix.
        text = text.split('\n\n')[0]

        # Strip brackets from both prediction and gold strings.
        text = text.replace('[', '')
        text = text.replace(']', '')
        label = label.replace('[', '')
        label = label.replace(']', '')
        # Split into the four coordinates.
        coords = text.strip(' ').split(',')
        try:
            xmin, ymin, xmax, ymax = coords
        # NOTE(review): bare except silently drops any malformed prediction
        # (wrong count, etc.); float() on non-numeric parts below would
        # still raise — consider narrowing to ValueError.
        except:
            continue
        pred = torch.as_tensor([float(xmin), float(ymin),
                                float(xmax), float(ymax)])
        preds.append(pred)

        # Gold label is assumed well-formed, so it is parsed unguarded.
        coords = label.strip(' ').split(',')
        xmin, ymin, xmax, ymax = coords
        target = torch.as_tensor([float(xmin), float(ymin),
                                  float(xmax), float(ymax)])

        # Load the source image to recover pixel-space dimensions.
        # NOTE(review): hard-coded COCO train2017 path — confirm it matches
        # the deployment layout.
        img = Image.open('./playground/data/eval/rec/images/train2017/' + annotation['image'])

        width_ori, height_ori = img.size
        xmin, ymin, xmax, ymax = target
        # De-normalize the gold box into pixel coordinates.
        xmin, ymin, xmax, ymax = xmin * width_ori, ymin * height_ori, xmax * width_ori, ymax * height_ori

        # Disabled branch kept from the original: it shifted the gold box to
        # account for square padding (expand2square) of the input image.
        if 0:
            if width_ori > height_ori:
                ymin += (width_ori - height_ori) // 2
                ymax += (width_ori - height_ori) // 2
                width = width_ori
                height = height_ori + width_ori - height_ori
            else:
                xmin += (height_ori - width_ori) // 2
                xmax += (height_ori - width_ori) // 2
                width = width_ori + height_ori - width_ori
                height = height_ori
        else:
            width = width_ori
            height = height_ori

        # Re-normalize the (possibly shifted) gold box.
        target = torch.as_tensor([float(xmin / width), float(ymin / height),
                                  float(xmax / width), float(ymax / height)])
        targets.append(target)

    pred_boxes = torch.stack(preds, dim=0)
    target_boxes = torch.stack(targets, dim=0)

    # Scale up before box_iou: normalized boxes are so small that integer
    # area computation would collapse to zero.
    ious = box_iou(pred_boxes * 1000, target_boxes * 1000)
    # box_iou is pairwise NxN; the diagonal pairs pred_i with target_i.
    ious = torch.einsum('i i -> i', ious)
    # NOTE: IoU is computed only over successfully parsed predictions.
    iou = ious.mean().item()
    correct = (ious > 0.5).sum().item()
    warn_message = "this iou is calculate on normalized box. just for non-rigorous training progress checking." \
        "the value is consistent with real iou only if image.width == image.height."
    warnings.warn(warn_message)

    return {
        'accuracy': 1.0 * correct / len(targets),
        'iou': iou,
        'warning': warn_message,
    }
128
+
129
+ if __name__ == "__main__":
130
+ parser = argparse.ArgumentParser()
131
+ parser.add_argument("--annotation-file", type=str)
132
+ parser.add_argument("--question-file", type=str)
133
+ parser.add_argument("--result-file", type=str)
134
+ args = parser.parse_args()
135
+
136
+ questions = [json.loads(line) for line in open(args.question_file)]
137
+ questions = {question['question_id']: question for question in questions}
138
+ answers = [json.loads(q) for q in open(args.result_file)]
139
+ annotations = [json.loads(a) for a in open(args.annotation_file)]
140
+
141
+ val_splits = ['REC_refcoco_unc_val',
142
+ 'REC_refcoco_unc_testA',
143
+ 'REC_refcoco_unc_testB',
144
+ 'REC_refcoco+_unc_val',
145
+ 'REC_refcoco+_unc_testA',
146
+ 'REC_refcoco+_unc_testB',
147
+ 'REC_refcocog_umd_val',
148
+ 'REC_refcocog_umd_test',]
149
+
150
+ # val_splits = ['REC_refcoco+_unc_val']
151
+
152
+ for category in val_splits:
153
+ cur_answers = [x for x in answers if questions[x['question_id']]['category'] == category]
154
+ cur_labels = [x for x in annotations if questions[x['question_id']]['category'] == category]
155
+ if len(cur_answers) == 0:
156
+ continue
157
+ print('split: {}, # samples answer: {}, # samples target {}'.format(category, len(cur_answers), len(cur_labels)))
158
+ # align the targe and label
159
+ align_answers = []
160
+ align_labels = []
161
+ for cur_answer in cur_answers:
162
+ for cur_label in cur_labels:
163
+ if cur_answer['question_id'] == cur_label['question_id']:
164
+ align_answers.append(cur_answer)
165
+ align_labels.append(cur_label)
166
+ break
167
+ # eval_info = eval_rec(cur_answers, cur_labels)
168
+ eval_info = eval_rec(align_answers, align_labels)
169
+ print("=================={}==================".format(category))
170
+ print(eval_info)
171
+ print("======================================")
VLMEvalKit-sudoku/llava/eval/generate_webpage_data_from_table.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Generate json file for webpage."""
2
+ import json
3
+ import os
4
+ import re
5
+
6
+ # models = ['llama', 'alpaca', 'gpt35', 'bard']
7
+ models = ['vicuna']
8
+
9
+
10
def read_jsonl(path: str, key: str = None):
    """Read a JSONL file into a list of dicts.

    If *key* is given, the records are sorted by that field and returned as
    a dict mapping each record's key value to the record.

    Fixes over the original: lines from file iteration keep their trailing
    newline, so the old `if not line` blank-line guard never fired and
    json.loads crashed on empty lines — strip first. Also uses a context
    manager so the file handle is closed deterministically.
    """
    data = []
    with open(os.path.expanduser(path)) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            data.append(json.loads(line))
    if key is not None:
        data.sort(key=lambda x: x[key])
        data = {item[key]: item for item in data}
    return data
21
+
22
+
23
def trim_hanging_lines(s: str, n: int) -> str:
    """Strip *s*, then remove its first *n* lines, re-stripping after each
    removal. Raises IndexError if fewer than *n* newline-separated lines
    remain (same contract as the original)."""
    remaining = s.strip()
    removed = 0
    while removed < n:
        remaining = remaining.split('\n', 1)[1].strip()
        removed += 1
    return remaining
28
+
29
+
30
if __name__ == '__main__':
    # Build webpage/data.json for the comparison webpage: per-question
    # answers, GPT reviews, and scores for the models in `models`.
    questions = read_jsonl('table/question.jsonl', key='question_id')

    # Toggled-off model sources kept in sync with the `models` list above.
    # alpaca_answers = read_jsonl('table/answer/answer_alpaca-13b.jsonl', key='question_id')
    # bard_answers = read_jsonl('table/answer/answer_bard.jsonl', key='question_id')
    # gpt35_answers = read_jsonl('table/answer/answer_gpt35.jsonl', key='question_id')
    # llama_answers = read_jsonl('table/answer/answer_llama-13b.jsonl', key='question_id')
    vicuna_answers = read_jsonl('table/answer/answer_vicuna-13b.jsonl', key='question_id')
    ours_answers = read_jsonl('table/results/llama-13b-hf-alpaca.jsonl', key='question_id')

    review_vicuna = read_jsonl('table/review/review_vicuna-13b_llama-13b-hf-alpaca.jsonl', key='question_id')
    # review_alpaca = read_jsonl('table/review/review_alpaca-13b_vicuna-13b.jsonl', key='question_id')
    # review_bard = read_jsonl('table/review/review_bard_vicuna-13b.jsonl', key='question_id')
    # review_gpt35 = read_jsonl('table/review/review_gpt35_vicuna-13b.jsonl', key='question_id')
    # review_llama = read_jsonl('table/review/review_llama-13b_vicuna-13b.jsonl', key='question_id')

    records = []
    for qid in questions.keys():
        # One record per question: text, each model's answer, the review
        # text, and the score tuple from the reviewer.
        r = {
            'id': qid,
            'category': questions[qid]['category'],
            'question': questions[qid]['text'],
            'answers': {
                # 'alpaca': alpaca_answers[qid]['text'],
                # 'llama': llama_answers[qid]['text'],
                # 'bard': bard_answers[qid]['text'],
                # 'gpt35': gpt35_answers[qid]['text'],
                'vicuna': vicuna_answers[qid]['text'],
                'ours': ours_answers[qid]['text'],
            },
            'evaluations': {
                # 'alpaca': review_alpaca[qid]['text'],
                # 'llama': review_llama[qid]['text'],
                # 'bard': review_bard[qid]['text'],
                'vicuna': review_vicuna[qid]['content'],
                # 'gpt35': review_gpt35[qid]['text'],
            },
            'scores': {
                'vicuna': review_vicuna[qid]['tuple'],
                # 'alpaca': review_alpaca[qid]['score'],
                # 'llama': review_llama[qid]['score'],
                # 'bard': review_bard[qid]['score'],
                # 'gpt35': review_gpt35[qid]['score'],
            },
        }

        # Clean up review text for display.
        cleaned_evals = {}
        for k, v in r['evaluations'].items():
            v = v.strip()
            lines = v.split('\n')
            # Trim the first line if it is just the score pair (e.g. "8, 7").
            if re.match(r'\d+[, ]+\d+', lines[0]):
                lines = lines[1:]
            v = '\n'.join(lines)
            # Bold the assistant names for markdown rendering.
            cleaned_evals[k] = v.replace('Assistant 1', "**Assistant 1**").replace('Assistant 2', '**Assistant 2**')

        r['evaluations'] = cleaned_evals
        records.append(r)

    # Reorder the records, this is optional: three cosmetic id-remapping
    # passes chosen by hand for presentation order on the webpage.
    for r in records:
        if r['id'] <= 20:
            r['id'] += 60
        else:
            r['id'] -= 20
    for r in records:
        if r['id'] <= 50:
            r['id'] += 10
        elif 50 < r['id'] <= 60:
            r['id'] -= 50
    for r in records:
        if r['id'] == 7:
            r['id'] = 1
        elif r['id'] < 7:
            r['id'] += 1

    records.sort(key=lambda x: x['id'])

    # Write the final payload consumed by the static webpage.
    with open('webpage/data.json', 'w') as f:
        json.dump({'questions': records, 'models': models}, f, indent=2)
VLMEvalKit-sudoku/llava/model/language_model/__pycache__/llava_llama.cpython-310.pyc ADDED
Binary file (4.51 kB). View file
 
VLMEvalKit-sudoku/llava/model/language_model/__pycache__/llava_qwen3.cpython-310.pyc ADDED
Binary file (4.44 kB). View file
 
VLMEvalKit-sudoku/llava/model/language_model/llava_qwen.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Hao Zhang
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from typing import List, Optional, Tuple, Union, Dict
17
+ import torch
18
+ import torch.nn as nn
19
+ from torch.nn import CrossEntropyLoss
20
+
21
+ import transformers
22
+ from transformers import AutoConfig, AutoModelForCausalLM, LlamaConfig, LlamaModel, LlamaForCausalLM
23
+
24
+ from transformers.modeling_outputs import CausalLMOutputWithPast
25
+ from transformers.generation.utils import GenerateOutput
26
+
27
+ # from ...constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
28
+ from llava.model.llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
29
+ from transformers import Qwen2Config, Qwen2Model, Qwen2ForCausalLM
30
+
31
+ # from .qwen.modeling_qwen import QWenLMHeadModel, QWenModel
32
+ # from .qwen.configuration_qwen import QWenConfig
33
+
34
+
35
class LlavaQwenConfig(Qwen2Config):
    """Qwen2 config subclass tagged with the custom "llava_qwen" model type
    so transformers Auto* classes can dispatch to the LLaVA-Qwen model."""
    model_type = "llava_qwen"
37
+
38
+
39
class LlavaQwenModel(LlavaMetaModel, Qwen2Model):
    """Qwen2 backbone extended with LLaVA's multimodal components
    (vision tower / projector) via the LlavaMetaModel mixin."""

    config_class = LlavaQwenConfig

    def __init__(self, config: Qwen2Config):
        # Cooperative init: LlavaMetaModel and Qwen2Model both run through
        # the MRO chain triggered here.
        super(LlavaQwenModel, self).__init__(config)
44
+
45
+
46
class LlavaQwenForCausalLM(Qwen2ForCausalLM, LlavaMetaForCausalLM):
    """Causal LM head over LlavaQwenModel with multimodal input handling.

    Image (and optionally video) inputs are folded into the token embedding
    sequence by `prepare_inputs_labels_for_multimodal` (from
    LlavaMetaForCausalLM) before delegating to the standard Qwen2 forward.
    """

    config_class = LlavaQwenConfig

    def __init__(self, config):
        # super(Qwen2ForCausalLM, self).__init__(config)
        # Call Qwen2ForCausalLM.__init__ directly (not super()) to control
        # the init order with the LlavaMetaForCausalLM mixin.
        Qwen2ForCausalLM.__init__(self, config)
        config.model_type = "llava_qwen"
        config.rope_scaling = None

        # Replace the plain Qwen2 backbone with the multimodal-aware one.
        self.model = LlavaQwenModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()

    def get_model(self):
        """Return the underlying multimodal backbone (LLaVA convention)."""
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        image_sizes: Optional[List[List[int]]] = None,
        return_dict: Optional[bool] = None,
        # NOTE(review): mutable default argument — shared across calls;
        # safe only as long as callers never mutate it.
        modalities: Optional[List[str]] = ["image"],
        dpo_forward: Optional[bool] = False,
        cache_position=None,
        patch_images: Optional[torch.FloatTensor] = None,
        ind_tokens: Optional[List[int]] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        """Multimodal forward pass.

        When `inputs_embeds` is absent, image features are merged into the
        embeddings first. With `dpo_forward=True` the method returns raw
        (logits, labels) for DPO-style training instead of the usual
        CausalLMOutputWithPast.
        """
        if inputs_embeds is None:
            (input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels) = self.prepare_inputs_labels_for_multimodal(input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities, image_sizes,patch_images=patch_images,
                                                                                                                                          ind_tokens=ind_tokens)

        if dpo_forward:
            # Bypass the LM-head loss computation: run the backbone and
            # project to logits manually so the caller gets (logits, labels).
            outputs = self.model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

            hidden_states = outputs[0]
            logits = self.lm_head(hidden_states)
            return logits, labels

        else:
            # Standard path: delegate to Qwen2ForCausalLM.forward, which
            # also computes the LM loss when labels are provided.
            output = super().forward(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                labels=labels,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
            return output

    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        images: Optional[torch.Tensor] = None,
        image_sizes: Optional[torch.Tensor] = None,
        # NOTE(review): mutable default argument — see forward().
        modalities: Optional[List[str]] = ["image"],
        patch_images: Optional[torch.FloatTensor] = None,
        ind_tokens: Optional[List[int]] = None,
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        """Generation entry point: converts token ids (+ images) to
        inputs_embeds before handing off to HF `generate`."""
        position_ids = kwargs.pop("position_ids", None)
        attention_mask = kwargs.pop("attention_mask", None)
        if "inputs_embeds" in kwargs:
            raise NotImplementedError("`inputs_embeds` is not supported")

        if images is not None:
            # Merge visual features into the embedding sequence; labels and
            # the KV cache are irrelevant at generation start (None).
            (inputs, position_ids, attention_mask, _, inputs_embeds, _) = self.prepare_inputs_labels_for_multimodal(inputs, position_ids, attention_mask, None, None, images, modalities, image_sizes=image_sizes, patch_images=patch_images,
                                                                                                                   ind_tokens=ind_tokens)
        else:
            # Text-only: plain token embedding lookup.
            inputs_embeds = self.get_model().embed_tokens(inputs)

        return super().generate(position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
        """Thread the multimodal kwargs (images, sizes, patches, indicator
        tokens) through HF's per-step generation input preparation."""
        images = kwargs.pop("images", None)
        image_sizes = kwargs.pop("image_sizes", None)
        patch_images = kwargs.pop("patch_images", None)
        ind_tokens = kwargs.pop("ind_tokens", None)
        inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs)
        if images is not None:
            inputs["images"] = images
        if image_sizes is not None:
            inputs["image_sizes"] = image_sizes
        if patch_images is not None:
            inputs['patch_images'] = patch_images
        if ind_tokens is not None:
            inputs['ind_tokens'] = ind_tokens
        return inputs
162
+
163
+
164
+ AutoConfig.register("llava_qwen", LlavaQwenConfig)
165
+ AutoModelForCausalLM.register(LlavaQwenConfig, LlavaQwenForCausalLM)
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA01-CLIP-B-16.json ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "embed_dim": 512,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 12,
6
+ "width": 768,
7
+ "patch_size": 16,
8
+ "eva_model_name": "eva-clip-b-16",
9
+ "ls_init_value": 0.1,
10
+ "drop_path_rate": 0.0
11
+ },
12
+ "text_cfg": {
13
+ "context_length": 77,
14
+ "vocab_size": 49408,
15
+ "width": 512,
16
+ "heads": 8,
17
+ "layers": 12
18
+ }
19
+ }
VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA02-CLIP-L-14.json ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "embed_dim": 768,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 24,
6
+ "width": 1024,
7
+ "drop_path_rate": 0,
8
+ "head_width": 64,
9
+ "mlp_ratio": 2.6667,
10
+ "patch_size": 14,
11
+ "eva_model_name": "eva-clip-l-14",
12
+ "xattn": true,
13
+ "fusedLN": true,
14
+ "rope": true,
15
+ "pt_hw_seq_len": 16,
16
+ "intp_freq": true,
17
+ "naiveswiglu": true,
18
+ "subln": true
19
+ },
20
+ "text_cfg": {
21
+ "context_length": 77,
22
+ "vocab_size": 49408,
23
+ "width": 768,
24
+ "heads": 12,
25
+ "layers": 12,
26
+ "xattn": false,
27
+ "fusedLN": true
28
+ }
29
+ }
VLMEvalKit-sudoku/llava/model/multimodal_encoder/modeling_qwen2_5vl.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import PretrainedConfig
2
+ from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VisionTransformerPretrainedModel
3
+
4
+ import numpy as np
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+ from PIL import Image
9
+ from functools import partial, reduce
10
+ from typing import Any, Optional, Tuple, Union, Dict
11
+ from transformers.image_processing_utils import BatchFeature, get_size_dict
12
+ from transformers.image_transforms import (
13
+ convert_to_rgb,
14
+ normalize,
15
+ rescale,
16
+ resize,
17
+ to_channel_dimension_format,
18
+ )
19
+ from transformers.image_utils import (
20
+ ChannelDimension,
21
+ PILImageResampling,
22
+ to_numpy_array,
23
+ )
24
+
25
class QwenVisionConfig(PretrainedConfig):
    """Configuration for the Qwen2.5-VL vision transformer.

    Defaults mirror the Qwen2.5-VL vision tower hyperparameters; any field
    can be overridden via kwargs (handled by PretrainedConfig).
    """

    model_type = "qwen2_5_vl"
    # Key under which this config nests inside the full VLM config.
    base_config_key = "vision_config"

    def __init__(
        self,
        depth=32,                             # number of transformer blocks
        hidden_size=3584,
        hidden_act="silu",
        intermediate_size=3420,
        num_heads=16,
        in_channels=3,
        patch_size=14,                        # spatial patch edge in pixels
        spatial_merge_size=2,                 # 2x2 patch merging factor
        temporal_patch_size=2,                # frames folded into one patch
        tokens_per_second=4,
        window_size=112,
        out_hidden_size=3584,
        # Blocks that use full (non-windowed) attention.
        fullatt_block_indexes=[7, 15, 23, 31],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.depth = depth
        self.hidden_size = hidden_size
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.num_heads = num_heads
        self.in_channels = in_channels
        self.patch_size = patch_size
        self.spatial_merge_size = spatial_merge_size
        self.temporal_patch_size = temporal_patch_size
        self.tokens_per_second = tokens_per_second
        self.window_size = window_size
        self.fullatt_block_indexes = fullatt_block_indexes
        self.out_hidden_size = out_hidden_size
        self.initializer_range = initializer_range
63
+
64
class QwenImageProcessor:
    """Minimal CLIP-style image preprocessor for the Qwen vision tower.

    Not a transformers ImageProcessor subclass; it just composes the
    functional transforms (resize → rescale → normalize) and returns a
    BatchFeature with 'pixel_values'.
    """

    def __init__(self, image_mean=(0.5, 0.5, 0.5), image_std=(0.5, 0.5, 0.5), size=(392, 392), crop_size: Dict[str, int] = None, resample=PILImageResampling.BICUBIC, rescale_factor=1 / 255, data_format=ChannelDimension.FIRST):
        crop_size = crop_size if crop_size is not None else {"height": 392, "width": 392}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.image_mean = image_mean
        self.image_std = image_std
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.data_format = data_format
        # NOTE(review): crop_size is stored but no center-crop transform is
        # ever applied in preprocess() — do_center_crop is ignored.
        self.crop_size = crop_size

    def preprocess(self, images, do_resize = True, do_center_crop = True, do_rescale = True, do_normalize = True, return_tensors = 'pt'):
        """Run the transform pipeline and return BatchFeature('pixel_values').

        A single PIL image is wrapped in a list; otherwise *images* is
        assumed to be an iterable of frames (video) and each frame is
        converted to a numpy array up front.
        """
        if isinstance(images, Image.Image):
            images = [images]
        else:
            # to adapt video data
            images = [to_numpy_array(image) for image in images]
        assert isinstance(images, list)

        transforms = [
            convert_to_rgb,
            to_numpy_array
        ]

        if do_resize:
            transforms.append(partial(resize, size=self.size, resample=self.resample, data_format=self.data_format))
        if do_rescale:
            transforms.append(partial(rescale, scale=self.rescale_factor, data_format=self.data_format))
        if do_normalize:
            transforms.append(partial(normalize, mean=self.image_mean, std=self.image_std, data_format=self.data_format))

        transforms.append(partial(to_channel_dimension_format, channel_dim=self.data_format, input_channel_dim=self.data_format))

        # Apply each transform to every image in sequence (left fold).
        images = reduce(lambda x, f: [*map(f, x)], transforms, images)
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
104
+
105
class Qwen2_5VLVisionTower(nn.Module):
    """Wraps the pretrained Qwen2.5-VL vision transformer as a frozen
    LLaVA-style vision tower.

    Loading is optionally delayed; the tower is loaded eagerly when the
    config indicates its weights will be present/tuned.
    """

    def __init__(self, vision_tower, vision_tower_cfg, delay_load=False):
        super().__init__()

        self.is_loaded = False

        self.config = QwenVisionConfig()  # needs to be defined (translated from Chinese)

        # HF model id / path of the pretrained vision transformer.
        self.vision_tower_name = vision_tower

        self.image_processor = QwenImageProcessor()

        if not delay_load:
            print(f"Loading vision tower: {vision_tower}")
            self.load_model()

        elif getattr(vision_tower_cfg, "unfreeze_mm_vision_tower", False):
            # Checkpoint will carry tower weights — load now anyway.
            print(f"The checkpoint seems to contain `vision_tower` weights: `unfreeze_mm_vision_tower`: True.")
            self.load_model()

        elif hasattr(vision_tower_cfg, "mm_tunable_parts") and "mm_vision_tower" in vision_tower_cfg.mm_tunable_parts:
            print(f"The checkpoint seems to contain `vision_tower` weights: `mm_tunable_parts` contains `mm_vision_tower`.")
            self.load_model()

        else:
            # Delayed load: expose the config for shape queries until loaded.
            self.cfg_only = self.config

    def load_model(self, device_map=None):
        """Load the pretrained vision transformer (idempotent) and freeze it."""
        if self.is_loaded:
            print("{} is already loaded, `load_model` called again, skipping.".format(self.vision_tower_name))
            return

        self.vision_tower = Qwen2_5_VisionTransformerPretrainedModel.from_pretrained(self.vision_tower_name, device_map=device_map)
        print('qwen2_5vl vision tower loaded')
        # Tower is used as a frozen feature extractor.
        self.vision_tower.requires_grad_(False)
        self.is_loaded = True

    def forward(self, images, patch_sizes=None):
        """Encode a list of image tensors (c, h, w) into per-image features.

        Each image is duplicated along a new time axis to satisfy
        temporal_patch_size=2, then re-laid-out into the flattened
        patch format Qwen2.5-VL expects, with a (t, h, w) grid record per
        image. Returns a tuple of per-image feature tensors.
        """
        if type(images) is list:
            pixel_values = []
            vision_grid_thws = []
            spatial_patch_size = self.vision_tower.config.spatial_patch_size
            temporal_patch_size = self.vision_tower.config.temporal_patch_size
            # NOTE(review): hard-coded 2 rather than config.spatial_merge_size
            # — confirm they always agree.
            spatial_merge_size = 2
            data = {}
            for image in images:
                image = image.to(device=self.device, dtype=self.dtype).unsqueeze(0)
                # Duplicate the single frame so t == temporal_patch_size.
                image = torch.cat([image, image], dim=0)  # t, c, h, w
                grid_t = image.shape[0] // temporal_patch_size
                grid_h, grid_w = image.shape[2] // spatial_patch_size, image.shape[3] // spatial_patch_size
                channel = image.shape[1]
                # Re-tile into (merge-group, patch) blocks, then flatten each
                # patch to channel*t_patch*p*p values as the tower expects.
                patches = image.reshape(grid_t, temporal_patch_size, channel,
                                        grid_h // spatial_merge_size, spatial_merge_size, spatial_patch_size,
                                        grid_w // spatial_merge_size, spatial_merge_size, spatial_patch_size)
                patches = patches.permute(0, 3, 6, 4, 7, 2, 1, 5, 8)
                flatten_patches = patches.reshape(
                    grid_t * grid_h * grid_w,
                    channel * temporal_patch_size * spatial_patch_size * spatial_patch_size
                )

                pixel_values.extend(flatten_patches)
                vision_grid_thws.append(torch.tensor([grid_t, grid_h, grid_w]).unsqueeze(0))
            pixel_values = torch.stack(pixel_values, dim=0)
            pixel_values = pixel_values.to(device=self.device, dtype=self.dtype)
            vision_grid_thws = torch.cat(vision_grid_thws, dim=0).to(device=self.device)
            image_embeds = self.vision_tower(pixel_values, grid_thw=vision_grid_thws)
            # Each image yields t*h*w / merge^2 output tokens after merging.
            split_sizes = (vision_grid_thws.prod(-1) // spatial_merge_size**2).tolist()
            image_features = torch.split(image_embeds, split_sizes)
        else:
            # NOTE(review): batched (non-list) input hard-exits the process —
            # consider raising NotImplementedError instead.
            print('no support for parallel processing')
            exit()
        return image_features

    @property
    def dummy_feature(self):
        # Zero feature placeholder with the tower's hidden width.
        return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)

    @property
    def dtype(self):
        # dtype of the first parameter (all params share it after load).
        for p in self.vision_tower.parameters():
            return p.dtype

    @property
    def device(self):
        # device of the first parameter.
        for p in self.vision_tower.parameters():
            return p.device

    @property
    def hidden_size(self):
        return self.config.hidden_size

    @property
    def num_patches(self):
        # NOTE(review): QwenVisionConfig defines no `image_size` attribute —
        # accessing this property would raise AttributeError; confirm intent.
        return (self.config.image_size // self.config.patch_size) ** 2

    @property
    def num_patches_per_side(self):
        # NOTE(review): same `image_size` caveat as num_patches.
        return self.config.image_size // self.config.patch_size

    @property
    def image_size(self):
        # NOTE(review): same `image_size` caveat as num_patches.
        return self.config.image_size
VLMEvalKit-sudoku/llava/model/multimodal_resampler/__pycache__/masked_drop.cpython-310.pyc ADDED
Binary file (2.44 kB). View file
 
VLMEvalKit-sudoku/llava/serve/gradio_web_server.py ADDED
@@ -0,0 +1,442 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import datetime
3
+ import json
4
+ import os
5
+ import time
6
+
7
+ import gradio as gr
8
+ import requests
9
+
10
+ from llava.conversation import default_conversation, conv_templates, SeparatorStyle
11
+ from llava.constants import LOGDIR
12
+ from llava.utils import build_logger, server_error_msg, violates_moderation, moderation_msg
13
+ import hashlib
14
+
15
+
16
+ logger = build_logger("gradio_web_server", "gradio_web_server.log")
17
+
18
+ headers = {"User-Agent": "LLaVA Client"}
19
+
20
+ no_change_btn = gr.Button.update()
21
+ enable_btn = gr.Button.update(interactive=True)
22
+ disable_btn = gr.Button.update(interactive=False)
23
+
24
+ priority = {
25
+ "vicuna-13b": "aaaaaaa",
26
+ "koala-13b": "aaaaaab",
27
+ }
28
+
29
+
30
def get_conv_log_filename():
    """Return the path of today's conversation log file under LOGDIR."""
    now = datetime.datetime.now()
    return os.path.join(LOGDIR, f"{now.year}-{now.month:02d}-{now.day:02d}-conv.json")
34
+
35
+
36
def get_model_list():
    """Refresh workers on the controller and return its model list, priority-sorted."""
    resp = requests.post(args.controller_url + "/refresh_all_workers")
    assert resp.status_code == 200
    resp = requests.post(args.controller_url + "/list_models")
    models = resp.json()["models"]
    # Unknown models sort by their own name, after any with an explicit priority key.
    models.sort(key=lambda name: priority.get(name, name))
    logger.info(f"Models: {models}")
    return models
44
+
45
+
46
+ get_window_url_params = """
47
+ function() {
48
+ const params = new URLSearchParams(window.location.search);
49
+ url_params = Object.fromEntries(params);
50
+ console.log(url_params);
51
+ return url_params;
52
+ }
53
+ """
54
+
55
+
56
def load_demo(url_params, request: gr.Request):
    """Initialize a session, preselecting the model named in the URL params if known."""
    logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")

    dropdown_update = gr.Dropdown.update(visible=True)
    if "model" in url_params:
        requested = url_params["model"]
        if requested in models:
            dropdown_update = gr.Dropdown.update(value=requested, visible=True)

    return default_conversation.copy(), dropdown_update
67
+
68
+
69
def load_demo_refresh_model_list(request: gr.Request):
    """Initialize a session after re-fetching the model list from the controller."""
    logger.info(f"load_demo. ip: {request.client.host}")
    models = get_model_list()
    default_model = models[0] if len(models) > 0 else ""
    dropdown_update = gr.Dropdown.update(choices=models, value=default_model)
    return default_conversation.copy(), dropdown_update
75
+
76
+
77
def vote_last_response(state, vote_type, model_selector, request: gr.Request):
    """Append one vote record for the current conversation to the daily JSON log."""
    record = {
        "tstamp": round(time.time(), 4),
        "type": vote_type,
        "model": model_selector,
        "state": state.dict(),
        "ip": request.client.host,
    }
    with open(get_conv_log_filename(), "a") as fout:
        fout.write(json.dumps(record) + "\n")
87
+
88
+
89
def _vote_and_reset(state, vote_type, model_selector, request):
    """Log a vote of `vote_type`, clear the textbox, disable the three vote buttons."""
    logger.info(f"{vote_type}. ip: {request.client.host}")
    vote_last_response(state, vote_type, model_selector, request)
    return ("",) + (disable_btn,) * 3


def upvote_last_response(state, model_selector, request: gr.Request):
    """Record an upvote for the last bot response."""
    return _vote_and_reset(state, "upvote", model_selector, request)


def downvote_last_response(state, model_selector, request: gr.Request):
    """Record a downvote for the last bot response."""
    return _vote_and_reset(state, "downvote", model_selector, request)


def flag_last_response(state, model_selector, request: gr.Request):
    """Flag the last bot response as inappropriate."""
    return _vote_and_reset(state, "flag", model_selector, request)
105
+
106
+
107
def regenerate(state, image_process_mode, request: gr.Request):
    """Drop the last bot reply and mark the conversation ready for regeneration."""
    logger.info(f"regenerate. ip: {request.client.host}")
    state.messages[-1][-1] = None
    prev_human_msg = state.messages[-2]
    # Multimodal turns are (text, image, process_mode); refresh the mode only.
    if type(prev_human_msg[1]) in (tuple, list):
        prev_human_msg[1] = (*prev_human_msg[1][:2], image_process_mode)
    state.skip_next = False
    return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 5
115
+
116
+
117
def clear_history(request: gr.Request):
    """Reset the UI to a fresh default conversation."""
    logger.info(f"clear_history. ip: {request.client.host}")
    fresh = default_conversation.copy()
    return (fresh, fresh.to_gradio_chatbot(), "", None) + (disable_btn,) * 5
121
+
122
+
123
def add_text(state, text, image, image_process_mode, request: gr.Request):
    """Append a user turn (text plus optional image) to the conversation state."""
    logger.info(f"add_text. ip: {request.client.host}. len: {len(text)}")

    # Guard: nothing to submit.
    if len(text) <= 0 and image is None:
        state.skip_next = True
        return (state, state.to_gradio_chatbot(), "", None) + (no_change_btn,) * 5

    # Guard: optional content moderation.
    if args.moderate and violates_moderation(text):
        state.skip_next = True
        return (state, state.to_gradio_chatbot(), moderation_msg, None) + (no_change_btn,) * 5

    text = text[:1536]  # Hard cut-off
    if image is not None:
        text = text[:1200]  # Hard cut-off for images
        if "<image>" not in text:
            # text = '<Image><image></Image>' + text
            text = text + "\n<image>"
        text = (text, image, image_process_mode)
        if len(state.get_images(return_pil=True)) > 0:
            # Only one image per conversation: start fresh when one is already present.
            state = default_conversation.copy()

    state.append_message(state.roles[0], text)
    state.append_message(state.roles[1], None)
    state.skip_next = False
    return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 5
147
+
148
+
149
+ def http_bot(state, model_selector, temperature, top_p, max_new_tokens, request: gr.Request, template_name=None):
150
+ logger.info(f"http_bot. ip: {request.client.host}")
151
+ start_tstamp = time.time()
152
+ model_name = model_selector
153
+
154
+ if state.skip_next:
155
+ # This generate call is skipped due to invalid inputs
156
+ yield (state, state.to_gradio_chatbot()) + (no_change_btn,) * 5
157
+ return
158
+
159
+ if len(state.messages) == state.offset + 2:
160
+ # First round of conversation
161
+ if "llava" in model_name.lower():
162
+ if "llama-2" in model_name.lower():
163
+ template_name = "llava_llama_2"
164
+ elif "mistral" in model_name.lower() or "mixtral" in model_name.lower():
165
+ if "orca" in model_name.lower():
166
+ template_name = "mistral_orca"
167
+ elif "hermes" in model_name.lower():
168
+ template_name = "mistral_direct"
169
+ else:
170
+ template_name = "mistral_instruct"
171
+ elif "zephyr" in model_name.lower():
172
+ template_name = "mistral_zephyr"
173
+ elif "hermes" in model_name.lower():
174
+ template_name = "mistral_direct"
175
+ elif "v1" in model_name.lower():
176
+ if "mmtag" in model_name.lower():
177
+ template_name = "llava_v1_mmtag"
178
+ elif "plain" in model_name.lower() and "finetune" not in model_name.lower():
179
+ template_name = "llava_v1_mmtag"
180
+ else:
181
+ template_name = "llava_v1"
182
+ elif "mpt" in model_name.lower():
183
+ template_name = "mpt"
184
+ else:
185
+ if "mmtag" in model_name.lower():
186
+ template_name = "v0_plain"
187
+ elif "plain" in model_name.lower() and "finetune" not in model_name.lower():
188
+ template_name = "v0_plain"
189
+ else:
190
+ template_name = "llava_v0"
191
+ elif "mistral" in model_name.lower() or "mixtral" in model_name.lower():
192
+ if "orca" in model_name.lower():
193
+ template_name = "mistral_orca"
194
+ elif "hermes" in model_name.lower():
195
+ template_name = "mistral_direct"
196
+ else:
197
+ template_name = "mistral_instruct"
198
+ elif "hermes" in model_name.lower():
199
+ template_name = "mistral_direct"
200
+ elif "zephyr" in model_name.lower():
201
+ template_name = "mistral_zephyr"
202
+ elif "mpt" in model_name:
203
+ template_name = "mpt_text"
204
+ elif "llama-2" in model_name:
205
+ template_name = "llama_2"
206
+ else:
207
+ template_name = "vicuna_v1"
208
+ new_state = conv_templates[template_name].copy()
209
+ new_state.append_message(new_state.roles[0], state.messages[-2][1])
210
+ new_state.append_message(new_state.roles[1], None)
211
+ state = new_state
212
+
213
+ # Query worker address
214
+ controller_url = args.controller_url
215
+ ret = requests.post(controller_url + "/get_worker_address", json={"model": model_name})
216
+ worker_addr = ret.json()["address"]
217
+ logger.info(f"model_name: {model_name}, worker_addr: {worker_addr}")
218
+
219
+ # No available worker
220
+ if worker_addr == "":
221
+ state.messages[-1][-1] = server_error_msg
222
+ yield (state, state.to_gradio_chatbot(), disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
223
+ return
224
+
225
+ # Construct prompt
226
+ prompt = state.get_prompt()
227
+
228
+ all_images = state.get_images(return_pil=True)
229
+ all_image_hash = [hashlib.md5(image.tobytes()).hexdigest() for image in all_images]
230
+ for image, hash in zip(all_images, all_image_hash):
231
+ t = datetime.datetime.now()
232
+ filename = os.path.join(LOGDIR, "serve_images", f"{t.year}-{t.month:02d}-{t.day:02d}", f"{hash}.jpg")
233
+ if not os.path.isfile(filename):
234
+ os.makedirs(os.path.dirname(filename), exist_ok=True)
235
+ image.save(filename)
236
+
237
+ # Make requests
238
+ pload = {
239
+ "model": model_name,
240
+ "prompt": prompt,
241
+ "temperature": float(temperature),
242
+ "top_p": float(top_p),
243
+ "max_new_tokens": min(int(max_new_tokens), 1536),
244
+ "stop": state.sep if state.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.MPT] else state.sep2,
245
+ "images": f"List of {len(state.get_images())} images: {all_image_hash}",
246
+ }
247
+ logger.info(f"==== request ====\n{pload}")
248
+
249
+ pload["images"] = state.get_images()
250
+
251
+ state.messages[-1][-1] = "▌"
252
+ yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
253
+
254
+ try:
255
+ # Stream output
256
+ response = requests.post(worker_addr + "/worker_generate_stream", headers=headers, json=pload, stream=True, timeout=100)
257
+ last_print_time = time.time()
258
+ for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
259
+ if chunk:
260
+ data = json.loads(chunk.decode())
261
+ if data["error_code"] == 0:
262
+ output = data["text"][len(prompt) :].strip()
263
+ state.messages[-1][-1] = output + "▌"
264
+ if time.time() - last_print_time > 0.05:
265
+ last_print_time = time.time()
266
+ yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
267
+ else:
268
+ output = data["text"] + f" (error_code: {data['error_code']})"
269
+ state.messages[-1][-1] = output
270
+ yield (state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
271
+ return
272
+ time.sleep(0.03)
273
+ except requests.exceptions.RequestException as e:
274
+ state.messages[-1][-1] = server_error_msg
275
+ yield (state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
276
+ return
277
+
278
+ state.messages[-1][-1] = state.messages[-1][-1][:-1]
279
+ yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5
280
+
281
+ finish_tstamp = time.time()
282
+ logger.info(f"{output}")
283
+
284
+ with open(get_conv_log_filename(), "a") as fout:
285
+ data = {
286
+ "tstamp": round(finish_tstamp, 4),
287
+ "type": "chat",
288
+ "model": model_name,
289
+ "start": round(start_tstamp, 4),
290
+ "finish": round(start_tstamp, 4),
291
+ "state": state.dict(),
292
+ "images": all_image_hash,
293
+ "ip": request.client.host,
294
+ }
295
+ fout.write(json.dumps(data) + "\n")
296
+
297
+
298
+ title_markdown = """
299
+ # 🌋 LLaVA: Large Language and Vision Assistant
300
+ [[Project Page](https://llava-vl.github.io)] [[Code](https://github.com/haotian-liu/LLaVA)] [[Model](https://github.com/haotian-liu/LLaVA/blob/main/docs/MODEL_ZOO.md)] | 📚 [[LLaVA](https://arxiv.org/abs/2304.08485)] [[LLaVA-v1.5](https://arxiv.org/abs/2310.03744)]
301
+ """
302
+
303
+ tos_markdown = """
304
+ ### Terms of use
305
+ By using this service, users are required to agree to the following terms:
306
+ The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.
307
+ Please click the "Flag" button if you get any inappropriate answer! We will collect those to keep improving our moderator.
308
+ For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
309
+ """
310
+
311
+
312
+ learn_more_markdown = """
313
+ ### License
314
+ The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
315
+ """
316
+
317
+ block_css = """
318
+
319
+ #buttons button {
320
+ min-width: min(120px,100%);
321
+ }
322
+
323
+ """
324
+
325
+
326
def build_demo(embed_mode):
    """Assemble the Gradio Blocks UI and wire all event handlers.

    Args:
        embed_mode: when True, hide the title and terms-of-service markdown so
            the demo can be embedded in another page.

    Returns:
        The constructed `gr.Blocks` demo (not yet launched).
    """
    textbox = gr.Textbox(show_label=False, placeholder="Enter text and press ENTER", container=False)
    with gr.Blocks(title="LLaVA", theme=gr.themes.Default(), css=block_css) as demo:
        # Per-session conversation state.
        state = gr.State()

        if not embed_mode:
            gr.Markdown(title_markdown)

        with gr.Row():
            # Left column: model picker, image upload, examples, sampling params.
            with gr.Column(scale=3):
                with gr.Row(elem_id="model_selector_row"):
                    model_selector = gr.Dropdown(choices=models, value=models[0] if len(models) > 0 else "", interactive=True, show_label=False, container=False)

                imagebox = gr.Image(type="pil")
                image_process_mode = gr.Radio(["Crop", "Resize", "Pad", "Default"], value="Default", label="Preprocess for non-square image", visible=False)

                cur_dir = os.path.dirname(os.path.abspath(__file__))
                gr.Examples(
                    examples=[
                        [f"{cur_dir}/examples/extreme_ironing.jpg", "What is unusual about this image?"],
                        [f"{cur_dir}/examples/waterview.jpg", "What are the things I should be cautious about when I visit here?"],
                    ],
                    inputs=[imagebox, textbox],
                )

                with gr.Accordion("Parameters", open=False) as parameter_row:
                    temperature = gr.Slider(
                        minimum=0.0,
                        maximum=1.0,
                        value=0.2,
                        step=0.1,
                        interactive=True,
                        label="Temperature",
                    )
                    top_p = gr.Slider(
                        minimum=0.0,
                        maximum=1.0,
                        value=0.7,
                        step=0.1,
                        interactive=True,
                        label="Top P",
                    )
                    max_output_tokens = gr.Slider(
                        minimum=0,
                        maximum=1024,
                        value=512,
                        step=64,
                        interactive=True,
                        label="Max output tokens",
                    )

            # Right column: chat history, input box, and action buttons.
            with gr.Column(scale=8):
                chatbot = gr.Chatbot(elem_id="chatbot", label="LLaVA Chatbot", height=550)
                with gr.Row():
                    with gr.Column(scale=8):
                        textbox.render()
                    with gr.Column(scale=1, min_width=50):
                        submit_btn = gr.Button(value="Send", variant="primary")
                with gr.Row(elem_id="buttons") as button_row:
                    upvote_btn = gr.Button(value="👍 Upvote", interactive=False)
                    downvote_btn = gr.Button(value="👎 Downvote", interactive=False)
                    flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
                    # stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False)
                    regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
                    clear_btn = gr.Button(value="🗑️ Clear", interactive=False)

        if not embed_mode:
            gr.Markdown(tos_markdown)
            gr.Markdown(learn_more_markdown)
        url_params = gr.JSON(visible=False)

        # Register listeners
        btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
        upvote_btn.click(upvote_last_response, [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn], queue=False)
        downvote_btn.click(downvote_last_response, [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn], queue=False)
        flag_btn.click(flag_last_response, [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn], queue=False)

        # Regenerate / submit first update the state, then stream the bot reply.
        regenerate_btn.click(regenerate, [state, image_process_mode], [state, chatbot, textbox, imagebox] + btn_list, queue=False).then(http_bot, [state, model_selector, temperature, top_p, max_output_tokens], [state, chatbot] + btn_list)

        clear_btn.click(clear_history, None, [state, chatbot, textbox, imagebox] + btn_list, queue=False)

        textbox.submit(add_text, [state, textbox, imagebox, image_process_mode], [state, chatbot, textbox, imagebox] + btn_list, queue=False).then(
            http_bot, [state, model_selector, temperature, top_p, max_output_tokens], [state, chatbot] + btn_list
        )

        submit_btn.click(add_text, [state, textbox, imagebox, image_process_mode], [state, chatbot, textbox, imagebox] + btn_list, queue=False).then(
            http_bot, [state, model_selector, temperature, top_p, max_output_tokens], [state, chatbot] + btn_list
        )

        # "once": read the model from URL params on load; "reload": re-query the controller.
        if args.model_list_mode == "once":
            demo.load(load_demo, [url_params], [state, model_selector], _js=get_window_url_params, queue=False)
        elif args.model_list_mode == "reload":
            demo.load(load_demo_refresh_model_list, None, [state, model_selector], queue=False)
        else:
            raise ValueError(f"Unknown model list mode: {args.model_list_mode}")

    return demo
423
+
424
+
425
+ if __name__ == "__main__":
426
+ parser = argparse.ArgumentParser()
427
+ parser.add_argument("--host", type=str, default="0.0.0.0")
428
+ parser.add_argument("--port", type=int)
429
+ parser.add_argument("--controller-url", type=str, default="http://localhost:21001")
430
+ parser.add_argument("--concurrency-count", type=int, default=10)
431
+ parser.add_argument("--model-list-mode", type=str, default="once", choices=["once", "reload"])
432
+ parser.add_argument("--share", action="store_true")
433
+ parser.add_argument("--moderate", action="store_true")
434
+ parser.add_argument("--embed", action="store_true")
435
+ args = parser.parse_args()
436
+ logger.info(f"args: {args}")
437
+
438
+ models = get_model_list()
439
+
440
+ logger.info(args)
441
+ demo = build_demo(args.embed)
442
+ demo.queue(concurrency_count=args.concurrency_count, api_open=False).launch(server_name=args.host, server_port=args.port, share=args.share)
VLMEvalKit-sudoku/llava/train/llava_trainer_eval.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import subprocess
3
+
4
+ from llava.train.llava_trainer import LLaVATrainer
5
+
6
+
7
class LLaVAEvalTrainer(LLaVATrainer):
    """Trainer that evaluates checkpoints by shelling out to `lmms_eval`."""

    def evaluate(self, evaluate_args):
        """Run `lmms_eval` via `accelerate launch` and collect its metrics.

        Args:
            evaluate_args: namespace with eval_num_processes, model, model_args,
                task_names (comma-separated), batch_size, log_samples_suffix,
                output_path, limit, num_fewshot, gen_kwargs and log_samples.

        Returns:
            dict mapping "{task}_{metric}" to the reported metric value.

        Raises:
            AssertionError: if `log_samples` is not set (the sample log line is
                the only way to locate the results file).
        """
        cmd = (
            f"accelerate launch --num_processes {evaluate_args.eval_num_processes} -m lmms_eval"
            f" --model {evaluate_args.model}"
            f" --model_args {evaluate_args.model_args}"
            f" --tasks {evaluate_args.task_names}"
            f" --batch_size {evaluate_args.batch_size}"
            f" --log_samples_suffix {evaluate_args.log_samples_suffix}"
            f" --output_path {evaluate_args.output_path}"
        )
        if evaluate_args.limit:
            cmd += f" --limit {evaluate_args.limit}"
        if evaluate_args.num_fewshot:
            cmd += f" --num_fewshot {evaluate_args.num_fewshot}"
        if evaluate_args.gen_kwargs != "":
            cmd += f" --gen_kwargs {evaluate_args.gen_kwargs}"
        if evaluate_args.log_samples:
            cmd += " --log_samples"
        else:
            # `raise` (unlike `assert`) survives python -O.
            raise AssertionError("Please log samples so that the result can be parsed")

        # Pass the command string directly; a one-element list with shell=True
        # only works by accident on POSIX.
        results = subprocess.run(cmd, shell=True, capture_output=True, text=True)

        # lmms_eval prints the sample path on stdout or stderr depending on version.
        marker = "Saved samples to "
        stream = results.stdout if marker in results.stdout else results.stderr
        start = stream.index(marker) + len(marker)
        # BUG FIX: search for ".json" *after* the marker; searching from the
        # start of the stream could match an earlier, unrelated path and yield
        # end < start.
        end = stream.index(".json", start)
        sample_file = stream[start:end]
        results_file = "/".join(sample_file.split("/")[:-1]) + "/results.json"

        with open(results_file, "r") as f:
            lmms_eval_results = json.load(f)

        result_dict = {}
        for task in evaluate_args.task_names.split(","):
            task_results = lmms_eval_results["results"][task]
            for k, v in task_results.items():
                # Skip display aliases and stderr companions of each metric.
                if k != "alias" and "stderr" not in k:
                    metric = k.split(",")[0]
                    result_dict[f"{task}_{metric}"] = v
        return result_dict
VLMEvalKit-sudoku/llava/utils.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datetime
2
+ import logging
3
+ import logging.handlers
4
+ import os
5
+ import sys
6
+ import numpy as np
7
+
8
+ import requests
9
+
10
+ from llava.constants import LOGDIR
11
+
12
+ server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
13
+ moderation_msg = "I am sorry. Your input may violate our content moderation guidelines. Please avoid using harmful or offensive content."
14
+
15
+ handler = None
16
+
17
+ import torch.distributed as dist
18
+
19
+ try:
20
+ import av
21
+ from decord import VideoReader, cpu
22
+ except ImportError:
23
+ print("Please install pyav to use video processing functions.")
24
+
25
def process_video_with_decord(video_file, data_args):
    """Decode a video with decord and sample frames at roughly `data_args.video_fps`.

    Args:
        video_file: path (or file-like) accepted by `decord.VideoReader`.
        data_args: must provide `video_fps`, `frames_upbound` and `force_sample`.

    Returns:
        (frames ndarray of shape (N, H, W, C), video duration in seconds,
        comma-joined per-frame time string like "0.00s,1.00s,...",
        number of sampled frames).
    """
    vr = VideoReader(video_file, ctx=cpu(0), num_threads=1)
    total_frame_num = len(vr)
    video_time = total_frame_num / vr.get_avg_fps()
    # Frame stride; clamp to 1 so clips whose fps is below video_fps no longer
    # raise "range() arg 3 must not be zero".
    avg_fps = max(1, round(vr.get_avg_fps() / data_args.video_fps))
    frame_idx = list(range(0, total_frame_num, avg_fps))
    # NOTE(review): i / avg_fps is the sample ordinal, not seconds — the
    # uniform-sampling branch below divides by the real fps instead. Preserved
    # as-is to avoid changing downstream prompts; confirm intended units.
    frame_time = [i / avg_fps for i in frame_idx]

    if data_args.frames_upbound > 0:
        if len(frame_idx) > data_args.frames_upbound or data_args.force_sample:
            # Cap (or force) to a fixed number of uniformly spaced frames.
            uniform_sampled_frames = np.linspace(0, total_frame_num - 1, data_args.frames_upbound, dtype=int)
            frame_idx = uniform_sampled_frames.tolist()
            frame_time = [i / vr.get_avg_fps() for i in frame_idx]

    video = vr.get_batch(frame_idx).asnumpy()
    frame_time = ",".join([f"{i:.2f}s" for i in frame_time])

    num_frames_to_sample = len(frame_idx)
    # https://github.com/dmlc/decord/issues/208
    vr.seek(0)
    return video, video_time, frame_time, num_frames_to_sample
47
+
48
def process_video_with_pyav(video_file, data_args):
    """Decode a video with PyAV and sample frames at roughly `data_args.video_fps`.

    Args:
        video_file: path (or file-like) accepted by `av.open`.
        data_args: must provide `video_fps` and `frames_upbound`.

    Returns:
        ndarray of shape (N, H, W, 3) in RGB order.
    """
    container = av.open(video_file)
    # !!! This is the only difference. Using auto threading
    container.streams.video[0].thread_type = "AUTO"

    # Decode the whole stream up front; PyAV offers no random access comparable
    # to decord's indexed reader.
    video_frames = []
    for packet in container.demux():
        if packet.stream.type == 'video':
            for frame in packet.decode():
                video_frames.append(frame)
    total_frame_num = len(video_frames)
    # presumably `frame.time` is the presentation time in seconds — TODO confirm
    video_time = video_frames[-1].time
    # Clamp the stride to 1 so short or low-fps clips cannot produce a
    # zero step for range() below.
    avg_fps = max(1, round(total_frame_num / video_time / data_args.video_fps))
    frame_idx = list(range(0, total_frame_num, avg_fps))

    if data_args.frames_upbound > 0:
        if len(frame_idx) > data_args.frames_upbound:
            uniform_sampled_frames = np.linspace(0, total_frame_num - 1, data_args.frames_upbound, dtype=int)
            frame_idx = uniform_sampled_frames.tolist()

    frames = [video_frames[i] for i in frame_idx]
    return np.stack([x.to_ndarray(format="rgb24") for x in frames])
71
+
72
+
73
def rank0_print(*args):
    """Print only on distributed rank 0; print unconditionally outside distributed runs."""
    if not dist.is_initialized():
        print(*args)
    elif dist.get_rank() == 0:
        print(f"Rank {dist.get_rank()}: ", *args)
79
+
80
+
81
def rank_print(*args):
    """Print on every rank, prefixed with the rank id when distributed is initialized."""
    if not dist.is_initialized():
        print(*args)
    else:
        print(f"Rank {dist.get_rank()}: ", *args)
86
+
87
def build_logger(logger_name, logger_filename):
    """Create a logger that also mirrors stdout/stderr and writes a rotating file.

    Side effects (process-wide):
      * reformats the root handler,
      * replaces sys.stdout / sys.stderr with StreamToLogger wrappers,
      * on first call, attaches one daily-rotating file handler (under LOGDIR)
        to every logger created so far (guarded by the module-global `handler`).

    Args:
        logger_name: name of the logger to return.
        logger_filename: file name (inside LOGDIR) for the rotating log file.

    Returns:
        The configured `logging.Logger`.
    """
    global handler

    formatter = logging.Formatter(
        fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    # Set the format of root handlers
    if not logging.getLogger().handlers:
        logging.basicConfig(level=logging.INFO)
    logging.getLogger().handlers[0].setFormatter(formatter)

    # Redirect stdout and stderr to loggers
    stdout_logger = logging.getLogger("stdout")
    stdout_logger.setLevel(logging.INFO)
    sl = StreamToLogger(stdout_logger, logging.INFO)
    sys.stdout = sl

    stderr_logger = logging.getLogger("stderr")
    stderr_logger.setLevel(logging.ERROR)
    sl = StreamToLogger(stderr_logger, logging.ERROR)
    sys.stderr = sl

    # Get logger
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)

    # Add a file handler for all loggers (only once per process).
    if handler is None:
        os.makedirs(LOGDIR, exist_ok=True)
        filename = os.path.join(LOGDIR, logger_filename)
        handler = logging.handlers.TimedRotatingFileHandler(filename, when="D", utc=True)
        handler.setFormatter(formatter)

        # NOTE(review): loggers created *after* this call will not get the file
        # handler automatically — confirm that is acceptable for new modules.
        for name, item in logging.root.manager.loggerDict.items():
            if isinstance(item, logging.Logger):
                item.addHandler(handler)

    return logger
127
+
128
+
129
class StreamToLogger(object):
    """
    Fake file-like stream object that redirects writes to a logger instance.
    """

    def __init__(self, logger, log_level=logging.INFO):
        # Keep a handle to the real stdout so unknown attributes can delegate.
        self.terminal = sys.stdout
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ""

    def __getattr__(self, attr):
        # Delegate any other file-API attribute (encoding, isatty, ...) to stdout.
        return getattr(self.terminal, attr)

    def write(self, buf):
        """Log every complete line in `buf`; buffer any trailing partial line."""
        pending = self.linebuf + buf
        self.linebuf = ""
        for piece in pending.splitlines(True):
            # splitlines(True) keeps the terminator, so a piece ending in "\n"
            # is a complete line; anything else is an unfinished tail.
            if piece.endswith("\n"):
                self.logger.log(self.log_level, piece.rstrip())
            else:
                self.linebuf += piece

    def flush(self):
        """Emit any buffered partial line and clear the buffer."""
        if self.linebuf:
            self.logger.log(self.log_level, self.linebuf.rstrip())
        self.linebuf = ""
161
+
162
+
163
def disable_torch_init():
    """
    Disable the redundant torch default initialization to accelerate model creation.
    """
    import torch

    # Turn the default weight initializers into no-ops; checkpoints loaded
    # afterwards overwrite the weights anyway.
    for module_cls in (torch.nn.Linear, torch.nn.LayerNorm):
        setattr(module_cls, "reset_parameters", lambda self: None)
171
+
172
+
173
def violates_moderation(text):
    """
    Check whether the text violates OpenAI moderation API.

    Best-effort: returns False on any network or response-shape failure.
    Requires the OPENAI_API_KEY environment variable.
    """
    url = "https://api.openai.com/v1/moderations"
    headers = {"Content-Type": "application/json", "Authorization": "Bearer " + os.environ["OPENAI_API_KEY"]}
    # BUG FIX: build the body with proper JSON serialization. The previous
    # manual string concatenation produced invalid JSON whenever `text`
    # contained a quote or backslash.
    payload = {"input": text.replace("\n", "")}
    try:
        ret = requests.post(url, headers=headers, json=payload, timeout=5)
        flagged = ret.json()["results"][0]["flagged"]
    except requests.exceptions.RequestException as e:
        print(f"######################### Moderation Error: {e} #########################")
        flagged = False
    except KeyError as e:
        print(f"######################### Moderation Error: {e} #########################")
        flagged = False

    return flagged
192
+ return flagged
193
+
194
+
195
def pretty_print_semaphore(semaphore):
    """Render a semaphore's value and locked state for log messages ("None" for None)."""
    if semaphore is None:
        return "None"
    value, locked = semaphore._value, semaphore.locked()
    return f"Semaphore(value={value}, locked={locked})"
VLMEvalKit-sudoku/vlmeval/__pycache__/inference.cpython-310.pyc ADDED
Binary file (7.71 kB). View file
 
VLMEvalKit-sudoku/vlmeval/__pycache__/tools.cpython-310.pyc ADDED
Binary file (22.6 kB). View file
 
VLMEvalKit-sudoku/vlmeval/api/__pycache__/reka.cpython-310.pyc ADDED
Binary file (2.25 kB). View file
 
VLMEvalKit-sudoku/vlmeval/api/bailingmm.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ from vlmeval.smp import *
3
+ from vlmeval.api.base import BaseAPI
4
+ from vlmeval.dataset import DATASET_TYPE
5
+ from vlmeval.smp.vlm import encode_image_file_to_base64
6
+ import time
7
+
8
+
9
class bailingMMWrapper(BaseAPI):
    """API wrapper that queries the bailingMM multimodal chat endpoint."""

    # Marks this model as API-backed for the evaluation framework.
    is_api: bool = True

    def __init__(self,
                 model: str,
                 retry: int = 5,
                 key: str = None,
                 verbose: bool = True,
                 system_prompt: str = None,
                 max_tokens: int = 1024,
                 proxy: str = None,
                 **kwargs):
        """Configure the wrapper.

        Args:
            model: model identifier forwarded to the service.
            retry: retry count forwarded to BaseAPI.
            key: API key; falls back to the BAILINGMM_API_KEY env var.
            verbose: log timings, inputs and answers when True.
            system_prompt: forwarded to BaseAPI.
            max_tokens: accepted but not used here.  # NOTE(review): not forwarded — confirm
            proxy: accepted but not used here.  # NOTE(review): not forwarded — confirm
        """
        self.model = model
        self.fail_msg = 'Failed to obtain answer via bailingMM API.'
        if key is None:
            key = os.environ.get('BAILINGMM_API_KEY', None)
        assert key is not None, ('Please set the API Key for bailingMM.')
        self.key = key
        self.headers = {"Content-Type": "application/json"}
        super().__init__(retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs)

    def image_to_base64(self, image_path):
        """Read an image file and return its base64-encoded contents as str."""
        with open(image_path, 'rb') as image_file:
            encoded_string = str(base64.b64encode(image_file.read()), 'utf-8')
        return encoded_string

    def prepare_inputs(self, inputs):
        """Deep-copy the message list, replacing image paths with base64 payloads.

        Text messages pass through unchanged; a failed image read becomes an
        empty-string value (logged when verbose).
        """
        msgs = cp.deepcopy(inputs)
        content = []
        for i, msg in enumerate(msgs):
            if msg['type'] == 'text':
                pass
            else:
                try:
                    image_data = self.image_to_base64(msg['value'])
                except Exception as e:
                    if self.verbose:
                        self.logger.error(e)
                    image_data = ''
                msg['value'] = image_data
            content.append(msg)
        return content

    def generate_inner(self, inputs, **kwargs) -> str:
        """Send one chat request; return (ret_code, answer, log) per BaseAPI contract.

        ret_code is 0 on success and -1 on any failure (non-200 status or an
        unexpected response shape), in which case `self.fail_msg` is returned.
        """
        assert isinstance(inputs, str) or isinstance(inputs, list)
        start = time.time()
        inputs = [inputs] if isinstance(inputs, str) else inputs

        messages = self.prepare_inputs(inputs)

        service_url = "https://bailingchat.alipay.com/api/proxy/eval/antgmm/completions"

        payload = {
            "structInput": json.dumps([{"role":"user","content":messages}]),
            "sk": self.key,
            "model": self.model,
            "timeout": 180000
        }
        response = requests.post(service_url, headers=self.headers, json=payload)
        if self.verbose:
            self.logger.info('Time for requesting is:')
            self.logger.info(time.time() - start)
        try:
            assert response.status_code == 200
            output = json.loads(response.text)
            answer = output['preds']['pred']
            if self.verbose:
                self.logger.info(f'inputs: {inputs}\nanswer: {answer}')
            return 0, answer, 'Succeeded! '
        except Exception as e:
            if self.verbose:
                self.logger.error(e)
                self.logger.error(f'The input messages are {inputs}.')
            return -1, self.fail_msg, ''
85
+
86
+
87
class bailingMMAPI(bailingMMWrapper):
    """Public entry point; a thin alias over :class:`bailingMMWrapper`."""

    def generate(self, message, dataset=None):
        # Delegate straight to the wrapper implementation.
        return super(bailingMMAPI, self).generate(message, dataset=dataset)
VLMEvalKit-sudoku/vlmeval/api/base.py ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import time
2
+ import random as rd
3
+ from abc import abstractmethod
4
+ import os.path as osp
5
+ import copy as cp
6
+ from ..smp import get_logger, parse_file, concat_images_vlmeval, LMUDataRoot, md5, decode_base64_to_image_file
7
+
8
+
9
class BaseAPI:
    """Base class for all API-backed models in VLMEvalKit.

    Subclasses implement `generate_inner` (and optionally `chat_inner`);
    this class supplies input normalization, a shared retry loop, and
    logging around them.
    """

    # Message types a model accepts by default; subclasses may narrow this.
    allowed_types = ['text', 'image', 'video']
    INTERLEAVE = True
    INSTALL_REQ = False

    def __init__(self,
                 retry=10,
                 wait=1,
                 system_prompt=None,
                 verbose=True,
                 fail_msg='Failed to obtain answer via API.',
                 **kwargs):
        """Base Class for all APIs.

        Args:
            retry (int, optional): The retry times for `generate_inner`. Defaults to 10.
            wait (int, optional): The wait time after each failed retry of `generate_inner`. Defaults to 1.
            system_prompt (str, optional): Defaults to None.
            verbose (bool, optional): Defaults to True.
            fail_msg (str, optional): The message to return when failed to obtain answer.
                Defaults to 'Failed to obtain answer via API.'.
            **kwargs: Other kwargs for `generate_inner`.
        """
        self.wait = wait
        self.retry = retry
        self.system_prompt = system_prompt
        self.verbose = verbose
        self.fail_msg = fail_msg
        self.logger = get_logger('ChatAPI')

        if len(kwargs):
            self.logger.info(f'BaseAPI received the following kwargs: {kwargs}')
            self.logger.info('Will try to use them as kwargs for `generate`. ')
        self.default_kwargs = kwargs

    @abstractmethod
    def generate_inner(self, inputs, **kwargs):
        """The inner function to generate the answer.

        Returns:
            tuple(int, str, str): ret_code, response, log
        """
        self.logger.warning('For APIBase, generate_inner is an abstract method. ')
        assert 0, 'generate_inner not defined'
        ret_code, answer, log = None, None, None
        # if ret_code is 0, means succeed
        return ret_code, answer, log

    def working(self):
        """If the API model is working, return True, else return False.

        Probes the endpoint with a trivial 'hello' prompt, up to 5 times.

        Returns:
            bool: If the API model is working, return True, else return False.
        """
        # Temporarily shrink the request timeout (when the subclass defines
        # one) so a dead endpoint fails the probe quickly.
        self.old_timeout = None
        if hasattr(self, 'timeout'):
            self.old_timeout = self.timeout
            self.timeout = 120

        retry = 5
        while retry > 0:
            ret = self.generate('hello')
            if ret is not None and ret != '' and self.fail_msg not in ret:
                if self.old_timeout is not None:
                    self.timeout = self.old_timeout
                return True
            retry -= 1

        if self.old_timeout is not None:
            self.timeout = self.old_timeout
        return False

    def check_content(self, msgs):
        """Check the content type of the input. Four types are allowed: str, dict, liststr, listdict.

        Args:
            msgs: Raw input messages.

        Returns:
            str: The message type ('unknown' when none of the four applies).
        """
        if isinstance(msgs, str):
            return 'str'
        if isinstance(msgs, dict):
            return 'dict'
        if isinstance(msgs, list):
            types = [self.check_content(m) for m in msgs]
            if all(t == 'str' for t in types):
                return 'liststr'
            if all(t == 'dict' for t in types):
                return 'listdict'
        return 'unknown'

    def preproc_content(self, inputs):
        """Convert the raw input messages to a list of dicts.

        Args:
            inputs: raw input messages.

        Returns:
            list(dict): The preprocessed input messages. Will return None if failed to preprocess the input.
        """
        if self.check_content(inputs) == 'str':
            return [dict(type='text', value=inputs)]
        elif self.check_content(inputs) == 'dict':
            assert 'type' in inputs and 'value' in inputs
            return [inputs]
        elif self.check_content(inputs) == 'liststr':
            res = []
            for s in inputs:
                # A plain string may actually be a file path; classify by MIME.
                mime, pth = parse_file(s)
                if mime is None or mime == 'unknown':
                    res.append(dict(type='text', value=s))
                else:
                    res.append(dict(type=mime.split('/')[0], value=pth))
            return res
        elif self.check_content(inputs) == 'listdict':
            for item in inputs:
                assert 'type' in item and 'value' in item
                mime, s = parse_file(item['value'])
                if mime is None:
                    assert item['type'] == 'text', item['value']
                else:
                    assert mime.split('/')[0] == item['type']
                    item['value'] = s
            return inputs
        else:
            return None

    # May exceed the context windows size, so try with different turn numbers.
    def chat_inner(self, inputs, **kwargs):
        """Call `generate_inner`, dropping the oldest turns on failure until it fits."""
        _ = kwargs.pop('dataset', None)
        while len(inputs):
            try:
                return self.generate_inner(inputs, **kwargs)
            except Exception as e:
                if self.verbose:
                    self.logger.info(f'{type(e)}: {e}')
                # Drop the leading turn(s) until the history starts at a user turn.
                inputs = inputs[1:]
                while len(inputs) and inputs[0]['role'] != 'user':
                    inputs = inputs[1:]
                continue
        return -1, self.fail_msg + ': ' + 'Failed with all possible conversation turns.', None

    def _merged_kwargs(self, overrides):
        """Return construction-time default kwargs overlaid with per-call overrides."""
        kwargs = cp.deepcopy(self.default_kwargs)
        kwargs.update(overrides)
        return kwargs

    def _retry_call(self, fn, payload, kwargs):
        """Shared retry loop used by both `chat` and `generate`.

        Calls ``fn(payload, **kwargs)`` up to `self.retry` times and returns
        the first successful answer; otherwise the last non-empty answer or
        `self.fail_msg`.
        """
        answer = None
        # a very small random delay [0s - 0.5s] to de-synchronize workers
        time.sleep(rd.random() * 0.5)

        for i in range(self.retry):
            try:
                ret_code, answer, log = fn(payload, **kwargs)
                # Guard `answer is not None` first: `in None` would raise a
                # TypeError and silently burn a retry via the except below.
                if ret_code == 0 and answer is not None and answer != '' and self.fail_msg not in answer:
                    if self.verbose:
                        print(answer)
                    return answer
                elif self.verbose:
                    if not isinstance(log, str):
                        try:
                            log = log.text
                        except Exception as e:
                            self.logger.warning(f'Failed to parse {log} as an http response: {str(e)}. ')
                    self.logger.info(f'RetCode: {ret_code}\nAnswer: {answer}\nLog: {log}')
            except Exception as err:
                if self.verbose:
                    self.logger.error(f'An error occured during try {i}: ')
                    self.logger.error(f'{type(err)}: {err}')
            # delay before each retry
            time.sleep(rd.random() * self.wait * 2)

        return self.fail_msg if answer in ['', None] else answer

    def chat(self, messages, **kwargs1):
        """The main function for multi-turn chatting. Will call `chat_inner` with the preprocessed input messages."""
        assert hasattr(self, 'chat_inner'), 'The API model should has the `chat_inner` method. '
        for msg in messages:
            assert isinstance(msg, dict) and 'role' in msg and 'content' in msg, msg
            assert self.check_content(msg['content']) in ['str', 'dict', 'liststr', 'listdict'], msg
            msg['content'] = self.preproc_content(msg['content'])

        assert messages[-1]['role'] == 'user'
        return self._retry_call(self.chat_inner, messages, self._merged_kwargs(kwargs1))

    def preprocess_message_with_role(self, message):
        """Strip 'role' keys from message items, folding system turns into `self.system_prompt`."""
        system_prompt = ''
        new_message = []

        for data in message:
            assert isinstance(data, dict)
            role = data.pop('role', 'user')
            if role == 'system':
                system_prompt += data['value'] + '\n'
            else:
                new_message.append(data)

        if system_prompt != '':
            if self.system_prompt is None:
                self.system_prompt = system_prompt
            else:
                # Only append prompts we have not merged in before.
                if system_prompt not in self.system_prompt:
                    self.system_prompt += '\n' + system_prompt
        return new_message

    def generate(self, message, **kwargs1):
        """The main function to generate the answer. Will call `generate_inner` with the preprocessed input messages.

        Args:
            message: raw input messages.

        Returns:
            str: The generated answer of the Failed Message if failed to obtain answer.
        """
        if self.check_content(message) == 'listdict':
            message = self.preprocess_message_with_role(message)

        assert self.check_content(message) in ['str', 'dict', 'liststr', 'listdict'], f'Invalid input type: {message}'
        message = self.preproc_content(message)
        assert message is not None and self.check_content(message) == 'listdict'
        for item in message:
            assert item['type'] in self.allowed_types, f'Invalid input type: {item["type"]}'

        return self._retry_call(self.generate_inner, message, self._merged_kwargs(kwargs1))

    def message_to_promptimg(self, message, dataset=None):
        """Collapse an interleaved message into (prompt_text, single_image) for non-interleaved models."""
        assert not self.INTERLEAVE
        model_name = self.__class__.__name__
        import warnings
        warnings.warn(
            f'Model {model_name} does not support interleaved input. '
            'Will use the first image and aggregated texts as prompt. ')
        num_images = len([x for x in message if x['type'] == 'image'])
        if num_images == 0:
            prompt = '\n'.join([x['value'] for x in message if x['type'] == 'text'])
            image = None
        elif num_images == 1:
            prompt = '\n'.join([x['value'] for x in message if x['type'] == 'text'])
            image = [x['value'] for x in message if x['type'] == 'image'][0]
        else:
            prompt = '\n'.join([x['value'] if x['type'] == 'text' else '<image>' for x in message])
            if dataset == 'BLINK':
                # BLINK: tile all images into one canvas instead of dropping them.
                image = concat_images_vlmeval(
                    [x['value'] for x in message if x['type'] == 'image'],
                    target_size=512)
            else:
                image = [x['value'] for x in message if x['type'] == 'image'][0]
        return prompt, image

    def dump_image(self, line, dataset):
        """Delegate to the dump-image callback installed via `set_dump_image`."""
        return self.dump_image_func(line)

    def set_dump_image(self, dump_image_func):
        """Install the callback used by `dump_image` to materialize images from a dataset row."""
        self.dump_image_func = dump_image_func
VLMEvalKit-sudoku/vlmeval/api/claude.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from vlmeval.smp import *
2
+ from vlmeval.api.base import BaseAPI
3
+ from time import sleep
4
+ import base64
5
+ import mimetypes
6
+ from PIL import Image
7
+
8
+ alles_url = 'https://openxlab.org.cn/gw/alles-apin-hub/v1/claude/v1/text/chat'
9
+ alles_headers = {
10
+ 'alles-apin-token': '',
11
+ 'Content-Type': 'application/json'
12
+ }
13
+ official_url = 'https://api.anthropic.com/v1/messages'
14
+ official_headers = {
15
+ 'x-api-key': '',
16
+ 'anthropic-version': '2023-06-01',
17
+ 'content-type': 'application/json'
18
+ }
19
+
20
+
21
class Claude_Wrapper(BaseAPI):
    """Wrapper around the Anthropic Claude messages API.

    Supports two backends: the 'alles' proxy and the official Anthropic
    endpoint, selected via the `backend` argument or the ANTHROPIC_BACKEND
    environment variable.
    """

    is_api: bool = True

    def __init__(self,
                 backend: str = 'alles',
                 model: str = 'claude-3-opus-20240229',
                 key: str = None,
                 retry: int = 10,
                 timeout: int = 60,
                 system_prompt: str = None,
                 verbose: bool = True,
                 temperature: float = 0,
                 max_tokens: int = 2048,
                 **kwargs):

        # The env variable overrides the constructor argument.
        if os.environ.get('ANTHROPIC_BACKEND', '') == 'official':
            backend = 'official'

        assert backend in ['alles', 'official'], f'Invalid backend: {backend}'
        self.backend = backend
        self.url = alles_url if backend == 'alles' else official_url
        self.model = model
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.headers = alles_headers if backend == 'alles' else official_headers
        self.timeout = timeout

        # Prefer an explicit key; else fall back to the backend-specific env var.
        if key is not None:
            self.key = key
        else:
            self.key = os.environ.get('ALLES', '') if self.backend == 'alles' else os.environ.get('ANTHROPIC_API_KEY', '')  # noqa: E501

        if self.backend == 'alles':
            self.headers['alles-apin-token'] = self.key
        else:
            self.headers['x-api-key'] = self.key

        super().__init__(retry=retry, verbose=verbose, system_prompt=system_prompt, **kwargs)

    def encode_image_file_to_base64(self, image_path, target_size=-1, fmt='.jpg'):
        """Load an image file and return its base64 encoding in PIL format *fmt*."""
        image = Image.open(image_path)
        if fmt in ('.jpg', '.jpeg'):
            format = 'JPEG'
        elif fmt == '.png':
            format = 'PNG'
        else:
            print(f'Unsupported image format: {fmt}, will cause media type match error.')
            # Fix: the original left `format` unbound here, raising NameError
            # below. Fall back to a best-effort PIL format name instead.
            format = fmt.lstrip('.').upper()

        return encode_image_to_base64(image, target_size=target_size, fmt=format)

    # inputs can be a lvl-2 nested list: [content1, content2, content3, ...]
    # content can be a string or a list of image & text
    def prepare_itlist(self, inputs):
        """Convert a flat list of {'type', 'value'} dicts into Claude content blocks."""
        assert np.all([isinstance(x, dict) for x in inputs])
        has_images = np.sum([x['type'] == 'image' for x in inputs])
        if has_images:
            content_list = []
            for msg in inputs:
                if msg['type'] == 'text' and msg['value'] != '':
                    content_list.append(dict(type='text', text=msg['value']))
                elif msg['type'] == 'image':
                    pth = msg['value']
                    suffix = osp.splitext(pth)[-1].lower()
                    # Claude requires an explicit media type for base64 images.
                    media_type = mimetypes.types_map.get(suffix, None)
                    assert media_type is not None

                    content_list.append(dict(
                        type='image',
                        source={
                            'type': 'base64',
                            'media_type': media_type,
                            'data': self.encode_image_file_to_base64(pth, target_size=4096, fmt=suffix)
                        }))
        else:
            # Text-only: merge everything into a single text block.
            assert all([x['type'] == 'text' for x in inputs])
            text = '\n'.join([x['value'] for x in inputs])
            content_list = [dict(type='text', text=text)]
        return content_list

    def prepare_inputs(self, inputs):
        """Wrap content blocks into role-tagged messages (defaulting to a single user turn)."""
        input_msgs = []
        assert isinstance(inputs, list) and isinstance(inputs[0], dict)
        assert np.all(['type' in x for x in inputs]) or np.all(['role' in x for x in inputs]), inputs
        if 'role' in inputs[0]:
            assert inputs[-1]['role'] == 'user', inputs[-1]
            for item in inputs:
                input_msgs.append(dict(role=item['role'], content=self.prepare_itlist(item['content'])))
        else:
            input_msgs.append(dict(role='user', content=self.prepare_itlist(inputs)))
        return input_msgs

    def generate_inner(self, inputs, **kwargs) -> str:
        """POST one messages request; returns (ret_code, answer, raw_response)."""
        payload = {
            'model': self.model,
            'max_tokens': self.max_tokens,
            'messages': self.prepare_inputs(inputs),
            **kwargs
        }
        if self.system_prompt is not None:
            payload['system'] = self.system_prompt

        response = requests.request(
            'POST', self.url, headers=self.headers, data=json.dumps(payload), timeout=self.timeout * 1.1
        )
        ret_code = response.status_code
        # Normalize any 2xx status to 0 (success) for the BaseAPI retry loop.
        ret_code = 0 if (200 <= int(ret_code) < 300) else ret_code
        answer = self.fail_msg

        try:
            resp_struct = json.loads(response.text)
            if self.backend == 'alles':
                answer = resp_struct['data']['content'][0]['text'].strip()
            elif self.backend == 'official':
                answer = resp_struct['content'][0]['text'].strip()
        except Exception as err:
            if self.verbose:
                self.logger.error(f'{type(err)}: {err}')
                self.logger.error(response.text if hasattr(response, 'text') else response)

        return ret_code, answer, response
142
+
143
+
144
class Claude3V(Claude_Wrapper):
    """Claude-3 vision entry point; the `dataset` kwarg is accepted but unused."""

    def generate(self, message, dataset=None):
        # Intentionally binds super() at Claude_Wrapper, resolving to
        # BaseAPI.generate in the MRO.
        return super(Claude_Wrapper, self).generate(message)
VLMEvalKit-sudoku/vlmeval/api/hunyuan.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from vlmeval.smp import *
2
+ import os
3
+ import sys
4
+ from vlmeval.api.base import BaseAPI
5
+ import math
6
+ from vlmeval.dataset import DATASET_TYPE
7
+ from vlmeval.dataset import img_root_map
8
+ from io import BytesIO
9
+ import pandas as pd
10
+ import requests
11
+ import json
12
+ import base64
13
+ import time
14
+
15
+
16
class HunyuanWrapper(BaseAPI):
    """Wrapper around the Tencent Hunyuan vision chat API (tencentcloud SDK)."""

    is_api: bool = True
    _apiVersion = '2024-12-31'
    _service = 'hunyuan'

    def __init__(self,
                 model: str = 'hunyuan-standard-vision',
                 retry: int = 5,
                 secret_key: str = None,
                 secret_id: str = None,
                 verbose: bool = True,
                 system_prompt: str = None,
                 temperature: float = 0,
                 timeout: int = 60,
                 api_base: str = 'hunyuan.tencentcloudapi.com',
                 **kwargs):

        self.model = model
        self.cur_idx = 0
        self.fail_msg = 'Failed to obtain answer via API. '
        self.temperature = temperature

        warnings.warn('You may need to set the env variable HUNYUAN_SECRET_ID & HUNYUAN_SECRET_KEY to use Hunyuan. ')

        # Env variables take precedence over constructor arguments.
        secret_key = os.environ.get('HUNYUAN_SECRET_KEY', secret_key)
        assert secret_key is not None, 'Please set the environment variable HUNYUAN_SECRET_KEY. '
        secret_id = os.environ.get('HUNYUAN_SECRET_ID', secret_id)
        assert secret_id is not None, 'Please set the environment variable HUNYUAN_SECRET_ID. '

        self.endpoint = api_base
        self.secret_id = secret_id
        self.secret_key = secret_key
        self.timeout = timeout

        # Initialize the base class first so `self.logger` exists before the
        # SDK import below (the original used self.logger on ImportError
        # before it was created, masking the real error with AttributeError).
        super().__init__(retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs)

        try:
            from tencentcloud.common import credential
            from tencentcloud.common.profile.client_profile import ClientProfile
            from tencentcloud.common.profile.http_profile import HttpProfile
            from tencentcloud.hunyuan.v20230901 import hunyuan_client
        except ImportError as err:
            self.logger.critical('Please install tencentcloud-sdk-python to use Hunyuan API. ')
            raise err

        cred = credential.Credential(self.secret_id, self.secret_key)
        httpProfile = HttpProfile(reqTimeout=300)
        httpProfile.endpoint = self.endpoint
        clientProfile = ClientProfile()
        clientProfile.httpProfile = httpProfile
        self.client = hunyuan_client.HunyuanClient(cred, '', clientProfile)
        # Security: never log the raw secret key; the ID is enough for debugging.
        self.logger.info(f'Using Endpoint: {self.endpoint}; API Secret ID: {self.secret_id}')

    def use_custom_prompt(self, dataset_name):
        """Use the hand-built MCQ prompt only for multiple-choice datasets."""
        return DATASET_TYPE(dataset_name) == 'MCQ'

    def build_prompt(self, line, dataset=None):
        """Build an MCQ prompt (hint + question + lettered options) plus images."""
        assert self.use_custom_prompt(dataset)
        assert dataset is None or isinstance(dataset, str)

        tgt_path = self.dump_image(line, dataset)

        question = line['question']
        options = {
            cand: line[cand]
            for cand in string.ascii_uppercase
            if cand in line and not pd.isna(line[cand])
        }
        options_prompt = 'Options:\n'
        for key, item in options.items():
            options_prompt += f'{key}. {item}\n'
        hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
        prompt = ''
        if hint is not None:
            prompt += f'Hint: {hint}\n'
        prompt += f'Question: {question}\n'
        if len(options):
            prompt += options_prompt
            prompt += 'Answer with the option letter from the given choices directly.'

        msgs = []
        if isinstance(tgt_path, list):
            msgs.extend([dict(type='image', value=p) for p in tgt_path])
        else:
            msgs = [dict(type='image', value=tgt_path)]
        msgs.append(dict(type='text', value=prompt))
        return msgs

    # inputs can be a lvl-2 nested list: [content1, content2, content3, ...]
    # content can be a string or a list of image & text
    def prepare_itlist(self, inputs):
        """Convert {'type','value'} dicts to the SDK's capitalized content schema."""
        assert np.all([isinstance(x, dict) for x in inputs])
        has_images = np.sum([x['type'] == 'image' for x in inputs])
        if has_images:
            content_list = []
            for msg in inputs:
                if msg['type'] == 'text':
                    content_list.append(dict(Type='text', Text=msg['value']))
                elif msg['type'] == 'image':
                    from PIL import Image
                    img = Image.open(msg['value'])
                    # Images are sent inline as base64 data URLs.
                    b64 = encode_image_to_base64(img)
                    img_struct = dict(Url=f'data:image/jpeg;base64,{b64}')
                    content_list.append(dict(Type='image_url', ImageUrl=img_struct))
        else:
            assert all([x['type'] == 'text' for x in inputs])
            text = '\n'.join([x['value'] for x in inputs])
            content_list = [dict(Type='text', Text=text)]
        return content_list

    def prepare_inputs(self, inputs):
        """Wrap content into Role/Contents messages, prepending the system prompt."""
        input_msgs = []
        if self.system_prompt is not None:
            input_msgs.append(dict(Role='system', Content=self.system_prompt))
        assert isinstance(inputs, list) and isinstance(inputs[0], dict)
        assert np.all(['type' in x for x in inputs]) or np.all(['role' in x for x in inputs]), inputs
        if 'role' in inputs[0]:
            assert inputs[-1]['role'] == 'user', inputs[-1]
            for item in inputs:
                input_msgs.append(dict(Role=item['role'], Contents=self.prepare_itlist(item['content'])))
        else:
            input_msgs.append(dict(Role='user', Contents=self.prepare_itlist(inputs)))
        return input_msgs

    def generate_inner(self, inputs, **kwargs) -> str:
        """Issue one ChatCompletions call; returns (ret_code, answer, raw_resp)."""
        from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
        from tencentcloud.hunyuan.v20230901 import models

        input_msgs = self.prepare_inputs(inputs)
        temperature = kwargs.pop('temperature', self.temperature)

        payload = dict(
            Model=self.model,
            Messages=input_msgs,
            Temperature=temperature,
            TopK=1,
            **kwargs)

        try:
            req = models.ChatCompletionsRequest()
            req.from_json_string(json.dumps(payload))
            resp = self.client.ChatCompletions(req)
            resp = json.loads(resp.to_json_string())
            answer = resp['Choices'][0]['Message']['Content']
            return 0, answer, resp
        except TencentCloudSDKException as e:
            # All SDK failures are retryable from BaseAPI's perspective; keep
            # the error code in the message for diagnosis.
            self.logger.error(f'Got error code: {e.get_code()}')
            if e.get_code() in ['ClientNetworkError', 'InternalError', 'ServerNetworkError', 'LimitExceeded']:
                return -1, self.fail_msg + e.get_code(), None
            else:
                return -1, self.fail_msg + str(e), None
178
+
179
+
180
class HunyuanVision(HunyuanWrapper):
    """Hunyuan vision entry point; the `dataset` kwarg is accepted but unused."""

    def generate(self, message, dataset=None):
        # Delegate straight to the wrapper's generate implementation.
        return super().generate(message)
VLMEvalKit-sudoku/vlmeval/api/qwen_api.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from http import HTTPStatus
2
+ import os
3
+ from vlmeval.api.base import BaseAPI
4
+ from vlmeval.smp import *
5
+
6
+
7
+ # Note: This is a pure language model API.
8
# Note: This is a pure language model API.
class QwenAPI(BaseAPI):
    """Wrapper around the DashScope Qwen text-generation API (no vision input)."""

    is_api: bool = True

    def __init__(self,
                 model: str = 'qwen-max-1201',
                 retry: int = 5,
                 verbose: bool = True,
                 seed: int = 2680,
                 temperature: float = 0.0,
                 system_prompt: str = None,
                 key: str = None,
                 max_tokens: int = 2048,
                 proxy: str = None,
                 **kwargs):

        assert model in ['qwen-turbo', 'qwen-plus', 'qwen-max', 'qwen-max-1201', 'qwen-max-longcontext']
        self.model = model
        import dashscope
        self.fail_msg = 'Failed to obtain answer via API. '
        self.max_tokens = max_tokens
        self.temperature = temperature
        self.seed = seed
        # Fall back to the DashScope env variable when no key is provided.
        if key is None:
            key = os.environ.get('DASHSCOPE_API_KEY', None)
        assert key is not None, (
            'Please set the API Key (obtain it here: '
            'https://help.aliyun.com/zh/dashscope/developer-reference/vl-plus-quick-start)'
        )
        dashscope.api_key = key
        if proxy is not None:
            proxy_set(proxy)
        super().__init__(retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs)

    @staticmethod
    def build_msgs(msgs_raw, system_prompt=None):
        """Turn a flat list of strings into alternating user/assistant messages.

        Even indices become 'user' turns, odd indices 'assistant' turns; an
        optional system prompt is prepended.
        """
        msgs = cp.deepcopy(msgs_raw)
        ret = []
        if system_prompt is not None:
            ret.append(dict(role='system', content=system_prompt))
        for i, msg in enumerate(msgs):
            role = 'user' if i % 2 == 0 else 'assistant'
            ret.append(dict(role=role, content=msg))
        return ret

    def generate_inner(self, inputs, **kwargs) -> str:
        """Call DashScope Generation once; returns (ret_code, answer, log)."""
        assert isinstance(inputs, str) or isinstance(inputs, list)
        inputs = [inputs] if isinstance(inputs, str) else inputs
        messages = self.build_msgs(msgs_raw=inputs, system_prompt=self.system_prompt)

        import dashscope
        response = dashscope.Generation.call(
            model=self.model,
            messages=messages,
            seed=self.seed,
            temperature=self.temperature,
            max_tokens=self.max_tokens,
            result_format='message',  # set the result to be "message" format.
        )
        if response.status_code != HTTPStatus.OK:
            return -1, 'Error: Bad Response Status Code. ', f'The response status code is {response.status_code}. '

        try:
            return 0, response['output']['choices'][0]['message']['content'].strip(), 'Succeeded! '
        except Exception as err:
            return -1, f'Error: Failed to parse the response. {err}', response
+ return -1, f'Error: Failed to parse the response. {err}', response
VLMEvalKit-sudoku/vlmeval/api/reka.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from vlmeval.smp import *
2
+ from vlmeval.api.base import BaseAPI
3
+ from time import sleep
4
+ import mimetypes
5
+
6
+
7
class Reka_Wrapper(BaseAPI):
    """Wrapper around the `reka` Python SDK (single-image, non-interleaved input)."""

    is_api: bool = True
    INTERLEAVE: bool = False

    def __init__(self,
                 model: str = 'reka-flash-20240226',
                 key: str = None,
                 retry: int = 10,
                 system_prompt: str = None,
                 verbose: bool = True,
                 temperature: float = 0,
                 max_tokens: int = 1024,
                 **kwargs):

        # Fail early with an actionable message when the SDK is missing.
        try:
            import reka
        except ImportError:
            raise ImportError('Please install reka by running "pip install reka-api"')

        self.model = model
        gen_kwargs = dict(temperature=temperature, request_output_len=max_tokens)
        gen_kwargs.update(kwargs)
        self.kwargs = gen_kwargs
        # Prefer an explicit key; fall back to the REKA_API_KEY env variable.
        self.key = key if key is not None else os.environ.get('REKA_API_KEY', '')
        super().__init__(retry=retry, verbose=verbose, system_prompt=system_prompt, **kwargs)

    def generate_inner(self, inputs, **kwargs) -> str:
        """Send one chat request with a single inline image; returns (code, answer, resp)."""
        import reka
        reka.API_KEY = self.key
        dataset = kwargs.pop('dataset', None)
        # Collapse the (possibly multi-part) message into one prompt + one image.
        prompt, image_path = self.message_to_promptimg(inputs, dataset=dataset)
        image_b64 = encode_image_file_to_base64(image_path)

        response = reka.chat(
            model_name=self.model,
            human=prompt,
            media_url=f'data:image/jpeg;base64,{image_b64}',
            **self.kwargs)

        try:
            return 0, response['text'], response
        except Exception as err:
            return -1, self.fail_msg + str(err), response
+ return -1, self.fail_msg + str(err), response
54
+
55
+
56
+ class Reka(Reka_Wrapper):
57
+
58
+ def generate(self, message, dataset=None):
59
+ return super(Reka_Wrapper, self).generate(message)
VLMEvalKit-sudoku/vlmeval/dataset/__init__.py ADDED
@@ -0,0 +1,317 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+
3
+ from .image_base import img_root_map, ImageBaseDataset
4
+ from .image_caption import ImageCaptionDataset
5
+ from .image_yorn import ImageYORNDataset
6
+ from .image_mcq import (
7
+ ImageMCQDataset, MMMUDataset, CustomMCQDataset, MUIRDataset, GMAIMMBenchDataset, MMERealWorld, HRBenchDataset,
8
+ NaturalBenchDataset, WeMath, MMMUProDataset, VMCBenchDataset, MedXpertQA_MM_test, LEGO, VisuLogic, CVBench, TDBench,
9
+ MicroBench, OmniMedVQA, MSEarthMCQ, VLMBlind, SCAM, _3DSRBench, AffordanceDataset, OmniEarthMCQBench, XLRSBench,
10
+ TreeBench, CVQA, TopViewRS, ShapeGrid
11
+ )
12
+ from .image_mt import MMDUDataset
13
+ from .image_vqa import (
14
+ ImageVQADataset, MathVision, OCRBench, MathVista, LLaVABench, LLaVABench_KO, VGRPBench, MMVet, MTVQADataset,
15
+ TableVQABench, CustomVQADataset, CRPE, MathVerse, OlympiadBench, SeePhys, QSpatial, VizWiz, MMNIAH, LogicVista,
16
+ MME_CoT, MMSci_Captioning, Physics_yale, TDBenchGrounding, WildDocBenchmark, OCR_Reasoning, PhyX, CountBenchQA,
17
+ ZEROBench, Omni3DBench, TallyQA, MMEReasoning, MMVMBench, BMMR, OCRBench_v2, AyaVisionBench
18
+ )
19
+
20
+ from .image_ccocr import CCOCRDataset
21
+ from .image_shortqa import ImageShortQADataset, PathVQA_VAL, PathVQA_TEST
22
+ from .text_mcq import CustomTextMCQDataset, TextMCQDataset
23
+
24
+ from .vcr import VCRDataset
25
+ from .mmlongbench import MMLongBench
26
+ from .dude import DUDE
27
+ from .slidevqa import SlideVQA
28
+ from .vl_rewardbench import VLRewardBench
29
+ from .vlm2bench import VLM2Bench
30
+ from .vlmbias import VLMBias
31
+ from .spatial457 import Spatial457
32
+ from .charxiv import CharXiv
33
+
34
+ from .mmbench_video import MMBenchVideo
35
+ from .videomme import VideoMME
36
+ from .video_holmes import Video_Holmes
37
+ from .mvbench import MVBench, MVBench_MP4
38
+ from .tamperbench import MVTamperBench
39
+ from .miabench import MIABench
40
+ from .mlvu import MLVU, MLVU_MCQ, MLVU_OpenEnded
41
+ from .tempcompass import TempCompass, TempCompass_Captioning, TempCompass_MCQ, TempCompass_YorN
42
+ from .longvideobench import LongVideoBench
43
+ from .video_concat_dataset import ConcatVideoDataset
44
+ from .mmgenbench import MMGenBench
45
+ from .cgbench import CGBench_MCQ_Grounding_Mini, CGBench_OpenEnded_Mini, CGBench_MCQ_Grounding, CGBench_OpenEnded
46
+ from .CGAVCounting.cg_av_counting import CGAVCounting
47
+
48
+ from .megabench import MEGABench
49
+ from .moviechat1k import MovieChat1k
50
+ from .video_mmlu import Video_MMLU_CAP, Video_MMLU_QA
51
+ from .vdc import VDC
52
+ from .vcrbench import VCRBench
53
+ from .gobench import GOBenchDataset
54
+ from .sfebench import SFE
55
+ from .visfactor import VisFactor
56
+ from .ost_bench import OSTDataset
57
+
58
+ from .EgoExoBench.egoexobench import EgoExoBench_MCQ
59
+
60
+ from .worldsense import WorldSense
61
+ from .qbench_video import QBench_Video, QBench_Video_MCQ, QBench_Video_VQA
62
+
63
+ from .cmmmu import CMMMU
64
+ from .emma import EMMADataset
65
+ from .wildvision import WildVision
66
+ from .mmmath import MMMath
67
+ from .dynamath import Dynamath
68
+ from .creation import CreationMMBenchDataset
69
+ from .mmalignbench import MMAlignBench
70
+ from .utils import *
71
+ from .video_dataset_config import *
72
+ from ..smp import *
73
+ from .OmniDocBench.omnidocbench import OmniDocBench
74
+ from .moat import MOAT
75
+ from .GUI.screenspot import ScreenSpot
76
+ from .GUI.screenspot_v2 import ScreenSpotV2
77
+ from .GUI.screenspot_pro import ScreenSpot_Pro
78
+ from .mmifeval import MMIFEval
79
+ from .chartmimic import ChartMimic
80
+ from .m4bench import M4Bench
81
+
82
+
83
class ConcatDataset(ImageBaseDataset):
    # This dataset takes multiple dataset names as input and aggregates them into a single dataset.
    # Each single dataset should not have a field named `SUB_DATASET`

    # Map from compilation name to the list of member dataset names.
    DATASET_SETS = {
        'MMMB': ['MMMB_ar', 'MMMB_cn', 'MMMB_en', 'MMMB_pt', 'MMMB_ru', 'MMMB_tr'],
        'MTL_MMBench_DEV': [
            'MMBench_dev_ar', 'MMBench_dev_cn', 'MMBench_dev_en',
            'MMBench_dev_pt', 'MMBench_dev_ru', 'MMBench_dev_tr'
        ],
        'ScreenSpot_Pro': [
            'ScreenSpot_Pro_Development', 'ScreenSpot_Pro_Creative', 'ScreenSpot_Pro_CAD',
            'ScreenSpot_Pro_Scientific', 'ScreenSpot_Pro_Office', 'ScreenSpot_Pro_OS'
        ],
        'ScreenSpot': ['ScreenSpot_Mobile', 'ScreenSpot_Desktop', 'ScreenSpot_Web'],
        'ScreenSpot_v2': ['ScreenSpot_v2_Mobile', 'ScreenSpot_v2_Desktop', 'ScreenSpot_v2_Web'],
        'M4Bench': ['State_Invariance', 'State_Comparison', 'Spatial_Perception', 'Instance_Comparison', 'Detailed_Difference'],  # noqa: E501
    }

    def __init__(self, dataset):
        """Build every member dataset and concatenate their records.

        All members must share one TYPE and one MODALITY. Each record keeps its
        origin in `SUB_DATASET` and its member-local index in `original_index`,
        while `index` is renumbered globally across the concatenation.
        """
        datasets = self.DATASET_SETS[dataset]
        self.dataset_map = {}
        # The name of the compilation
        self.dataset_name = dataset
        self.datasets = datasets
        for dname in datasets:
            dataset = build_dataset(dname)
            assert dataset is not None, dataset
            self.dataset_map[dname] = dataset
        TYPES = [x.TYPE for x in self.dataset_map.values()]
        MODALITIES = [x.MODALITY for x in self.dataset_map.values()]
        # A compilation is only well-defined when all members agree on type/modality.
        assert np.all([x == TYPES[0] for x in TYPES]), (datasets, TYPES)
        assert np.all([x == MODALITIES[0] for x in MODALITIES]), (datasets, MODALITIES)
        self.TYPE = TYPES[0]
        self.MODALITY = MODALITIES[0]
        data_all = []
        for dname in datasets:
            data = self.dataset_map[dname].data
            data['SUB_DATASET'] = [dname] * len(data)
            # Members carrying inline images are localized to on-disk paths first,
            # so `dump_image` below can assume images are pre-dumped.
            if 'image' in data:
                data_new = localize_df(data, dname, nproc=16)
                data_all.append(data_new)
            else:
                data_all.append(data)

        data = pd.concat(data_all)
        data['original_index'] = data.pop('index')
        data['index'] = np.arange(len(data))
        self.data = data

    def build_prompt(self, line):
        """Delegate prompt construction to the member dataset the line came from."""
        if isinstance(line, int):
            line = self.data.iloc[line]
        idx = line['original_index']
        dname = line['SUB_DATASET']
        org_data = self.dataset_map[dname].data
        # Deep-copy so the member dataset's frame is never mutated by callers.
        org_line = cp.deepcopy(org_data[org_data['index'] == idx]).iloc[0]
        return self.dataset_map[dname].build_prompt(org_line)

    def dump_image(self, line):
        # Assert all images are pre-dumped (guaranteed by localize_df in __init__).
        assert 'image' not in line
        assert 'image_path' in line
        tgt_path = toliststr(line['image_path'])
        return tgt_path

    @classmethod
    def supported_datasets(cls):
        """Expose only the compilation names, not the member dataset names."""
        return list(cls.DATASET_SETS)

    def evaluate(self, eval_file, **judge_kwargs):
        """Split predictions by member dataset, evaluate each, aggregate results.

        Returns a concatenated DataFrame when members return DataFrames, or a
        merged dict (keys prefixed with the member name) when they return dicts.
        """
        # First, split the eval_file by dataset
        data_all = load(eval_file)
        for dname in self.datasets:
            tgt = eval_file.replace(self.dataset_name, dname)
            data_sub = data_all[data_all['SUB_DATASET'] == dname]
            # Restore each member's original per-dataset indexing before judging.
            data_sub.pop('index')
            data_sub['index'] = data_sub.pop('original_index')
            data_sub.pop('SUB_DATASET')
            dump(data_sub, tgt)
        # Then, evaluate each dataset separately
        df_all = []
        dict_all = {}
        # One of the vars will be used to aggregate results
        for dname in self.datasets:
            tgt = eval_file.replace(self.dataset_name, dname)
            res = self.dataset_map[dname].evaluate(tgt, **judge_kwargs)
            if isinstance(res, pd.DataFrame):
                res['DATASET'] = [dname] * len(res)
                df_all.append(res)
            elif isinstance(res, dict):
                res = {f'{dname}:{k}': v for k, v in res.items()}
                dict_all.update(res)
            else:
                raise NotImplementedError(f'Unknown result type {type(res)}')

        if len(df_all):
            result = pd.concat(df_all)
            score_file = get_intermediate_file_path(eval_file, '_acc', 'csv')
            dump(result, score_file)
            return result
        else:
            score_file = get_intermediate_file_path(eval_file, '_score', 'json')
            dump(dict_all, score_file)
            return dict_all
188
+
189
+
190
+ # Add new supported dataset class here
191
# Add new supported dataset class here
# Registry of all image-modality dataset classes.
IMAGE_DATASET = [
    ImageCaptionDataset, ImageYORNDataset, ImageMCQDataset, ImageVQADataset,
    MathVision, MMMUDataset, OCRBench, MathVista, LLaVABench, LLaVABench_KO, VGRPBench, MMVet,
    MTVQADataset, TableVQABench, MMLongBench, VCRDataset, MMDUDataset, DUDE,
    SlideVQA, MUIRDataset, CCOCRDataset, GMAIMMBenchDataset, MMERealWorld,
    HRBenchDataset, CRPE, MathVerse, NaturalBenchDataset, MIABench,
    OlympiadBench, SeePhys, WildVision, MMMath, QSpatial, Dynamath, MMGenBench, VizWiz,
    MMNIAH, CMMMU, VLRewardBench, WeMath, LogicVista, MMMUProDataset,
    CreationMMBenchDataset, ImageShortQADataset, MMAlignBench, OmniDocBench,
    VLM2Bench, VMCBenchDataset, EMMADataset, MME_CoT, MOAT, MedXpertQA_MM_test,
    LEGO, MMSci_Captioning, Physics_yale, ScreenSpot_Pro, ScreenSpot,
    ScreenSpotV2, MMIFEval, Spatial457, VisuLogic, CVBench, PathVQA_VAL,
    PathVQA_TEST, TDBench, TDBenchGrounding, MicroBench, CharXiv, OmniMedVQA,
    WildDocBenchmark, MSEarthMCQ, OCR_Reasoning, PhyX, VLMBlind, CountBenchQA,
    ZEROBench, SCAM, Omni3DBench, TallyQA, _3DSRBench, BMMR, AffordanceDataset,
    MMEReasoning, GOBenchDataset, SFE, ChartMimic, MMVMBench, XLRSBench,
    OmniEarthMCQBench, VisFactor, OSTDataset, OCRBench_v2, TreeBench, CVQA, M4Bench,
    AyaVisionBench, TopViewRS, VLMBias, ShapeGrid
]

# Registry of all video-modality dataset classes.
VIDEO_DATASET = [
    MMBenchVideo, VideoMME, MVBench, MVBench_MP4, MVTamperBench,
    LongVideoBench, WorldSense, VDC, MovieChat1k, MEGABench,
    MLVU, MLVU_MCQ, MLVU_OpenEnded,
    TempCompass, TempCompass_MCQ, TempCompass_Captioning, TempCompass_YorN,
    CGBench_MCQ_Grounding_Mini, CGBench_OpenEnded_Mini, CGBench_MCQ_Grounding, CGBench_OpenEnded,
    QBench_Video, QBench_Video_MCQ, QBench_Video_VQA,
    Video_MMLU_CAP, Video_MMLU_QA,
    Video_Holmes, VCRBench, CGAVCounting,
    EgoExoBench_MCQ,
]

# Text-only (no image/video) dataset classes.
TEXT_DATASET = [
    TextMCQDataset
]

# User-supplied TSV datasets discovered at runtime by build_dataset.
CUSTOM_DATASET = [
    CustomMCQDataset, CustomVQADataset, CustomTextMCQDataset
]

# Compilations that aggregate several member datasets.
DATASET_COLLECTION = [ConcatDataset, ConcatVideoDataset]

DATASET_CLASSES = IMAGE_DATASET + VIDEO_DATASET + TEXT_DATASET + CUSTOM_DATASET + DATASET_COLLECTION  # noqa: E501
# Flat list of every dataset name any registered class can serve.
SUPPORTED_DATASETS = []
for DATASET_CLS in DATASET_CLASSES:
    SUPPORTED_DATASETS.extend(DATASET_CLS.supported_datasets())
237
+
238
+
239
def DATASET_TYPE(dataset, *, default: str = 'MCQ') -> str:
    """Resolve the evaluation TYPE for a dataset name, falling back to *default*."""
    # Registered dataset classes take precedence.
    for dataset_cls in DATASET_CLASSES:
        if dataset in dataset_cls.supported_datasets() and hasattr(dataset_cls, 'TYPE'):
            return dataset_cls.TYPE
    # ConcatDataset compilations: every member must agree on a single TYPE.
    if dataset in ConcatDataset.DATASET_SETS:
        members = ConcatDataset.DATASET_SETS[dataset]
        member_types = [DATASET_TYPE(name) for name in members]
        assert all(t == member_types[0] for t in member_types), (members, member_types)
        return member_types[0]

    # Unregistered datasets: honor the `openended` naming convention, else default.
    if 'openended' in dataset.lower():
        return 'VQA'
    warnings.warn(f'Dataset {dataset} is a custom one and not annotated as `openended`, will treat as {default}. ')  # noqa: E501
    return default
255
+
256
+
257
def DATASET_MODALITY(dataset, *, default: str = 'IMAGE') -> str:
    """Resolve the MODALITY ('IMAGE' / 'VIDEO' / ...) for a dataset name.

    Falls back to *default* (with a warning) for unknown custom datasets.
    """
    if dataset is None:
        warnings.warn(f'Dataset is not specified, will treat modality as {default}. ')
        return default
    for cls in DATASET_CLASSES:
        if dataset in cls.supported_datasets():
            if hasattr(cls, 'MODALITY'):
                return cls.MODALITY
    # Have to add specific routine to handle ConcatDataset
    if dataset in ConcatDataset.DATASET_SETS:
        dataset_list = ConcatDataset.DATASET_SETS[dataset]
        MODALITIES = [DATASET_MODALITY(dname) for dname in dataset_list]
        assert np.all([x == MODALITIES[0] for x in MODALITIES]), (dataset_list, MODALITIES)
        return MODALITIES[0]

    # BUGFIX: the substring probes were upper-case ('VIDEO'/'IMAGE') but were
    # tested against dataset.lower(), so they could never match and every custom
    # dataset fell through to the default. Compare lower-case needles instead.
    if 'video' in dataset.lower():
        return 'VIDEO'
    elif 'image' in dataset.lower():
        return 'IMAGE'
    warnings.warn(f'Dataset {dataset} is a custom one, will treat modality as {default}. ')
    return default
278
+
279
+
280
def build_dataset(dataset_name, **kwargs):
    """Instantiate a dataset by name.

    Resolution order: configured video datasets, registered dataset classes,
    then a custom TSV file under LMUDataRoot. Returns None when the name cannot
    be resolved at all.
    """
    # This membership test is loop-invariant; originally it was re-evaluated for
    # every class in DATASET_CLASSES. Hoist it out of the loop.
    if dataset_name in supported_video_datasets:
        return supported_video_datasets[dataset_name](**kwargs)
    for cls in DATASET_CLASSES:
        if dataset_name in cls.supported_datasets():
            return cls(dataset=dataset_name, **kwargs)

    warnings.warn(f'Dataset {dataset_name} is not officially supported. ')
    data_file = osp.join(LMUDataRoot(), f'{dataset_name}.tsv')
    if not osp.exists(data_file):
        warnings.warn(f'Data file {data_file} does not exist. Dataset building failed. ')
        return None

    data = load(data_file)
    if 'question' not in [x.lower() for x in data.columns]:
        warnings.warn(f'Data file {data_file} does not have a `question` column. Dataset building failed. ')
        return None

    # Heuristics: option columns A/B imply MCQ; image columns imply multimodal.
    if 'A' in data and 'B' in data:
        if 'image' in data or 'image_path' in data:
            warnings.warn(f'Will assume unsupported dataset {dataset_name} as a Custom MCQ dataset. ')
            return CustomMCQDataset(dataset=dataset_name, **kwargs)
        else:
            warnings.warn(f'Will assume unsupported dataset {dataset_name} as a Custom Text MCQ dataset. ')
            return CustomTextMCQDataset(dataset=dataset_name, **kwargs)
    else:
        warnings.warn(f'Will assume unsupported dataset {dataset_name} as a Custom VQA dataset. ')
        return CustomVQADataset(dataset=dataset_name, **kwargs)
308
+
309
+
310
def infer_dataset_basename(dataset_name):
    """Strip the final '_'-separated token (typically a split suffix) from the name."""
    # Everything before the last underscore; '' when there is no underscore.
    return dataset_name.rpartition("_")[0]
313
+
314
+
315
# Public API: helper functions plus every registered dataset class name.
__all__ = [
    'build_dataset', 'img_root_map', 'build_judge', 'extract_answer_from_item', 'prefetch_answer', 'DEBUG_MESSAGE'
] + [cls.__name__ for cls in DATASET_CLASSES]
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/chartmimic.cpython-310.pyc ADDED
Binary file (20.5 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/m4bench.cpython-310.pyc ADDED
Binary file (5.66 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/miabench.cpython-310.pyc ADDED
Binary file (5.67 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/ost_bench.cpython-310.pyc ADDED
Binary file (10.8 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/qbench_video.cpython-310.pyc ADDED
Binary file (12.6 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/video_dataset_config.cpython-310.pyc ADDED
Binary file (5.02 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/emma.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from vlmeval import *
2
+ from .image_shortqa import ImageShortQADataset
3
+ from .image_mcq import MMMUDataset
4
+
5
+
6
class EMMADataset(ImageShortQADataset):
    """EMMA benchmark: mixed MCQ / open-ended questions with optional context.

    The 'EMMA_COT' variant shares the same TSV but appends the chain-of-thought
    instruction instead of the direct-answer instruction.
    """

    COT_INST = "Please solve the problem step by step. "
    DIRECT_INST = "Please ensure that your output only contains the final answer without any additional content (such as intermediate reasoning steps)."  # noqa: E501
    MCQ_FMT = "{context}\n\n{question}\n\n{options}\n\nAnswer with the option's letter from the given choices. "
    OPEN_FMT = "{context}\n\n{question}\n\nAnswer the question using a single word or phrase. "

    DATASET_URL = {
        'EMMA': 'https://opencompass.openxlab.space/utils/VLMEval/EMMA.tsv',
        'EMMA_COT': 'https://opencompass.openxlab.space/utils/VLMEval/EMMA.tsv'
    }

    def build_prompt(self, line):
        """Build the interleaved image+text prompt for one EMMA record."""
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line['image_path'])
        else:
            tgt_path = self.dump_image(line)

        context = line['context']
        question = line['question']
        example = ""
        _ = {}  # NOTE(review): unused placeholder, appears to be dead code
        if line['type'] == 'MCQ':
            # Collect whichever option columns (A, B, C, ...) are populated.
            for ch in string.ascii_uppercase:
                if ch in line and not pd.isna(line[ch]):
                    example += f"{ch}: {line[ch]}\n"

            prompt_tmpl = EMMADataset.MCQ_FMT
            if not pd.isna(context) and context is not None:
                prompt = prompt_tmpl.format(context=context, question=question, options=example)
            else:
                # No context: drop the '{context}\n\n' prefix from the template.
                prompt = prompt_tmpl.split('{context}\n\n')[1].format(question=question, options=example)
            prompt += EMMADataset.COT_INST if 'COT' in self.dataset_name else EMMADataset.DIRECT_INST
        else:
            prompt_tmpl = EMMADataset.OPEN_FMT
            if not pd.isna(context) and context is not None:
                prompt = prompt_tmpl.format(context=context, question=question)
            else:
                prompt = prompt_tmpl.split('{context}\n\n')[1].format(question=question)
            prompt += EMMADataset.COT_INST if 'COT' in self.dataset_name else EMMADataset.DIRECT_INST

        msgs = []
        if isinstance(tgt_path, list):
            msgs.extend([dict(type='image', value=p) for p in tgt_path])
        else:
            msgs = [dict(type='image', value=tgt_path)]
        msgs.append(dict(type='text', value=prompt))
        # Reuse MMMU's splitter to interleave <image N> markers with the text.
        return MMMUDataset.split_MMMU(msgs)
VLMEvalKit-sudoku/vlmeval/dataset/image_mt.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .image_base import ImageBaseDataset
2
+ from .utils.judge_util import build_judge
3
+ from ..smp import *
4
+ from ..smp.file import get_intermediate_file_path
5
+ from ..utils import track_progress_rich
6
+
7
+
8
class ImageMTDataset(ImageBaseDataset):
    """Base class for multi-turn (MT) image dialogue datasets."""

    TYPE = 'MT'

    def build_prompt(self, line):
        """Build an alternating user/assistant dialogue from one record.

        Each question may embed `<ImageHere>` placeholders, which are replaced
        in order by the record's images. Answers must not contain images.
        Returns a list of dicts: {'role': 'user'|'assistant', 'content': [...]}.
        """
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line['image_path'])
        else:
            tgt_path = self.dump_image(line)

        questions = toliststr(line['question'])
        if 'answer' in line:
            answers = toliststr(line['answer'])
        else:
            # No reference answers: pad with empty strings, one per turn.
            answers = [''] * len(questions)
        assert len(questions) == len(answers)

        dlgs, pics_number = [], 0
        for i in range(len(questions)):
            q, a = questions[i], answers[i]
            if '<ImageHere>' in q:
                content = []
                tag_number = q.count('<ImageHere>')
                # Consume images sequentially across all turns.
                images = tgt_path[pics_number: pics_number + tag_number]
                pics_number += tag_number
                q_split = q.split('<ImageHere>')
                # FIX: the inner loop previously reused `i`, shadowing the outer
                # turn index (harmless only by accident of Python's for-loop
                # semantics); use a distinct name.
                for j in range(tag_number):
                    qsp, im = q_split[j], images[j]
                    if qsp != '':
                        content.append(dict(type='text', value=qsp))
                    content.append(dict(type='image', value=im))
                if q_split[-1] != '':
                    content.append(dict(type='text', value=q_split[-1]))
            else:
                content = [dict(type='text', value=q)]
            dlgs.append(dict(role='user', content=content))
            assert '<ImageHere>' not in a, 'We currently do not support images in the answer. '
            content = [dict(type='text', value=a)]
            dlgs.append(dict(role='assistant', content=content))
        return dlgs
51
+
52
+
53
class MMDUDataset(ImageMTDataset):
    """MMDU multi-turn dialogue benchmark, judged along seven quality dimensions."""

    DATASET_URL = {'MMDU': 'https://opencompass.openxlab.space/utils/VLMEval/MMDU.tsv'}
    DATASET_MD5 = {'MMDU': '848b635a88a078f49aebcc6e39792061'}
    # Judge scoring dimensions; each score is clipped to the 0-10 range.
    DIMS = [
        'Creativity', 'Richness', 'Visual Perception', 'Logical Coherence',
        'Answer Accuracy', 'Image Relationship Understanding', 'Overall Score'
    ]

    def calculat_metric(self, ans):
        """Aggregate per-sample judge scores into 'all' and 'valid' averages (x10).

        'all' divides by every judged turn; 'valid' divides only by turns whose
        score parsed successfully.
        """
        all_score = defaultdict(lambda: 0)
        tot = defaultdict(lambda: 0)
        valid = defaultdict(lambda: 0)
        for key in ans:
            res = ans[key]['res']
            assert isinstance(res, pd.DataFrame)
            lt = len(res)
            for i in range(lt):
                line = res.iloc[i]
                # FIX: the inner loop previously reused `k`, shadowing the outer
                # dict key; use a distinct name for the dimension.
                for dim in self.DIMS:
                    tot[dim] += 1
                    if dim in line and line[dim] is not None:
                        try:
                            score = int(line[dim])
                            score = np.clip(score, 0, 10)
                            all_score[dim] += score
                            valid[dim] += 1
                        except Exception as e:
                            print(f'Failed to parse the score: {str(e)}')
        sp1 = {'set': 'all'}
        # FIX: guard divisions — tot/valid may be 0 when no sample was judged or
        # no score parsed, which previously raised ZeroDivisionError.
        sp1.update({k: (all_score[k] / tot[k] * 10) if tot[k] else 0.0 for k in self.DIMS})
        sp2 = {'set': 'valid'}
        sp2.update({k: (all_score[k] / valid[k] * 10) if valid[k] else 0.0 for k in self.DIMS})

        return pd.DataFrame([sp1, sp2])

    def evaluate(self, eval_file, **judge_kwargs):
        """Judge every dialogue with an LLM and return the aggregated metric table.

        Intermediate per-sample judgments are cached in a pkl next to eval_file
        so interrupted runs resume instead of re-judging.
        """
        # FIX: pop the judge model once with a default. The original read
        # judge_kwargs['model'] (KeyError when absent) and then popped it again
        # with a 'gpt-4o' default a few lines later.
        model = judge_kwargs.pop('model', 'gpt-4o')

        tmp_file = get_intermediate_file_path(eval_file, f'_{model}', 'pkl')
        score_file = get_intermediate_file_path(eval_file, f'_{model}_score', 'csv')
        nproc = judge_kwargs.pop('nproc', 4)

        data = load(eval_file)
        judge_model = build_judge(model=model, **judge_kwargs)

        lt = len(data)
        lines = [data.iloc[i] for i in range(lt)]
        tups = [(judge_model, line) for line in lines]
        indices = [line['index'] for line in lines]

        ans = {}
        if osp.exists(tmp_file):
            ans = load(tmp_file)

        # Only judge samples not already in the cache.
        tups = [x for x, i in zip(tups, indices) if i not in ans]
        indices = [i for i in indices if i not in ans]

        from .utils.mmdu import mmdu_score

        if len(indices):
            new_results = track_progress_rich(
                mmdu_score,
                tups,
                nproc=nproc,
                chunksize=nproc,
                keys=indices,
                save=tmp_file,)
            ans = load(tmp_file)
            for k in indices:
                assert k in ans

        metric = self.calculat_metric(ans)
        dump(metric, score_file)
        return metric
VLMEvalKit-sudoku/vlmeval/dataset/image_shortqa.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from vlmeval import *
2
+ from .image_base import ImageBaseDataset
3
+ from .utils import build_judge
4
+ from .utils.multiple_choice import report_acc, eval_vanilla, eval_circular_group
5
+ from .utils.shortqa import ShortQA_prompt
6
+ from ..utils import track_progress_rich
7
+ from ..smp.file import get_intermediate_file_path
8
+
9
+
10
def ShortQA_auxeval(model, line):
    """Judge a short-answer prediction with an LLM judge.

    Asks the judge up to 3 times (raising temperature each retry) and parses a
    tagged [Begin Correctness]/[Begin Reason] reply.
    Returns dict(hit=0|1, log=reason-or-failure-message).
    """
    # FIX: removed the dead nested helper `proc_str`, which was defined but
    # never called anywhere in this function.

    def extraction(resp):
        """Pull (correctness, reason) out of the judge's tagged reply, or (None, None)."""
        correct_st, correct_ed = '[Begin Correctness]', '[End Correctness]'
        reason_st, reason_ed = '[Begin Reason]', '[End Reason]'
        if correct_st in resp and correct_ed in resp:
            correct = resp.split(correct_st)[1].split(correct_ed)[0].strip().lower()
            # Exactly one of yes/no must appear; both or neither is ambiguous.
            if ('yes' in correct) ^ ('no' in correct):
                correct = 1 if 'yes' in correct else 0
                if reason_st in resp and reason_ed in resp:
                    reason = resp.split(reason_st)[1].split(reason_ed)[0].strip()
                    return correct, reason
        # FIX: previously a reply missing the correctness tags fell off the end
        # returning bare None, which crashed the `ans[0]` subscript below.
        return None, None

    prompt = ShortQA_prompt(line)
    retry = 3
    for i in range(retry):
        # Raise temperature on each retry to escape deterministic failure modes.
        output = model.generate(prompt, temperature=0.5 * i)
        ans = extraction(output)
        if ans[0] in [0, 1]:
            return dict(hit=ans[0], log=ans[1])

    return dict(hit=0, log='Fail to Judge')
44
+
45
+
46
def Comprehensive_auxeval(model, data):
    """Dispatch judging: circular-eval group, vanilla single MCQ, or short-answer QA."""
    def has_field(record, key_name):
        return key_name in record and (not pd.isna(record[key_name])) and record[key_name] != ''

    if isinstance(data, pd.DataFrame) and len(data) > 1:
        # A multi-row group means circular evaluation over option permutations.
        assert has_field(data.iloc[0], 'A')
        data['GT'] = data['answer']
        return eval_circular_group(model, data)

    item = data.iloc[0] if isinstance(data, pd.DataFrame) else data
    if has_field(item, 'A') and len(item['answer']) == 1:
        # Single MCQ row with a one-letter answer: vanilla multiple-choice judge.
        item['GT'] = item['answer']
        return eval_vanilla(model, item)
    # Otherwise treat it as an open short-answer question.
    return ShortQA_auxeval(model, item)
62
+
63
+
64
class ImageShortQADataset(ImageBaseDataset):
    """Datasets answered with a short free-form response, judged by an LLM."""

    TYPE = 'Short'

    DATASET_URL = {
        'LiveMMBench_Infographic': '',
        'LiveMMBench_Perception': '',
        'LiveMMBench_Reasoning': '',
        'LiveMMBench_Reasoning_circular': '',
        'hle':'https://opencompass.openxlab.space/utils/VLMEval/hle.tsv',
    }

    DATASET_MD5 = {
        'hle': 'a83cbdbea89f27c2aa5b8f34a8894b72',
    }

    def build_prompt(self, line):
        """Append the short-answer instruction to the base prompt."""
        msgs = super().build_prompt(line)
        assert msgs[-1]['type'] == 'text'
        msgs[-1]['value'] += '\nPlease directly provide a short answer to the question. '
        return msgs

    # It returns a DataFrame
    def evaluate(self, eval_file, **judge_kwargs):
        """Judge predictions (LLM judge when available, else exact matching).

        Caches per-sample judgments in a tmp pkl and the fully judged table in
        a `_judge` file; returns the accuracy table from report_acc.
        """
        data = load(eval_file)
        _ = self.dataset_name
        assert 'answer' in data and 'prediction' in data
        data['prediction'] = [str(x) for x in data['prediction']]
        data['answer'] = [str(x) for x in data['answer']]

        storage = get_intermediate_file_path(eval_file, '_judge')
        tmp_file = get_intermediate_file_path(eval_file, '_tmp', 'pkl')
        nproc = judge_kwargs.pop('nproc', 4)

        if not osp.exists(storage):
            ans_map = {} if not osp.exists(tmp_file) else load(tmp_file)

            # Resolve the judge: explicit exact_matching, a working API judge,
            # or fall back to exact matching when no API key works.
            model = judge_kwargs.get('model', 'gpt-4o-mini')
            if model == 'exact_matching':
                model = None
            elif gpt_key_set():
                model = build_judge(model=model, **judge_kwargs)
                if not model.working():
                    warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
                    warnings.warn(DEBUG_MESSAGE)
                    model = None
            else:
                model = None
                warnings.warn('OPENAI_API_KEY is not working properly, will use exact matching for evaluation')

            if model is not None:
                if 'g_index' not in data:
                    # Plain per-row judging.
                    lines = [data.iloc[i] for i in range(len(data))]
                    indices = [x['index'] for x in lines if x['index'] not in ans_map]
                    lines = [x for x in lines if x['index'] not in ans_map]
                    tups = [(model, line) for line in lines]
                else:
                    # Circular evaluation: rows sharing g_index are judged as one group.
                    main_data = data[[x == y for x, y in zip(data['index'], data['g_index'])]]
                    lines = [data[data['g_index'] == x] for x in main_data['index']]
                    indices = [x.iloc[0]['g_index'] for x in lines if x.iloc[0]['g_index'] not in ans_map]
                    lines = [x for x in lines if x.iloc[0]['g_index'] not in ans_map]
                    tups = [(model, x) for x in lines]
                    data = main_data

                if len(lines):
                    res = track_progress_rich(
                        Comprehensive_auxeval, tups, nproc=nproc, chunksize=nproc, keys=indices, save=tmp_file)
                    for k, v in zip(indices, res):
                        ans_map[k] = v

            judge_results = [ans_map[x] for x in data['index']]
            data['hit'] = [x['hit'] for x in judge_results]
            data['log'] = [x['log'] for x in judge_results]
            dump(data, storage)

        data = load(storage)
        acc = report_acc(data)

        score_file = get_intermediate_file_path(eval_file, '_acc', 'csv')
        dump(acc, score_file)
        return acc
144
+
145
+
146
class PathVQA_VAL(ImageShortQADataset):
    """PathVQA validation split, served as a short-answer dataset (no MD5 pinned)."""

    DATASET_URL = {
        'PathVQA_VAL': 'https://huggingface.co/datasets/Pfei111/PathVQA/resolve/main/PathVQA_VAL.tsv',
    }

    DATASET_MD5 = {
        'PathVQA_VAL': None,
    }
154
+
155
+
156
class PathVQA_TEST(ImageShortQADataset):
    """PathVQA test split, served as a short-answer dataset (no MD5 pinned)."""

    DATASET_URL = {
        'PathVQA_TEST': 'https://huggingface.co/datasets/Pfei111/PathVQA/resolve/main/PathVQA_TEST.tsv',
    }

    DATASET_MD5 = {
        'PathVQA_TEST': None,
    }
VLMEvalKit-sudoku/vlmeval/dataset/longvideobench.py ADDED
@@ -0,0 +1,335 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from huggingface_hub import snapshot_download
2
+ from ..smp import *
3
+ from .video_base import VideoBaseDataset
4
+ from .utils import build_judge, DEBUG_MESSAGE
5
+ from glob import glob
6
+ import os
7
+
8
+ FAIL_MSG = 'Failed to obtain answer via API.'
9
+
10
+
11
def timestamp_to_seconds(timestamp):
    """Convert an 'HH:MM:SS[.fff]' timestamp string to total seconds (float)."""
    hours, minutes, seconds = timestamp.split(":")
    # Seconds may carry a fractional part, so only that field is parsed as float.
    return int(hours) * 3600 + int(minutes) * 60 + float(seconds)
17
+
18
+
19
def uniformly_subsample(lst, K):
    """Pick K elements evenly spread across lst; return lst unchanged when K >= len(lst)."""
    total = len(lst)
    if K >= total:
        return lst
    stride = total / K
    return [lst[int(idx * stride)] for idx in range(K)]
25
+
26
+
27
def insert_subtitles_into_frames(
    frames,
    frame_timestamps,
    subtitles,
    starting_timestamp_for_subtitles,
    duration,
):
    """Interleave subtitle text with frames in temporal order.

    Each subtitle is inserted after all frames whose timestamp precedes the
    subtitle's midpoint; subtitles whose time window covers no frame at all are
    dropped. Returns a list of {'type': 'image'|'text', 'value': ...} dicts.
    Subtitle dicts come in two schemas: {'timestamp': (start, end), 'text': ...}
    or {'start': 'HH:MM:SS', 'end': 'HH:MM:SS', 'line': ...}.
    """
    interleaved_list = []
    cur_i = 0  # index of the first frame not yet emitted

    for subtitle in subtitles:
        if "timestamp" in subtitle:
            start, end = subtitle["timestamp"]

            # A missing/non-numeric end means the subtitle runs to the video's end.
            if not isinstance(end, float):
                end = duration

            # Re-base subtitle times onto the sampled clip's timeline.
            start -= starting_timestamp_for_subtitles
            end -= starting_timestamp_for_subtitles

            subtitle_timestamp = (start + end) / 2
            subtitle_text = subtitle["text"]
        else:
            start, end = subtitle["start"], subtitle["end"]
            start = timestamp_to_seconds(start)
            end = timestamp_to_seconds(end)
            start -= starting_timestamp_for_subtitles
            end -= starting_timestamp_for_subtitles

            subtitle_timestamp = (start + end) / 2
            subtitle_text = subtitle["line"]

        # Emit all remaining frames that precede this subtitle's midpoint.
        for i, (frame, frame_timestamp) in enumerate(
            zip(frames[cur_i:], frame_timestamps[cur_i:])
        ):
            if frame_timestamp <= subtitle_timestamp:
                # print("frame:", frame_timestamp)
                interleaved_list.append({"type": "image", "value": frame})
                cur_i += 1
            else:
                break

        # Widen very short windows to at least 1s around the midpoint so that
        # brief subtitles still get a chance to cover a frame.
        if end - start < 1:
            end = subtitle_timestamp + 0.5
            start = subtitle_timestamp - 0.5

        # Only keep subtitles whose (start, end) window covers at least one
        # sampled frame; NOTE this scans ALL frames, not just the remaining ones.
        covering_frames = False
        for frame, frame_timestamp in zip(frames, frame_timestamps):
            if frame_timestamp < end and frame_timestamp > start:
                covering_frames = True
                break

        if covering_frames:
            interleaved_list.append({"type": "text", "value": subtitle_text + "\n"})
        else:
            pass

    # Flush any frames left after the last subtitle.
    for i, (frame, frame_timestamp) in enumerate(
        zip(frames[cur_i:], frame_timestamps[cur_i:])
    ):
        interleaved_list.append({"type": "image", "value": frame})
    return interleaved_list
89
+
90
+
91
class LongVideoBench(VideoBaseDataset):
    """Multiple-choice QA benchmark over long videos (LongVideoBench).

    Frames are sampled from each video; when ``use_subtitle`` is set, subtitle
    lines are interleaved between the frames by timestamp.

    Bug fix: ``build_prompt`` previously tested ``if not self.use_subtitle:``
    before loading subtitles, i.e. subtitles were interleaved exactly when the
    caller had disabled them. The condition is now ``if self.use_subtitle:``.
    """

    # Expected md5 of the generated {dataset}.tsv index file.
    MD5 = '82905eae3a5ae7383c5a8ee9655e1ab9'
    # System prompt prepended to every query (empty for this benchmark).
    SYS = ''

    TYPE = 'Video-MCQ'

    def __init__(self, dataset='LongVideoBench', use_subtitle=False, nframe=0, fps=-1):
        super().__init__(dataset=dataset, nframe=nframe, fps=fps)
        # When True, interleave subtitle text with the sampled frames.
        self.use_subtitle = use_subtitle
        self.dataset_name = dataset

    @classmethod
    def supported_datasets(cls):
        return ['LongVideoBench']

    def prepare_dataset(self, dataset_name='LongVideoBench', repo_id='longvideobench/LongVideoBench'):
        """Download (if needed) and validate the dataset.

        Returns:
            dict(data_file=<path to tsv index>, root=<dataset root dir>).
        """
        def check_integrity(pth):
            # Cache is valid iff the TSV exists, its md5 matches, and every
            # video file it references is present on disk.
            data_file = osp.join(pth, f'{dataset_name}.tsv')
            if not osp.exists(data_file):
                return False
            if md5(data_file) != self.MD5:
                print("md5 mismatch", md5(data_file), self.MD5)
                return False
            data = load(data_file)
            for video_pth in data['video_path']:
                if not osp.exists(osp.join(pth, video_pth)):
                    print(video_pth, "is not found")
                    return False
            return True

        if modelscope_flag_set():
            repo_id = "AI-ModelScope/LongVideoBench"

        cache_path = get_cache_path(repo_id)

        if cache_path is None:
            # Fall back to the default HuggingFace hub cache location.
            cache_path = osp.expanduser("~/.cache/huggingface/hub/datasets--longvideobench--LongVideoBench")
            if not osp.exists(cache_path):
                os.makedirs(cache_path)

        if check_integrity(cache_path):
            dataset_path = cache_path
        else:
            def generate_tsv(pth):
                # Build the TSV index from the official validation JSON.
                data_file = osp.join(pth, f'{dataset_name}.tsv')
                if osp.exists(data_file) and md5(data_file) == self.MD5:
                    return

                data_file = pd.read_json(osp.join(pth, 'lvb_val.json'))
                data_file = data_file.assign(index=range(len(data_file)))
                data_file['video'] = data_file['video_id']
                data_file['video_path'] = data_file['video_path'].apply(lambda x: f'./videos/{x}')

                data_file.to_csv(osp.join(pth, f'{dataset_name}.tsv'), sep='\t', index=False)

            if modelscope_flag_set():
                from modelscope import dataset_snapshot_download
                dataset_snapshot_download(dataset_id=repo_id)
            else:
                snapshot_download(repo_id=repo_id, repo_type='dataset')
            print("All videos are downloaded for LongVideoBench")

            if not glob(osp.join(cache_path, "videos")):
                # Videos ship as (possibly split) tar archives; reassemble
                # multi-part archives, then extract them.
                tar_files = glob(osp.join(cache_path, "**/*.tar*"), recursive=True)

                def untar_video_data(tar_file, cache_dir):
                    import tarfile
                    with tarfile.open(tar_file, "r") as tar_ref:
                        tar_ref.extractall(cache_dir)
                        print(f"Extracted all files from {tar_file} to {cache_dir}")

                def concat_tar_parts(tar_parts, output_tar):
                    # Byte-concatenate split archive parts in sorted order.
                    with open(output_tar, "wb") as out_tar:
                        from tqdm import tqdm
                        for part in tqdm(sorted(tar_parts)):
                            with open(part, "rb") as part_file:
                                out_tar.write(part_file.read())
                    print(f"Concatenated parts {tar_parts} into {output_tar}")

                tar_parts_dict = {}

                # Group tar parts together
                for tar_file in tar_files:
                    base_name = tar_file.split(".tar")[0]
                    if base_name not in tar_parts_dict:
                        tar_parts_dict[base_name] = []
                    tar_parts_dict[base_name].append(tar_file)

                # Concatenate and untar split parts
                for base_name, parts in tar_parts_dict.items():
                    print(f"Extracting following tar files: {parts}")
                    output_tar = base_name + ".tar"
                    if not osp.exists(output_tar):
                        print('Start concatenating tar files')

                        concat_tar_parts(parts, output_tar)
                        print('Finish concatenating tar files')

                    if not osp.exists(osp.join(cache_path, osp.basename(base_name))):
                        untar_video_data(output_tar, cache_path)

                print('All videos are extracted for LongVideoBench')

            dataset_path = cache_path
            generate_tsv(dataset_path)

        data_file = osp.join(dataset_path, f'{dataset_name}.tsv')
        return dict(data_file=data_file, root=dataset_path)

    def save_video_frames(self, video_path, video_llm=False):
        """Sample frames from a video and (unless ``video_llm``) dump them to disk.

        Returns:
            (frame_paths, frame_indices, video_info) where video_info holds
            'fps' and 'n_frames'.

        NOTE(review): if both ``self.nframe <= 0`` and ``self.fps <= 0``,
        ``indices``/``frame_paths`` are never assigned and this raises
        UnboundLocalError — confirm callers always set one of them.
        """
        vid_path = osp.join(self.data_root, video_path)
        import decord
        vid = decord.VideoReader(vid_path)
        video_info = {
            'fps': vid.get_avg_fps(),
            'n_frames': len(vid),
        }
        if self.nframe > 0 and self.fps < 0:
            # Uniform sampling: nframe frames, excluding the two endpoints.
            step_size = len(vid) / (self.nframe + 1)
            indices = [int(i * step_size) for i in range(1, self.nframe + 1)]
            frame_paths = self.frame_paths(video_path[:-4])
        elif self.fps > 0:
            # not constrained by num_frames, get frames by fps
            total_duration = video_info['n_frames'] / video_info['fps']
            required_frames = int(total_duration * self.fps)
            step_size = video_info['fps'] / self.fps
            indices = [int(i * step_size) for i in range(required_frames)]
            frame_paths = self.frame_paths_fps(video_path[:-4], len(indices))

        flag = np.all([osp.exists(p) for p in frame_paths])

        if not flag:
            # File lock so concurrent workers do not decode the same video twice.
            lock_path = osp.splitext(vid_path)[0] + '.lock'
            with portalocker.Lock(lock_path, 'w', timeout=30):
                if not np.all([osp.exists(p) for p in frame_paths]):
                    images = [vid[i].asnumpy() for i in indices]
                    images = [Image.fromarray(arr) for arr in images]
                    for im, pth in zip(images, frame_paths):
                        # Video-LLMs consume the raw video, so skip writing frames.
                        if not osp.exists(pth) and not video_llm:
                            im.save(pth)

        return frame_paths, indices, video_info

    def build_prompt(self, line, video_llm):
        """Build the multi-modal message for one sample.

        Video-LLMs receive the raw video file; frame-based models receive the
        sampled frames, optionally interleaved with subtitle text.
        """
        if isinstance(line, int):
            assert line < len(self)
            line = self.data.iloc[line]

        frames, indices, video_info = self.save_video_frames(line['video_path'], video_llm)
        fps = video_info["fps"]

        message = [dict(type='text', value=self.SYS)]
        if video_llm:
            message.append(dict(type='video', value=osp.join(self.data_root, line['video_path'])))
        else:
            # Bug fix: condition used to be inverted (`if not self.use_subtitle:`),
            # inserting subtitles exactly when they were disabled.
            if self.use_subtitle:
                with open(osp.join(self.data_root, "subtitles", line["subtitle_path"])) as f:
                    subtitles = json.load(f)

                frame_message = insert_subtitles_into_frames(
                    frames,
                    [ind_ / fps for ind_ in indices],
                    subtitles,
                    line["starting_timestamp_for_subtitles"],
                    line["duration"]
                )

                message += frame_message
            else:
                for im in frames:
                    message.append(dict(type='image', value=im))

        # Append lettered options (A., B., ...) to the question text.
        # NOTE(review): `eval` on dataset-provided text — the TSV is generated
        # locally so this is trusted, but ast.literal_eval would be safer.
        line['question'] += '\n' + '\n'.join(
            ["{}. {}".format(chr(ord("A") + i), cand) for i, cand in enumerate(eval(line['candidates']))]
        )
        prompt = line["question"] + "\nAnswer with the option's letter from the given choices directly."
        message.append(dict(type='text', value=prompt))
        return message

    # It returns a dictionary
    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Score predictions against `correct_choice` and dump dimension ratings.

        Uses exact letter matching; falls back to an LLM judge (when available)
        to extract the chosen option from free-form predictions.
        """
        from .utils.longvideobench import get_dimension_rating, extract_characters_regex, extract_option

        assert get_file_extension(eval_file) in ['xlsx', 'json', 'tsv'], 'data file should be an supported format (xlsx/json/tsv) file'  # noqa: E501

        tmp_file = get_intermediate_file_path(eval_file, '_tmp', 'pkl')
        tgt_file = get_intermediate_file_path(eval_file, '_rating', 'json')
        score_file = get_intermediate_file_path(eval_file, '_score')

        if not osp.exists(score_file):
            model = judge_kwargs.get('model', 'exact_matching')
            assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125']

            if model == 'exact_matching':
                model = None
            elif gpt_key_set():
                model = build_judge(**judge_kwargs)
                if not model.working():
                    warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
                    warnings.warn(DEBUG_MESSAGE)
                    model = None
            else:
                warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
                model = None
            # NOTE(review): `res` is loaded and filtered but never used below —
            # looks like a leftover from a resume path; confirm before removing.
            res = {} if not osp.exists(tmp_file) else load(tmp_file)
            res = {k: v for k, v in res.items() if FAIL_MSG not in v}

            data = load(eval_file)
            data_un = data[~pd.isna(data['prediction'])]

            for idx in data['index']:
                ans = data.loc[data['index'] == idx, 'correct_choice'].values[0]
                ans = chr(ord("A") + ans)  # numeric choice -> letter
                pred = str(data.loc[data['index'] == idx, 'prediction'].values[0])

                if extract_characters_regex(pred) == '':
                    # No bare option letter found: let the judge extract it.
                    extract_pred = extract_option(
                        model,
                        data.loc[data['index'] == idx].to_dict(orient='records')[0],
                        'LongVideoBench'
                    )
                    # NOTE(review): writes via label `idx` while reads go through
                    # the 'index' column — assumes the two coincide; confirm.
                    data.loc[idx, 'score'] = int(extract_pred == ans)
                else:
                    data.loc[idx, 'score'] = int(extract_characters_regex(pred) == ans)

            rejected = [x for x in data['score'] if x == -1]

            print(
                f'Among {len(data)} questions, failed to obtain prediction for {len(data) - len(data_un)} questions, '
                f'failed to obtain the score for another {len(rejected)} questions. '
                f'Those questions will be counted as -1 score in ALL rating, and will not be counted in VALID rating.'
            )

            dump(data, score_file)

        rating = get_dimension_rating(score_file)
        dump(rating, tgt_file)
        return rating
VLMEvalKit-sudoku/vlmeval/dataset/mmalignbench.py ADDED
@@ -0,0 +1,298 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa
2
+ import re
3
+ from functools import partial
4
+
5
+ from .image_base import ImageBaseDataset
6
+ from .utils import build_judge, DEBUG_MESSAGE
7
+ from ..smp import *
8
+ from ..utils import track_progress_rich
9
+
10
+
11
# Judge system prompt for pairwise comparison WITHOUT a reference answer.
# The verdict tokens ([[A>>B]] ... [[B>>A]]) are parsed by get_score below.
SYSTEM_PROMPT = """\
Please act as an impartial evaluator and assess the quality of the responses provided by two AI assistants to a given user prompt and accompanying image. You will be provided with Assistant A's and Assistant B's answers. Your task is to determine which assistant's response is superior.

Start your evaluation by generating your own answer to the prompt and image. Ensure that you complete your answer before reviewing any assistant responses.

When evaluating the assistants' responses, compare each one to your own answer.

First, assess whether the assistants' answers are helpful and relevant. A response is considered helpful if it appropriately addresses the prompt, follows the given instructions, and is well-organized. A relevant answer closely aligns with the context or requirements of the prompt.

When applicable, consider the creativity and novelty of each assistant's response and evaluate the writing quality of both responses.

Then, identify and correct any errors or inaccuracies in the assistants' answers. Lastly, identify any critical information missing from the assistants' responses that should have been included to improve the answer.

After providing your explanation, you must output only one of the following choices as your final verdict with a label:

1. Assistant A is significantly better: [[A>>B]]
2. Assistant A is slightly better: [[A>B]]
3. Tie, relatively the same: [[A=B]]
4. Assistant B is slightly better: [[B>A]]
5. Assistant B is significantly better: [[B>>A]]

Example output: "My final verdict is tie: [[A=B]]".\
"""

# Variant used when the sample carries a ground-truth ('gt') answer: identical
# except the judge is told to consult the provided Ground Truth.
SYSTEM_PROMPT_GT = """\
Please act as an impartial evaluator and assess the quality of the responses provided by two AI assistants to a given user prompt and accompanying image. You will be provided with Assistant A's and Assistant B's answers. Your task is to determine which assistant's response is superior.

Start your evaluation by generating your own answer to the prompt and image. Ensure that you complete your answer before reviewing any assistant responses.

When evaluating the assistants' responses, compare each one to your own answer.

First, assess whether the assistants' answers are helpful and relevant. A response is considered helpful if it appropriately addresses the prompt, follows the given instructions, and is well-organized. A relevant answer closely aligns with the context or requirements of the prompt.

When applicable, consider the creativity and novelty of each assistant's response and evaluate the writing quality of both responses.

Then, identify and correct any errors or inaccuracies in the assistants' answers. Lastly, identify any critical information missing from the assistants' responses that should have been included to improve the answer. Please refer to the provided Ground Truth answer, which constitutes the key fact relevant to the question.

After providing your explanation, you must output only one of the following choices as your final verdict with a label:

1. Assistant A is significantly better: [[A>>B]]
2. Assistant A is slightly better: [[A>B]]
3. Tie, relatively the same: [[A=B]]
4. Assistant B is slightly better: [[B>A]]
5. Assistant B is significantly better: [[B>>A]]

Example output: "My final verdict is tie: [[A=B]]".\
"""

# User-turn template (no ground truth); filled via .format(question=..., answer_1=..., answer_2=...).
PROMPT_TEMPLATE = """**INPUT**:

<|User Prompt|>\n{question}

<|The Start of Assistant A's Answer|>\n{answer_1}\n<|The End of Assistant A's Answer|>

<|The Start of Assistant B's Answer|>\n{answer_2}\n<|The End of Assistant B's Answer|>
"""


# User-turn template with ground truth; additionally takes gt=....
PROMPT_TEMPLATE_GT = """**INPUT**:

<|User Prompt|>\n{question}

<|Ground Truth|>\n{gt}

<|The Start of Assistant A's Answer|>\n{answer_1}\n<|The End of Assistant A's Answer|>

<|The Start of Assistant B's Answer|>\n{answer_2}\n<|The End of Assistant B's Answer|>
"""
79
+
80
+
81
# Matches a bracketed verdict token, e.g. "[[A>B]]" or "[[B>>A]]".
REGEX_PATTERN = re.compile("\[\[([AB<>=]+)\]\]")  # noqa: W605


def get_score(judgement, pattern=REGEX_PATTERN):
    """Extract the verdict token from a judge response.

    Returns (verdict, retry_needed): the verdict string when exactly one
    distinct verdict appears, otherwise (None, True) so the caller retries.
    """
    found = [m for m in pattern.findall(judgement) if m != ""]
    # Zero verdicts, or several conflicting ones, both mean "ask again".
    if len(set(found)) != 1:
        return None, True
    return found[0].strip("\n"), False
93
+
94
+
95
def MMAlignBench_auxeval(model, line):
    """Ask the judge `model` to compare answers A (reference) and B (candidate).

    Args:
        model: judge with a .generate(messages) API.
        line: record with 'question', 'A', 'B', 'image' (base64) and optional 'gt'.

    Returns:
        [verdict, judge_response]; verdict is 'Unknown' when no consistent
        verdict could be parsed after the retries.
    """
    # Use the ground-truth-aware prompt when the sample carries a real 'gt'.
    if 'gt' in line and str(line['gt']) != 'nan':
        config = dict(question=line['question'], gt=line['gt'], answer_1=line['A'], answer_2=line['B'])
        prompt = SYSTEM_PROMPT_GT + '\n' + PROMPT_TEMPLATE_GT.format(**config)
    else:
        config = dict(question=line['question'], answer_1=line['A'], answer_2=line['B'])
        prompt = SYSTEM_PROMPT + '\n' + PROMPT_TEMPLATE.format(**config)

    prefix = 'data:image/jpeg;base64,'
    img = prefix + line['image']

    messages = [
        dict(type='text', value=prompt),
        dict(type='image', value=img)
    ]

    # Retry when the judge returns no (or conflicting) verdict tokens.
    retry = 2
    while retry:
        resp = model.generate(messages)
        score, try_again = get_score(resp)
        if not try_again:
            break
        retry -= 1

    if score is None:
        # Bug fix: previously returned the bare string 'Unknown', which the
        # caller indexes as v[0]/v[1] and would record 'U'/'n'. Return the same
        # [score, resp] shape as the success path instead.
        return ['Unknown', resp]
    return [score, resp]
126
+
127
+
128
class MMAlignBench(ImageBaseDataset):
    """Pairwise preference benchmark.

    The evaluated model's answer ('B', from the `prediction` column) is
    compared against a Claude-3-Sonnet reference answer ('A') by an LLM judge;
    verdicts are mapped to signed scores and aggregated into Reward and
    Win Rate statistics, overall and per sample type.
    """

    TYPE = 'VQA'
    DATASET_URL = {'MMAlignBench': 'https://opencompass.openxlab.space/utils/VLMEval/MMAlignBench.tsv'}
    DATASET_MD5 = {'MMAlignBench': 'd00d8e61c99257cbaf76d8d5e926f01e'}

    # Judge verdict -> signed preference; negative favours A (the reference),
    # positive favours B (the evaluated model).
    score_map = {
        'A>>B': -2,
        'A>B': -1,
        'A=B': 0,
        'B>A': 1,
        'B>>A': 2
    }

    # Given one data record, return the built prompt (a multi-modal message), can override
    def build_prompt(self, line):
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line['image_path'])
        else:
            tgt_path = self.dump_image(line)

        question = line['question']

        msgs = []
        if isinstance(tgt_path, list):
            msgs.extend([dict(type='image', value=p) for p in tgt_path])
        else:
            msgs = [dict(type='image', value=tgt_path)]
        # WildVision adopts text first
        msgs = [dict(type='text', value=question)] + msgs
        return msgs

    @classmethod
    def gen_eval_base(self, eval_file, b64_map):
        # Rename columns so the judge prompt maps A -> Claude reference and
        # B -> candidate prediction, and re-attach the base64 image per sample.
        # NOTE: decorated @classmethod with first parameter named `self` —
        # project-wide convention in VLMEvalKit dataset classes.
        data = load(eval_file)
        data['B'] = data.pop('prediction')
        data['A'] = data.pop('claude3_sonnet')
        data['image'] = [b64_map[x] for x in data['index']]
        return data

    # It returns a DataFrame
    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Run the pairwise judge over all samples and aggregate verdicts.

        Returns a one-row DataFrame with overall verdict counts, Reward
        (scaled to [-100, 100]) and Win Rate, plus the same per sample type.
        """
        # We adopt pairwise evaluation (twice for a pair) for this dataset
        model = judge_kwargs['model']
        storage = get_intermediate_file_path(eval_file, f'_{model}')
        score_file = get_intermediate_file_path(eval_file, f'_{model}_score', 'csv')
        tmp_file = get_intermediate_file_path(eval_file, f'_{model}', 'pkl')
        nproc = judge_kwargs.pop('nproc', 4)

        if not osp.exists(storage):
            # Pull the base64 images from the original TSV (predictions file
            # does not carry them).
            raw_data = MMAlignBench('MMAlignBench').data
            b64_map = {x: y for x, y in zip(raw_data['index'], raw_data['image'])}
            data = self.gen_eval_base(eval_file, b64_map)

            judge_kwargs['temperature'] = 0
            judge_kwargs['img_detail'] = 'high'
            judge_kwargs['timeout'] = 300
            model = build_judge(max_tokens=4096, **judge_kwargs)

            assert model.working(), (
                'MMAlignBench evaluation requires a working OPENAI API\n' + DEBUG_MESSAGE
            )

            lt = len(data)
            lines = [data.iloc[i] for i in range(lt)]
            tups = [(model, line) for line in lines]
            indices = [line['index'] for line in lines]

            # Resume from previously judged samples cached in the pickle.
            ans = load(tmp_file) if osp.exists(tmp_file) else {}
            tups = [x for x, i in zip(tups, indices) if i not in ans]
            indices = [i for i in indices if i not in ans]

            if len(indices):
                new_results = track_progress_rich(
                    MMAlignBench_auxeval,
                    tups,
                    nproc=nproc,
                    chunksize=nproc,
                    keys=indices,
                    save=tmp_file,
                )
                ans = load(tmp_file)
                # Raw results are [score, resp] pairs; normalize to dicts.
                for k, v in zip(indices, new_results):
                    ans[k] = {'score': v[0], 'resp': v[1]}
            else:
                # Fully cached run: normalize the cached [score, resp] pairs.
                # NOTE(review): assumes cached values are still lists, not
                # already-normalized dicts — confirm the cache lifecycle.
                for k, v in ans.items():
                    ans[k] = {'score': v[0], 'resp': v[1]}
            data['score'] = [ans[x]['score'] for x in data['index']]
            data['judge'] = [ans[x]['resp'] for x in data['index']]
            data.pop('image')  # drop the bulky base64 column before persisting
            dump(data, storage)

        data = load(storage)
        lt = len(data)

        # scores: verdict value -> count; type_scores: sample type -> same map.
        scores = defaultdict(lambda: 0)
        type_scores = defaultdict(lambda: defaultdict(lambda: 0))

        for i in range(lt):
            item = data.iloc[i]
            if item['score'] not in self.score_map:
                score = 0  # unparseable verdicts ('Unknown', ...) count as tie
            else:
                score = self.score_map[item['score']]
            # Reversed pairs ('_rev' suffix) swapped A and B; flip the sign.
            # NOTE(review): `'_rev' in item['index']` assumes string indices.
            if '_rev' in item['index']:
                score = -score
            scores[score] += 1
            type = item['type']
            type_scores[type][score] += 1

        name_map = {
            2: 'Much Better',
            1: 'Better',
            0: 'Tie',
            -1: 'Worse',
            -2: 'Much Worse'
        }
        scores = {name_map[k]: v for k, v in scores.items()}
        # Reward: mean signed preference scaled to [-100, 100].
        scores['Reward'] = (
            100 * scores.get('Much Better', 0)
            + 50 * scores.get('Better', 0)
            - 50 * scores.get('Worse', 0)
            - 100 * scores.get('Much Worse', 0)
        ) / lt
        scores['Win Rate'] = (scores.get('Better', 0) + scores.get('Much Better', 0)) / lt
        scores = {k: [v] for k, v in scores.items()}
        scores = pd.DataFrame(scores)

        for type_name, type_score_dict in type_scores.items():
            type_score_dict = {name_map[k]: v for k, v in type_score_dict.items()}
            type_lt = sum(type_score_dict.values())

            type_score_dict['Reward'] = (
                (
                    100 * type_score_dict.get('Much Better', 0)
                    + 50 * type_score_dict.get('Better', 0)
                    - 50 * type_score_dict.get('Worse', 0)
                    - 100 * type_score_dict.get('Much Worse', 0)
                )
                / type_lt
                if type_lt > 0
                else 0
            )

            type_score_dict['Win Rate'] = (
                (type_score_dict.get('Better', 0) + type_score_dict.get('Much Better', 0)) / type_lt
                if type_lt > 0
                else 0
            )

            # Append this type's statistics as extra columns.
            type_score_df = pd.DataFrame(
                {
                    f"{type_name}_Much Better": [type_score_dict.get('Much Better', 0)],
                    f"{type_name}_Better": [type_score_dict.get('Better', 0)],
                    f"{type_name}_Tie": [type_score_dict.get('Tie', 0)],
                    f"{type_name}_Worse": [type_score_dict.get('Worse', 0)],
                    f"{type_name}_Much Worse": [type_score_dict.get('Much Worse', 0)],
                    f"{type_name}_Reward": [type_score_dict['Reward']],
                    f"{type_name}_Win Rate": [type_score_dict['Win Rate']],
                }
            )
            scores = pd.concat([scores, type_score_df], axis=1)

        dump(scores, score_file)
        return scores
VLMEvalKit-sudoku/vlmeval/dataset/mmlongbench.py ADDED
@@ -0,0 +1,584 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ import math
3
+ from urllib.request import urlopen
4
+ from PIL import Image, ImageDraw, ImageFont
5
+ import torchvision.transforms as transforms
6
+
7
+ from vlmeval.dataset.utils import build_judge, levenshtein_distance
8
+ from vlmeval.smp import *
9
+ from .image_base import ImageBaseDataset
10
+ from ..smp.file import get_intermediate_file_path
11
+
12
# Sentinel string recorded when the judge API fails to return an answer.
FAIL_MSG = 'Failed to obtain answer via API.'
13
+
14
+
15
def get_gpt4_ICE():
    """Return the four in-context examples for the GPT-4 answer extractor.

    Each example shows a Question, a free-form Analysis, and the expected
    'Extracted answer' / 'Answer format' lines the extractor must emit.

    NOTE(review): the typos inside example_1 ('servife', 'serve') are part of
    the frozen few-shot prompt — do not "fix" them, or judge outputs would no
    longer match previously cached results.
    """
    example_1 = """
    ---
    Question: List the primary questions asked about the services in this report.
    Analysis: The primary questions asked about the services in the report for The Limes Residential Home are:\n\n
    1. Is the service safe?\n
    2. Is the service effective?\n
    3. Is the service caring?\n
    4. Is the service responsive?\n
    5. Is the service well-led?
    Extracted answer: [
    'Is the servife safe?',
    'Is the service effective',
    'Is the serve caring?',
    'Is the service responsive?',
    'Is the service well-led?'
    ]
    Answer format: List\n
    """

    example_2 = """
    ---
    Question: How many regulations of the HSCA 2008 are breached in all according to this report?
    Analysis: According to the report, the provider breached 10 Health and Social Care Act 2008 (Regulated Activities)
    Regulations in total. Here are the specifics:\n\n1. Regulation 13: Safeguarding service users from abuse and
    improper treatment\n2. Regulation 12: Safe care and treatment\n3. Regulation 18: Staffing\n4. Regulation 11:
    Need for consent\n5. Regulation 10: Dignity and respect\n6. Regulation 9: Person-centred care\n7. Regulation 17:
    Good governance\n8. Regulation 18 (CQC Registration Regulations 2009): Notification of other incidents\n9.
    Regulation 18: Failure to maintain an accurate and up-to-date care plan\n10. Regulation 11: Failure to implement
    the Mental Capacity Act 2005 code of practice effectively\n\nThese breaches involve issues concerning staffing,
    safeguarding, medicines management, dignity and respect, consent, care planning, governance, and failure to
    notify the CQC of incidents.
    Extracted answer: 10
    Answer format: Integer\n
    """

    example_3 = """
    ---
    Question: According to the survey that is the percentage of Chinese who are paying more or
    about the same attention to politics after Trump's election?
    Analysis: The survey provided does not specify the percentage of Chinese individuals specifically who are paying
    more or about the same attention to politics after Trump's election. The report focuses primarily on American
    demographics and does not include specific details about the Chinese population in relation to this question. If
    you need information about a different demographic or a summary of the findings from the American demographic,
    I can certainly help with that!
    Extracted answer: Not answerable
    Answer format: String\n
    """

    example_4 = """
    ---
    Question: How many quotations from male respondent over 50 years old are included in this report?
    Analysis: The image you've provided appears to be a screenshot of a document with multiple charts. However, the
    text is too small and blurry to read accurately. If you can provide a clearer image or more context, I might be
    able to help you with your question.
    Extracted answer: Fail to answer
    Answer format: String\n
    """

    return [example_1, example_2, example_3, example_4]
75
+
76
+
77
def build_mmlongbench_gpt4_prompt(line):
    """Assemble the GPT-4 answer-extraction prompt for one evaluated sample.

    The prompt is: task description + four in-context examples + the sample's
    question and the model's free-form analysis (its raw prediction).
    """
    task_description = """
    Given the question and analysis, you are tasked to extract answers with required formats from the free-form analysis.
    - Your extracted answers should be one of the following formats: (1) Integer, (2) Float, (3) String and (4) List.
    If you find the analysis the question can not be answered from the given documents, type "Not answerable".
    Exception: If the analysis only tells you that it can not read/understand the images or documents,
    type "Fail to answer".
    - Please make your response as concise as possible. Also note that your response should be formatted as below:
    ```
    Extracted answer: [answer]
    Answer format: [answer format]
    ```
    Please read the following example, then extract the answer from the model response
    and type it at the end of the prompt.\n
    """
    sections = [task_description]
    sections.extend(get_gpt4_ICE())
    sections.append('---\nQuestion:' + line['question'] + '\n')
    sections.append('Analysis: ' + str(line['prediction']))
    return ''.join(sections)
101
+
102
+
103
def anls_compute(groundtruth, prediction, threshold=0.5):
    """Average Normalized Levenshtein Similarity between two strings.

    Scores at or below *threshold* are clamped to 0.0, following the
    standard ANLS protocol.
    """
    edit = levenshtein_distance(groundtruth, prediction)
    longest = max(len(groundtruth.upper()), len(prediction.upper()))
    normalized = 0.0 if longest == 0 else float(edit) / float(longest)
    similarity = 1.0 - normalized
    return 0.0 if similarity <= threshold else similarity
111
+
112
+
113
def is_float_equal(reference, prediction, include_percentage: bool = False, is_close: bool = False) -> bool:
    """Compare a predicted number against a numeric reference.

    Args:
        reference: ground-truth value; must be parseable as a float
            (a trailing '%' is stripped first).
        prediction: predicted value; non-numeric predictions return False.
        include_percentage: also accept the prediction matching reference/100
            or reference*100 (handles "50" vs "0.5" vs "50%").
        is_close: additionally accept values within 1% relative tolerance.
            (Fixed annotation: this flag was declared `float` by mistake.)

    Returns:
        True when the prediction matches the reference under any accepted form.
    """
    def get_precision(gt_ans: float) -> int:
        # Number of decimal places in the printed float (3 when none visible,
        # e.g. scientific notation).
        precision = 3
        if '.' in str(gt_ans):
            precision = len(str(gt_ans).split('.')[-1])
        return precision

    reference = float(str(reference).strip().rstrip('%').strip())
    try:
        prediction = float(str(prediction).strip().rstrip('%').strip())
    except (TypeError, ValueError):
        # Non-numeric prediction can never equal a numeric reference.
        # (Was a bare `except:`; narrowed to the actual conversion errors.)
        return False

    if include_percentage:
        gt_result = [reference / 100, reference, reference * 100]
    else:
        gt_result = [reference]
    for item in gt_result:
        try:
            if is_close:
                if math.isclose(item, prediction, rel_tol=0.01):
                    return True
            # Compare after rounding to the coarser of the two precisions
            # (at least 2 decimal places).
            precision = max(min(get_precision(prediction), get_precision(item)), 2)
            if round(prediction, precision) == round(item, precision):
                return True
        except Exception:
            continue
    return False
141
+
142
+
143
def get_clean_string(s):
    """Normalize an answer string for comparison.

    Lowercases and strips, removes unit suffixes (mile/miles/million),
    parenthesized asides, surrounding quotes, a leading '$' and a trailing '%'.

    Bug fix: the unit-suffix removals previously called
    ``s.rstrip('mile').strip()`` etc. without assigning the result, so they
    were silent no-ops (and rstrip strips a character set, not a suffix).
    They now actually remove the trailing unit word.
    """
    s = str(s).lower().strip()
    # Strip unit suffixes; check 'miles' before 'mile' so the whole word goes.
    for unit in ('miles', 'mile', 'million'):
        if s.endswith(unit):
            s = s[:-len(unit)].strip()
    # remove parenthesis
    s = re.sub(r'\s*\([^)]*\)', '', s).strip()
    # remove quotes
    s = re.sub(r"^['\"]|['\"]$", '', s).strip()
    s = s.strip().lstrip('$').strip()
    s = s.strip().rstrip('%').strip()
    return s
158
+
159
+
160
def is_exact_match(s):
    """Return True when *s* belongs to a category that must match verbatim
    (URL, code filename, page reference, phone number, time, date, email)
    rather than via fuzzy ANLS comparison."""
    # Website
    if 'https://' in s:
        return True
    # code file
    if s.endswith(('.py', 'ipynb')):
        return True
    # page reference
    if s.startswith('page'):
        return True
    # telephone number
    if re.fullmatch(r'\b\d+(-\d+|\s\d+)?\b', s):
        return True
    # time
    if 'a.m.' in s or 'p.m.' in s:
        return True
    # YYYY-MM-DD
    if re.fullmatch(r'\b\d{4}[-\s]\d{2}[-\s]\d{2}\b', s):
        return True
    # YYYY-MM
    if re.fullmatch(r'\b\d{4}[-\s]\d{2}\b', s):
        return True
    # Email address
    if re.fullmatch(r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}', s):
        return True
    return False
186
+
187
+
188
def isfloat(num):
    """Return True if *num* parses as a float (ValueError only; a TypeError
    from e.g. None still propagates, matching the original contract)."""
    try:
        float(num)
    except ValueError:
        return False
    return True
194
+
195
+
196
def get_font():
    """Fetch the SimHei TTF font used for image labels, falling back to
    PIL's built-in default font when the download fails."""
    truetype_url = "http://opencompass.openxlab.space/utils/Fonts/SimHei.ttf"
    try:
        response = urlopen(truetype_url)
        return ImageFont.truetype(response, size=40)
    except Exception as e:
        logging.warning(f'{type(e)}: {e}')
        logging.warning("Fail to download the font. Use the default one.")
        return ImageFont.load_default(size=40)
206
+
207
+
208
def frame2img(img_path_list, font, save_path=None, idx_start=0):
    """Stack a list of image files into one labeled strip.

    Each image is resized so its longer side is 1120 px, labeled with
    "<IMAGE i>" (i starting at *idx_start*), and separated from its neighbor
    by a thin black line. Returns the composite PIL image; also writes it to
    *save_path* when given.
    """
    imgs = [Image.open(img_path) for img_path in img_path_list]

    # Resize every image so its longer side becomes 560 * 2 px, keeping aspect.
    new_imgs = []
    for img in imgs:
        w, h = img.size
        scale = w / h
        if w > h:
            new_w = 560 * 2
            new_h = int(560 * 2 / scale)
        else:
            new_w = int(560 * 2 * scale)
            new_h = 560 * 2
        img = transforms.functional.resize(img, [new_h, new_w],)
        new_imgs.append(img)
    imgs = new_imgs
    new_w = 0
    new_h = 0
    pad = 40  # vertical space reserved for each "<IMAGE i>" label
    # NOTE(review): w/h leak out of the resize loop above, so the orientation
    # of the LAST image decides the layout for the whole strip — confirm that
    # this is intentional (inputs are typically uniformly oriented pages).
    if w > h:
        # Landscape images: stack vertically, label above each image.
        for im in imgs:
            w, h = im.size
            new_w = max(new_w, w)
            new_h += h + 10 + pad
        new_img = Image.new("RGB", (new_w, new_h), "white")
        draw = ImageDraw.Draw(new_img)
        curr_h = 0
        for idx, im in enumerate(imgs):
            w, h = im.size
            new_img.paste(im, (0, pad + curr_h))
            draw.text((0, curr_h), f"<IMAGE {idx + idx_start}>", font=font, fill="black")
            if idx + 1 < len(imgs):
                # Horizontal separator between consecutive images.
                draw.line([(0, pad + curr_h + h + 5), (new_w, pad + curr_h + h + 5)], fill='black', width=2)
            curr_h += h + 10 + pad
    else:
        # Portrait images: lay out side by side, label above each image.
        for im in imgs:
            w, h = im.size
            new_w += w + 10
            new_h = max(new_h, h)
        new_h += pad
        new_img = Image.new('RGB', (new_w, new_h), 'white')
        draw = ImageDraw.Draw(new_img)
        curr_w = 0
        for idx, im in enumerate(imgs):
            w, h = im.size
            new_img.paste(im, (curr_w, pad))
            draw.text((curr_w, 0), f"<IMAGE {idx + idx_start}>", font=font, fill='black')
            if idx + 1 < len(imgs):
                # Vertical separator between consecutive images.
                draw.line([(curr_w + w + 5, 0), (curr_w + w + 5, new_h)], fill='black', width=2)
            curr_w += w + 10

    if save_path is not None:
        new_img.save(save_path)

    return new_img
263
+
264
+
265
def concat_images(image_list, max_concat=1, column_num=1):
    """Concatenate image files into at most *max_concat* composite images.

    Two modes:
      * column_num == -1: labeled-strip mode — choose the smallest number of
        batches so no batch exceeds MAX_COLUMN_NUM pages, then render each
        batch via frame2img (adds "<IMAGE i>" labels and separators).
      * otherwise: grid mode — paste raw images into a column_num-wide grid
        per batch, no labels.

    Returns a list of PIL images (one per batch).
    """
    concatenated_images = []
    if column_num == -1:
        MAX_COLUMN_NUM = 20
        max_concat = 1
        while len(image_list) / max_concat > MAX_COLUMN_NUM:
            max_concat += 1
        interval = max(math.ceil(len(image_list) / max_concat), 1)
        for i in range(0, len(image_list), interval):
            batch_images = image_list[i:i + interval]
            # idx_start=i keeps "<IMAGE i>" labels globally consistent.
            concatenated_image = frame2img(batch_images, font=get_font(), idx_start=i)
            concatenated_images.append(concatenated_image)
    else:
        # NOTE(review): grid cell size comes from the FIRST image of each
        # batch; differently sized images may overlap or leave gaps — confirm
        # inputs are uniformly sized (PDF page renders typically are).
        interval = max(math.ceil(len(image_list) / max_concat), 1)
        for i in range(0, len(image_list), interval):
            batch_images = [Image.open(filename) for filename in image_list[i:i + interval]]
            if column_num == 1:
                total_height = batch_images[0].height * len(batch_images)
            else:
                total_height = batch_images[0].height * ((len(batch_images) - 1) // column_num + 1)
            concatenated_image = Image.new('RGB', (batch_images[0].width * column_num, total_height), 'white')

            x_offset, y_offset = 0, 0
            for count, image in enumerate(batch_images):
                concatenated_image.paste(image, (x_offset, y_offset))
                x_offset += image.width
                # Wrap to the next row after filling column_num cells.
                if (count + 1) % column_num == 0:
                    y_offset += image.height
                    x_offset = 0
            concatenated_images.append(concatenated_image)
    return concatenated_images
296
+
297
+
298
def eval_score(gt, pred, answer_type):
    """Score a prediction against the ground truth for MMLongBench-DOC.

    Args:
        gt: ground-truth answer (string/number, or a stringified Python list
            for the multi-answer type).
        pred: extracted model prediction.
        answer_type: one of 'Int', 'Float', 'Str'; any other value is treated
            as the list-answer type.

    Returns:
        A float score in [0, 1]; exact-match types yield 0/1, fuzzy string
        matching uses ``anls_compute``.

    Changes vs. the previous version: per-item debug ``print`` calls removed,
    ``eval`` on data replaced with ``ast.literal_eval`` (with a raw-value
    fallback), and bare ``except:`` clauses narrowed.
    """
    import ast

    def _parse_list(value):
        # Safely parse a stringified list; fall back to the raw value when it
        # is not a Python literal (avoids `eval` on untrusted data).
        if isinstance(value, str) and value.startswith('['):
            try:
                return ast.literal_eval(value)
            except (ValueError, SyntaxError):
                return value
        return value

    if answer_type == 'Int':
        try:
            gt, pred = int(gt), int(float(pred))
        except (ValueError, TypeError):
            # Unparseable prediction scores 0 via the inequality below.
            pred = ''
        score = (gt == pred)
    elif answer_type == 'Float':
        try:
            gt = float(get_clean_string(str(gt)))
            pred = float(get_clean_string(str(pred)))
        except (ValueError, TypeError):
            pred = ''
        score = is_float_equal(gt, pred, include_percentage=True, is_close=True)
    elif answer_type == 'Str':
        gt = get_clean_string(gt)
        pred = get_clean_string(pred)
        if is_exact_match(gt):
            score = (gt == pred)
        else:
            score = anls_compute(gt, pred)
    else:
        # List-answer type: compare element-wise after sorting both sides.
        gt = _parse_list(gt)
        if not isinstance(gt, list):
            gt = [gt]
        pred = _parse_list(pred)
        if not isinstance(pred, list):
            pred = [pred]
        if len(gt) != len(pred):
            score = 0.0
        else:
            gt = sorted([get_clean_string(a) for a in gt])
            pred = sorted([get_clean_string(a) for a in pred])
            if isfloat(gt[0]) or is_exact_match(gt[0]):
                score = ('-'.join(gt) == '-'.join(pred))
            else:
                # Worst per-element ANLS determines the list score.
                score = min([anls_compute(gt_v, pred_v) for gt_v, pred_v in zip(gt, pred)])

    return float(score)
341
+
342
+
343
def MMLongBench_auxeval(model, line):
    """Ask the judge model to extract the final answer from a raw prediction.

    Retries up to five times with increasing temperature.  Returns a dict with
    the accumulated log, the raw judge response, and the extracted answer
    (empty string when extraction fails or all retries fail).
    """
    prompt = build_mmlongbench_gpt4_prompt(line)
    log = ''
    retry = 5

    for attempt in range(retry):
        prediction = line['prediction']
        response = model.generate(prompt, temperature=attempt * 0.5)

        if FAIL_MSG not in response:
            log += 'Succeed'
            try:
                # The judge reply contains 'Extracted answer: ...' followed by
                # an 'Answer format:' section; keep only the answer text.
                extracted = response.split('Answer format:')[0].split('Extracted answer:')[1].strip()
            except:
                extracted = ''
            return dict(log=log, res=response, pred=extracted)

        log += f'Try {attempt}: output is {prediction}, failed to parse.\n'

    log += 'All 5 retries failed.\n'
    return dict(log=log, res='', pred='')
363
+
364
+
365
def get_f1(data):
    """Compute the answerability-aware F1 over a scored result DataFrame.

    Recall is the mean score over questions whose ground truth is answerable
    (``answer != 'Not answerable'``); precision is the mean score over
    questions the model judged answerable (``pred != 'Not answerable'``).

    Args:
        data: DataFrame with 'answer', 'pred' and 'score' columns.

    Returns:
        ``2 * P * R / (P + R)``, or 0.0 when either positive set is empty or
        both precision and recall are zero (previously raised
        ZeroDivisionError on such degenerate splits).
    """
    gt_pos_data = data[data.apply(lambda k: k['answer'] != 'Not answerable', axis=1)]
    pred_pos_data = data[data.apply(lambda k: k['pred'] != 'Not answerable', axis=1)]
    # Guard degenerate splits: no answerable GT or no answerable predictions.
    if len(gt_pos_data) == 0 or len(pred_pos_data) == 0:
        return 0.0
    recall = sum(gt_pos_data['score'].tolist()) / len(gt_pos_data)
    precision = sum(pred_pos_data['score'].tolist()) / len(pred_pos_data)
    if precision + recall == 0:
        return 0.0
    return 2 * recall * precision / (recall + precision)
371
+
372
+
373
def MMLongBench_acc(result_file):
    """Aggregate per-question scores into category-level metrics for MMLongBench-DOC.

    Re-scores every row with ``eval_score``, writes the scores back into the
    result file, then reports overall F1 and accuracy plus breakdowns by
    evidence source ('Chart', 'Table', 'Figure', text, layout) and by number
    of evidence pages (single / multi / unanswerable).

    Args:
        result_file: path to the judged result file; rows must contain
            'answer', 'pred', 'answer_format', 'evidence_sources' and
            'evidence_pages' columns.

    Returns:
        A pandas DataFrame with columns 'category', 'num', 'avg_score'.
    """
    data = load(result_file)
    overall_score = 0.0
    score_list = list()
    for i in range(len(data)):
        item = data.iloc[i]
        try:
            score = eval_score(item['answer'], item['pred'], item['answer_format'])
        except:
            # Any scoring failure counts as 0 for that question.
            score = 0.0
        score_list.append(score)
        overall_score += score

    # Persist the per-question scores alongside the predictions.
    data['score'] = score_list
    dump(data, result_file)

    # NOTE(review): `eval` parses the stringified list columns — acceptable
    # for trusted TSVs, but ast.literal_eval would be safer.
    data_chart = data[data.apply(lambda k: 'Chart' in eval(k['evidence_sources']), axis=1)]
    data_table = data[data.apply(lambda k: 'Table' in eval(k['evidence_sources']), axis=1)]
    data_image = data[data.apply(lambda k: 'Figure' in eval(k['evidence_sources']), axis=1)]
    data_text = data[data.apply(lambda k: 'Pure-text (Plain-text)' in eval(k['evidence_sources']), axis=1)]
    data_layout = data[data.apply(lambda k: 'Generalized-text (Layout)' in eval(k['evidence_sources']), axis=1)]

    # Split by evidence-page count; zero pages marks unanswerable questions.
    data_single = data[data.apply(lambda k: len(eval(k['evidence_pages'])) == 1, axis=1)]
    data_multi = data[data.apply(lambda k: len(eval(k['evidence_pages'])) > 1, axis=1)]
    data_unans = data[data.apply(lambda k: len(eval(k['evidence_pages'])) == 0, axis=1)]

    res = dict()
    res['category'] = [
        'overall_f1', 'overall_acc', 'text', 'layout', 'table', 'chart',
        'image', 'single-page', 'multi-page', 'unanswerable'
    ]
    res['num'] = [
        len(data), len(data), len(data_text), len(data_layout), len(data_table),
        len(data_chart), len(data_image), len(data_single), len(data_multi), len(data_unans)
    ]
    res['avg_score'] = [
        get_f1(data),
        overall_score / len(data),
        sum(data_text['score'].tolist()) / len(data_text) if len(data_text) > 0 else 0.0,
        sum(data_layout['score'].tolist()) / len(data_layout) if len(data_layout) > 0 else 0.0,
        sum(data_table['score'].tolist()) / len(data_table) if len(data_table) > 0 else 0.0,
        sum(data_chart['score'].tolist()) / len(data_chart) if len(data_chart) > 0 else 0.0,
        sum(data_image['score'].tolist()) / len(data_image) if len(data_image) > 0 else 0.0,
        sum(data_single['score'].tolist()) / len(data_single) if len(data_single) > 0 else 0.0,
        sum(data_multi['score'].tolist()) / len(data_multi) if len(data_multi) > 0 else 0.0,
        sum(data_unans['score'].tolist()) / len(data_unans) if len(data_unans) > 0 else 0.0,
    ]
    res = pd.DataFrame(res)
    return res
422
+
423
+
424
class MMLongBench(ImageBaseDataset):
    """MMLongBench-DOC: VQA over long PDF documents.

    Each sample references a multi-page PDF; pages are rendered to images and,
    for models with a limited number of image slots, tiled into fewer
    concatenated sheets.  Evaluation extracts the final answer with a judge
    model and scores it with ``eval_score``.
    """

    TYPE = 'VQA'

    DATASET_URL = {
        'MMLongBench_DOC': 'https://opencompass.openxlab.space/utils/VLMEval/MMLongBench_DOC.tsv',
    }
    DATASET_MD5 = {
        'MMLongBench_DOC': '9b393e1f4c52718380d50586197eac9b',
    }

    # model name -> (concat_num, column_num) controlling how page images are
    # tiled in `dump_image`; column_num == -1 selects the labeled `frame2img`
    # layout instead of the plain grid.
    SUPPORTED_MODELS = {
        'GPT4': (1, 1),
        'GPT4V': (1, 1),
        'GPT4V_HIGH': (1, 1),
        'GPT4o': (1, 1),
        'GPT4o_HIGH': (1, 1),
        'GPT4o_MINI': (1, 1),
        'MiniCPM-Llama3-V-2_5': (1, 5),
        'InternVL-Chat-V1-5': (5, 2),
        'XComposer2_4KHD': (1, 5),
        'XComposer2d5': (1, -1),
    }

    def __init__(self, dataset, **kwargs):
        """Build the dataset; ``kwargs['model']`` must name a supported model.

        Raises:
            AssertionError: when the model is not in ``SUPPORTED_MODELS``.
        """
        self.model_list = list(self.SUPPORTED_MODELS.keys())
        model_name = kwargs['model']
        if not listinstr(self.model_list, model_name):
            raise AssertionError("{} doesn't support the evaluation on MMLongBench_DOC.".format(model_name))
        super(MMLongBench, self).__init__(dataset)

        # API models receive the raw page images; only local models get tiling.
        self.is_api = True if listinstr(['GPT4'], model_name) else False
        self.max_pages = 120  # hard cap on PDF pages per document
        concat_num, column_num = self.SUPPORTED_MODELS.get(model_name)
        self.concat_num = concat_num
        self.column_num = column_num

    def dump_image(self, origin_line):
        """Materialize a sample's PDF pages as image files and return their paths.

        Decodes the base64 PDF with PyMuPDF (``fitz``) only when some page
        image is missing on disk; optionally tiles pages via `concat_images`
        for non-API models.
        """
        os.makedirs(self.img_root, exist_ok=True)
        try:
            import fitz
        except Exception as e:
            logging.critical(f'{type(e)}: {e}')
            logging.critical('Please use `pip install pymupdf` to parse PDF files.')

        line = origin_line.copy()
        line['image_path'] = line['image_path'][:self.max_pages]
        # Skip the expensive PDF render when every page image already exists.
        skip_pdf_parse = True
        for im_name in line['image_path']:
            path = osp.join(self.img_root, im_name)
            if not read_ok(path):
                skip_pdf_parse = False
                break

        # Just for being compatible with the zooped loop: zip(line['image'], line['image_path'])
        if skip_pdf_parse:
            line['image'] = line['image_path']
        else:
            # Render each PDF page at 144 dpi and re-encode as base64 PNG.
            pdf_data = base64.b64decode(line['image'])
            pdf_file = io.BytesIO(pdf_data)
            encoded_images = []
            with fitz.open(stream=pdf_file, filetype='pdf') as doc:
                doc = doc[:self.max_pages]
                for page in doc:
                    image = page.get_pixmap(dpi=144)
                    image_file = io.BytesIO(image.tobytes(output='png'))
                    image = Image.open(image_file)
                    encoded_image = encode_image_to_base64(image)
                    encoded_images.append(encoded_image)
            line['image'] = encoded_images
            print('process {}'.format(line['doc_id']))

        if 'image' in line:
            if isinstance(line['image'], list):
                tgt_path = []
                assert 'image_path' in line
                for img, im_name in zip(line['image'], line['image_path']):
                    path = osp.join(self.img_root, im_name)
                    if not read_ok(path):
                        decode_base64_to_image_file(img, path)
                    tgt_path.append(path)
            else:
                tgt_path = osp.join(self.img_root, f"{line['index']}.jpg")
                if not read_ok(tgt_path):
                    decode_base64_to_image_file(line['image'], tgt_path)
                tgt_path = [tgt_path]
        else:
            assert 'image_path' in line
            tgt_path = toliststr(line['image_path'])

        if self.concat_num > 0 and not self.is_api:
            concatenated_images = concat_images(tgt_path, max_concat=self.concat_num, column_num=self.column_num)

            old_tgt_path = tgt_path
            assert isinstance(old_tgt_path, list)
            # Derive concat-sheet filenames from the first page's name stem.
            if self.column_num != -1:
                tgt_path = [
                    '_'.join(old_tgt_path[0].split('_')[:-1]) + '_concat{}_{}.jpg'.format(self.concat_num, i)
                    for i in range(len(concatenated_images))
                ]
            else:
                tgt_path = [
                    '_'.join(old_tgt_path[0].split('_')[:-1]) + '_concat_all_{}.jpg'.format(i)
                    for i in range(len(concatenated_images))
                ]

            for path, concatenated_image in zip(tgt_path, concatenated_images):
                if not read_ok(path):
                    decode_base64_to_image_file(encode_image_to_base64(concatenated_image), path)
                    num_images, image_size = len(old_tgt_path), concatenated_image.size
                    print('concat {} images to a new one with size {}. save at {}'.format(num_images, image_size, path))
        return tgt_path

    # NOTE(review): declared @classmethod but the first parameter is named
    # `self` — works because it is only called on the class, but `cls` would
    # be the conventional name.
    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Judge predictions with a GPT model and report MMLongBench metrics.

        Reuses an existing judged file when present; otherwise runs the judge
        over all rows and stores the extracted answers before scoring.
        """
        logger = get_logger('Evaluation')
        model = judge_kwargs['model']

        storage = get_intermediate_file_path(eval_file, f'_{model}')
        tmp_file = get_intermediate_file_path(eval_file, f'_{model}', 'pkl')

        if osp.exists(storage):
            logger.warning(f'GPT scoring file {storage} already exists, will reuse it in MMLongBench_eval. ')
        else:
            data = load(eval_file)
            # `model` is rebound from the judge name to the judge object here.
            model = build_judge(max_tokens=128, **judge_kwargs)
            lt = len(data)
            lines = [data.iloc[i] for i in range(lt)]
            tups = [(model, line) for line in lines]
            indices = [line['index'] for line in lines]

            ans = {}
            if osp.exists(tmp_file):
                ans = load(tmp_file)
                tups = [x for x, i in zip(tups, indices) if i not in ans]
                indices = [i for i in indices if i not in ans]

            if len(indices):
                new_results = list()
                for model, line in tqdm(tups):
                    res = MMLongBench_auxeval(model, line)
                    new_results.append(res)

                # NOTE(review): `all_inds` covers ALL lines but `new_results`
                # only covers the non-cached ones, so when `ans` is non-empty
                # this zip misaligns indices and results, and the cached `ans`
                # entries are never merged — confirm/fix before relying on the
                # tmp-file resume path.
                log_map, res_map, pred_map = {}, {}, {}
                all_inds = [line['index'] for line in lines]
                for k, v in zip(all_inds, new_results):
                    log_map[k] = v['log']
                    res_map[k] = v['res']
                    pred_map[k] = v['pred']
                data['res'] = [res_map[idx] for idx in data['index']]
                data['log'] = [log_map[idx] for idx in data['index']]
                data['pred'] = [pred_map[idx] for idx in data['index']]
                dump(data, storage)

        score = MMLongBench_acc(storage)
        score_pth = get_intermediate_file_path(storage, '_score', 'csv')

        dump(score, score_pth)
        logger.info(f'MMLongBench_eval successfully finished evaluating {eval_file}, results saved in {score_pth}')
        logger.info('Score: ')
        logger.info(score)
VLMEvalKit-sudoku/vlmeval/dataset/moviechat1k.py ADDED
@@ -0,0 +1,264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from huggingface_hub import snapshot_download
2
+ from ..smp import *
3
+ from ..smp.file import get_intermediate_file_path, get_file_extension
4
+ from .video_base import VideoBaseDataset
5
+ from .utils import build_judge, DEBUG_MESSAGE
6
+ from ..utils import track_progress_rich
7
+ import random
8
+ import json
9
+ import ast
10
+ from glob import glob
11
+
12
+ FAIL_MSG = 'Failed to obtain answer via API.'
13
+
14
+
15
class MovieChat1k(VideoBaseDataset):
    """MovieChat-1K long-video VQA dataset ('global' and 'breakpoint' modes)."""

    # Checksum of the packaged TSV index.
    MD5 = '7c0aa7e10de1cddb37af42b4abc9a2dd'

    TYPE = 'Video-VQA'

    def __init__(self, dataset='MovieChat1k', pack=False, nframe=0, fps=-1, subset='all', limit=1.0):
        """Load the dataset, optionally filtering by mode and truncating.

        Args:
            subset: 'all', 'global' or 'breakpoint' (question mode filter).
            limit: fraction of rows when in (0, 1], absolute row count when
                greater than 1 (must stay below the dataset size).
        """
        super().__init__(dataset=dataset, pack=pack, nframe=nframe, fps=fps)

        if subset == 'all':
            pass
        elif subset == 'global':
            self.data = self.data[self.data['mode'] == 'global']
        elif subset == 'breakpoint':
            self.data = self.data[self.data['mode'] == 'breakpoint']
        else:
            raise ValueError(f'Invalid subset: {subset}')

        # limit <= 1 is a sampling fraction; limit > 1 an absolute row count.
        if limit <= 1.0 and limit > 0:
            sample_num = int(limit * len(self.data))
            self.data = self.data.iloc[:sample_num]
        elif limit > 1.0 and limit < len(self.data):
            self.data = self.data.iloc[:limit]
        else:
            raise ValueError(f'Invalid limit: {limit}')

    @classmethod
    def supported_datasets(cls):
        """Names of datasets this class can serve."""
        return ['MovieChat1k']

    def prepare_dataset(self, dataset_name='MovieChat1k', repo_id='Enxin/VLMEval-MovieChat1k'):
        """Download/locate the dataset and return its TSV path and video root.

        Downloads from HuggingFace when no verified local copy exists,
        reassembling and extracting split ``.tar`` parts as needed.
        """
        def check_integrity(pth):
            # TSV checksum must match and every referenced video must exist.
            data_file = osp.join(pth, f'{dataset_name}.tsv')
            if md5(data_file) != self.MD5:
                return False
            data = load(data_file)
            for video_pth in data['video']:
                if not osp.exists(osp.join(pth, video_pth)):
                    return False
            return True

        if os.path.exists(repo_id):
            # `repo_id` may also be a local directory path.
            dataset_path = repo_id
        else:
            cache_path = get_cache_path(repo_id)
            if cache_path is not None and check_integrity(cache_path):
                dataset_path = cache_path
            else:
                cache_path = snapshot_download(repo_id=repo_id, repo_type="dataset")
                if not glob(osp.join(cache_path, "video")):
                    tar_files = glob(osp.join(cache_path, "**/*.tar*"), recursive=True)

                    def untar_video_data(tar_file, cache_dir):
                        # Extract one reassembled archive into the cache dir.
                        import tarfile
                        with tarfile.open(tar_file, "r") as tar_ref:
                            tar_ref.extractall(cache_dir)
                            print(f"Extracted all files from {tar_file} to {cache_dir}")

                    def concat_tar_parts(tar_parts, output_tar):
                        # Byte-concatenate split parts (sorted) into one tar.
                        with open(output_tar, "wb") as out_tar:
                            from tqdm import tqdm
                            for part in tqdm(sorted(tar_parts)):
                                with open(part, "rb") as part_file:
                                    out_tar.write(part_file.read())
                        print(f"Concatenated parts {tar_parts} into {output_tar}")

                    tar_parts_dict = {}

                    # Group tar parts together
                    for tar_file in tar_files:
                        base_name = tar_file.split(".tar")[0]
                        if base_name not in tar_parts_dict:
                            tar_parts_dict[base_name] = []
                        tar_parts_dict[base_name].append(tar_file)

                    # Concatenate and untar split parts
                    for base_name, parts in tar_parts_dict.items():
                        print(f"Extracting following tar files: {parts}")
                        output_tar = base_name + ".tar"
                        if not osp.exists(output_tar):
                            print('Start concatenating tar files')

                            concat_tar_parts(parts, output_tar)
                            print('Finish concatenating tar files')

                    # NOTE(review): `output_tar` is the loop variable from the
                    # last iteration — only the final archive is extracted
                    # here; confirm a single archive is expected.
                    if not osp.exists(osp.join(cache_path, 'videos')):
                        untar_video_data(output_tar, cache_path)
                dataset_path = cache_path
        self.video_path = osp.join(dataset_path, 'videos/')
        data_file = osp.join(dataset_path, f'{dataset_name}.tsv')

        return dict(data_file=data_file, root=osp.join(dataset_path, 'videos'))

    def build_prompt_pack(self, line):
        """Build a frame-based message list for one sample in pack mode."""
        if isinstance(line, int):
            assert line < len(self)
            video = self.videos[line]
        elif isinstance(line, pd.Series):
            video = line['video']
        elif isinstance(line, str):
            video = line

        frames = self.save_video_frames(video)
        message = []
        for im in frames:
            message.append(dict(type='image', value=im))

        message.append(dict(type='text', value=line['question'], role='user'))
        return message

    def build_prompt_nopack(self, line, video_llm):
        """Build prompt for a single line without packing"""
        if isinstance(line, int):
            assert line < len(self)
            line = self.data.iloc[line]

        if video_llm:
            # Video-native models receive the raw video file.
            video_path = os.path.join(self.video_path, line['video'])
            return [
                dict(type='video', value=video_path),
                dict(type='text', value=line['question'])
            ]
        else:
            # Image models receive extracted frames instead.
            frames = self.save_video_frames(line['video'])
            message = []
            for im in frames:
                message.append(dict(type='image', value=im))
            message.append(dict(type='text', value=line['question']))
            return message

    def build_prompt(self, line, video_llm):
        """Dispatch to pack/nopack prompt building depending on settings."""
        if self.pack and not video_llm:
            return self.build_prompt_pack(line)
        else:
            return self.build_prompt_nopack(line, video_llm)

    @staticmethod
    def remove_side_quote(s, syms=[',', '"', "'"]):
        """Strip leading/trailing quote/comma characters from a string.

        Returns '' when the string consists solely of those symbols.
        """
        if np.all([x in syms for x in s]):
            return ''
        while s[0] in syms:
            s = s[1:]
        while s[-1] in syms:
            s = s[:-1]
        return s

    @staticmethod
    def robust_json_load(s):
        """Best-effort parse of a (possibly malformed) JSON object string.

        Falls back to line-by-line ``key: value`` extraction when proper JSON
        parsing fails; returns None when nothing can be recovered.
        """
        try:
            jsons = list(extract_json_objects(s))
            assert len(jsons) == 1
            return jsons[0]
        except:
            if '{' in s and s.find('{') == s.rfind('{'):
                sub_str = s[s.find('{') + 1:].strip()
                lines = sub_str.split('\n')
                res = {}
                for l in lines:
                    l = l.strip()
                    if ': ' in l:
                        key = l.split(': ')[0].strip()
                        val = l.split(': ')[1].strip()
                        key = MovieChat1k.remove_side_quote(key)
                        val = MovieChat1k.remove_side_quote(val)
                        if len(key) and len(val):
                            res[key] = val
                return res
            return None

    def load_pack_answers(self, data_raw):
        """Parse packed model answers and align them with the metadata rows.

        Returns:
            (meta, vstats): a copy of ``self.data`` with a 'prediction'
            column, plus counters for generation/parse successes and failures.
        """
        vstats = defaultdict(lambda: 0)
        data = defaultdict(lambda: {})

        for k in data_raw:
            ans = data_raw[k].strip()
            if FAIL_MSG in ans:
                vstats['GEN_FAIL'] += 1
                continue
            res = self.robust_json_load(ans)
            if res is not None:
                data[k] = res
                vstats['PARSE_OK'] += 1
            else:
                vstats['PARSE_FAIL'] += 1

        meta = cp.deepcopy(self.data)
        lt = len(meta)
        prediction = []
        for i in range(lt):
            line = meta.iloc[i]
            vid = line['video']
            idx = str(line['index'])
            prediction.append(data[vid][idx] if idx in data[vid] else None)
        meta['prediction'] = prediction
        vstats['VALIDQ'] = len([x for x in prediction if x is not None])
        vstats['INVALIDQ'] = len([x for x in prediction if x is None])
        return meta, vstats

    # NOTE(review): declared @classmethod but the first parameter is named
    # `self`; it returns a rating dictionary.
    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Score predictions with a GPT judge and return the rating dict."""
        from .utils.moviechat1k import get_dimension_rating, prepare_score_prompt

        assert get_file_extension(eval_file) in ['xlsx', 'json', 'tsv'], 'data file should be an supported format (xlsx/json/tsv) file'  # noqa: E501
        judge = judge_kwargs.setdefault('model', 'chatgpt-0125')
        assert judge in ['chatgpt-0125'], f'Invalid judge model for MovieChat1k: {judge}'
        nproc = judge_kwargs.pop('nproc', 4)
        _ = judge_kwargs.pop('verbose', None)
        _ = judge_kwargs.pop('retry', None)

        tmp_file = get_intermediate_file_path(eval_file, f'_{judge}_tmp', 'pkl')
        tgt_file = get_intermediate_file_path(eval_file, f'_{judge}_rating', 'json')
        score_file = get_intermediate_file_path(eval_file, f'_{judge}_score')

        model = build_judge(**judge_kwargs)

        if not osp.exists(score_file):
            # Resume from the tmp pickle, dropping failed judge calls.
            res = {} if not osp.exists(tmp_file) else load(tmp_file)
            res = {k: v for k, v in res.items() if model.fail_msg not in v}

            data = load(eval_file)
            data_un = data[~data['index'].isin(res)]
            data_un = data_un[~pd.isna(data_un['prediction'])]
            lt = len(data_un)
            prompts = [prepare_score_prompt(data_un.iloc[i]) for i in range(lt)]
            indices = [data_un.iloc[i]['index'] for i in range(lt)]
            if len(prompts):
                _ = track_progress_rich(
                    model.generate,
                    prompts,
                    keys=indices,
                    save=tmp_file,
                    nproc=nproc,
                    chunksize=nproc
                )
            score_map = load(tmp_file)
            # Missing scores are recorded as -1.
            data['score'] = [score_map[idx] if idx in score_map else -1 for idx in data['index']]
            rejected = [x for x in score_map.values() if FAIL_MSG in x]
            print(
                f'Among {len(data)} questions, failed to obtain prediction for {len(data) - len(score_map)} questions, '
                f'failed to obtain the score for another {len(rejected)} questions. '
                f'Those questions will be counted as 0 score in ALL rating, and will not be counted in VALID rating.'
            )

            dump(data, score_file)

        rating = get_dimension_rating(score_file)
        dump(rating, tgt_file)
        return rating
VLMEvalKit-sudoku/vlmeval/dataset/mvbench.py ADDED
@@ -0,0 +1,675 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import huggingface_hub
2
+ from huggingface_hub import snapshot_download
3
+ from ..smp import *
4
+ from .video_base import VideoBaseDataset
5
+ from .utils import build_judge, DEBUG_MESSAGE
6
+ from ..utils import track_progress_rich
7
+ import torchvision.transforms as T
8
+ from torchvision import transforms
9
+ from torchvision.transforms.functional import InterpolationMode
10
+ import imageio
11
+ import cv2
12
+ import zipfile
13
+ import os
14
+ import glob
15
+ from .utils.mvbench import *
16
+
17
+ FAIL_MSG = 'Failed to obtain answer via API.'
18
+
19
+
20
+ class MVBench(VideoBaseDataset):
21
+
22
+ MD5 = 'fd21d36522cdedd46d84dc46715ad832'
23
+ SYS = """Carefully watch the video and pay attention to the cause and sequence of events, \
24
+ the detail and movement of objects, and the action and pose of persons. \
25
+ Based on your observations, select the best option that accurately addresses the question.
26
+ """
27
+
28
+ TYPE = 'Video-MCQ'
29
+
30
    def __init__(self, dataset='MVBench', nframe=0, fps=-1):
        """Initialize MVBench.

        ``type_data_list`` maps each task name to a tuple of
        (annotation json, video path prefix, media type, has_time_bound);
        the 'your_data_path' placeholder is rewritten to 'video' when the TSV
        index is generated in ``prepare_dataset``.
        """
        self.type_data_list = {
            'Action Sequence': ('action_sequence.json',
                                'your_data_path/star/Charades_v1_480/', 'video', True),  # has start & end
            'Action Prediction': ('action_prediction.json',
                                  'your_data_path/star/Charades_v1_480/', 'video', True),  # has start & end
            'Action Antonym': ('action_antonym.json',
                               'your_data_path/ssv2_video/', 'video', False),
            'Fine-grained Action': ('fine_grained_action.json',
                                    'your_data_path/Moments_in_Time_Raw/videos/', 'video', False),
            'Unexpected Action': ('unexpected_action.json',
                                  'your_data_path/FunQA_test/test/', 'video', False),
            'Object Existence': ('object_existence.json',
                                 'your_data_path/clevrer/video_validation/', 'video', False),
            'Object Interaction': ('object_interaction.json',
                                   'your_data_path/star/Charades_v1_480/', 'video', True),  # has start & end
            'Object Shuffle': ('object_shuffle.json',
                               'your_data_path/perception/videos/', 'video', False),
            'Moving Direction': ('moving_direction.json',
                                 'your_data_path/clevrer/video_validation/', 'video', False),
            'Action Localization': ('action_localization.json',
                                    'your_data_path/sta/sta_video/', 'video', True),  # has start & end
            'Scene Transition': ('scene_transition.json',
                                 'your_data_path/scene_qa/video/', 'video', False),
            'Action Count': ('action_count.json',
                             'your_data_path/perception/videos/', 'video', False),
            'Moving Count': ('moving_count.json',
                             'your_data_path/clevrer/video_validation/', 'video', False),
            'Moving Attribute': ('moving_attribute.json',
                                 'your_data_path/clevrer/video_validation/', 'video', False),
            'State Change': ('state_change.json',
                             'your_data_path/perception/videos/', 'video', False),
            'Fine-grained Pose': ('fine_grained_pose.json',
                                  'your_data_path/nturgbd/', 'video', False),
            'Character Order': ('character_order.json',
                                'your_data_path/perception/videos/', 'video', False),
            'Egocentric Navigation': ('egocentric_navigation.json',
                                      'your_data_path/vlnqa/', 'video', False),
            'Episodic Reasoning': ('episodic_reasoning.json',
                                   'your_data_path/tvqa/frames_fps3_hq/', 'frame', True),  # has start & end, read frame
            'Counterfactual Inference': ('counterfactual_inference.json',
                                         'your_data_path/clevrer/video_validation/', 'video', False),
        }
        super().__init__(dataset=dataset, nframe=nframe, fps=fps)
74
+
75
+ @classmethod
76
+ def supported_datasets(cls):
77
+ return ['MVBench']
78
+
79
    def prepare_dataset(self, dataset_name='MVBench', repo_id='OpenGVLab/MVBench'):
        """Download (HF or ModelScope) and materialize MVBench; return paths.

        Unzips the packaged videos, relocates the nested 'data0613' layout,
        and regenerates ``{dataset_name}.tsv`` from the per-task annotation
        jsons when the cached copy fails the integrity check.
        """
        def check_integrity(pth):
            # TSV present, checksum matches, and every referenced video exists.
            data_file = osp.join(pth, f'{dataset_name}.tsv')

            if not os.path.exists(data_file):
                return False

            if md5(data_file) != self.MD5:
                return False

            data = load(data_file)
            for idx, item in data.iterrows():
                if not osp.exists(osp.join(pth, item['prefix'], item['video'])):
                    return False
            return True

        if modelscope_flag_set():
            repo_id = 'modelscope/MVBench'

        cache_path = get_cache_path(repo_id, branch='main')
        if cache_path is not None and check_integrity(cache_path):
            dataset_path = cache_path
        else:
            def unzip_hf_zip(pth):
                # Extract every zip archive found under <pth>/video/ in place.
                pth = os.path.join(pth, 'video/')
                for filename in os.listdir(pth):
                    if filename.endswith('.zip'):
                        # Build the full path of the archive.
                        zip_path = os.path.join(pth, filename)

                        # Extract the ZIP archive.
                        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                            zip_ref.extractall(pth)

            def generate_tsv(pth):
                # Rebuild the TSV index from the per-task annotation jsons.
                data_file = osp.join(pth, f'{dataset_name}.tsv')
                if os.path.exists(data_file) and md5(data_file) == self.MD5:
                    return
                json_data_dir = os.path.join(pth, 'json')
                self.data_list = []
                for k, v in self.type_data_list.items():
                    with open(os.path.join(json_data_dir, v[0]), 'r') as f:
                        json_data = json.load(f)
                        for data in json_data:
                            if os.path.exists(os.path.join(pth, v[1].replace('your_data_path', 'video'), data['video'])):
                                self.data_list.append({
                                    'task_type': k,
                                    'prefix': v[1].replace('your_data_path', 'video'),
                                    'data_type': v[2],
                                    'bound': v[3],
                                    'start': data['start'] if 'start' in data.keys() else None,
                                    'end': data['end'] if 'end' in data.keys() else None,
                                    'video': data['video'],
                                    'question': data['question'],
                                    'answer': data['answer'],
                                    'candidates': data['candidates']
                                })
                            else:
                                print(
                                    'NTURGB-D zip file is removed according to MVBench, you can view it at '
                                    'https://huggingface.co/datasets/OpenGVLab/MVBench for detailed reason.'
                                )
                                raise Exception(
                                    f"{os.path.join(v[1].replace('your_data_path', 'video'), data['video'])} does not exist"
                                )

                data_df = pd.DataFrame(self.data_list)
                data_df = data_df.assign(index=range(len(data_df)))
                data_df.to_csv(data_file, sep='\t', index=False)

            def move_files(pth):
                # Flatten video/data0613/<a>/<b>/* into video/<a>/<b>/*.
                src_folder = os.path.join(pth, 'video/data0613')
                if not os.path.exists(src_folder):
                    return
                for subdir in os.listdir(src_folder):
                    subdir_path = os.path.join(src_folder, subdir)
                    if os.path.isdir(subdir_path):
                        for subsubdir in os.listdir(subdir_path):
                            subsubdir_path = os.path.join(subdir_path, subsubdir)
                            if os.path.isdir(subsubdir_path):
                                for item in os.listdir(subsubdir_path):
                                    item_path = os.path.join(subsubdir_path, item)
                                    target_folder = os.path.join(pth, 'video', subdir, subsubdir)
                                    if not os.path.exists(target_folder):
                                        os.makedirs(target_folder)
                                    target_path = os.path.join(target_folder, item)
                                    try:
                                        shutil.move(item_path, target_path)
                                    except Exception as e:
                                        print(f"Error moving {item_path} to {target_path}: {e}")

            if modelscope_flag_set():
                from modelscope import dataset_snapshot_download
                dataset_path = dataset_snapshot_download(dataset_id=repo_id, revision='master')
            else:
                # NOTE(review): logs in even when HUGGINGFACE_TOKEN is unset
                # (passes None) — confirm anonymous access is acceptable here.
                hf_token = os.environ.get('HUGGINGFACE_TOKEN')
                huggingface_hub.login(hf_token)
                dataset_path = snapshot_download(repo_id=repo_id, repo_type='dataset')
            unzip_hf_zip(dataset_path)
            move_files(dataset_path)
            generate_tsv(dataset_path)

        data_file = osp.join(dataset_path, f'{dataset_name}.tsv')

        # Dispatch table from media type to the matching reader method.
        self.decord_method = {
            'video': self.read_video,
            'gif': self.read_gif,
            'frame': self.read_frame,
        }

        self.nframe = 8
        self.frame_fps = 3

        # transform
        self.transform = T.Compose([
            Stack(),
            ToTorchFormatTensor()
        ])

        return dict(root=dataset_path, data_file=data_file)
199
+
200
+ def get_index(self, bound, fps, max_frame, first_idx=0):
201
+ if bound:
202
+ start, end = bound[0], bound[1]
203
+ else:
204
+ start, end = -100000, 100000
205
+ start_idx = max(first_idx, round(start * fps))
206
+ end_idx = min(round(end * fps), max_frame)
207
+ seg_size = float(end_idx - start_idx) / self.num_segments
208
+ frame_indices = np.array([
209
+ int(start_idx + (seg_size / 2) + np.round(seg_size * idx))
210
+ for idx in range(self.num_segments)
211
+ ])
212
+ return frame_indices
213
+
214
    def read_video(self, video_path, bound=None):
        """Decode a video file and return ``num_segments`` sampled frames.

        Frames are selected by ``get_index`` (optionally restricted to the
        ``bound`` time window) and stacked by ``self.transform``.
        """
        from decord import VideoReader, cpu
        vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
        max_frame = len(vr) - 1
        fps = float(vr.get_avg_fps())

        images_group = list()
        frame_indices = self.get_index(bound, fps, max_frame, first_idx=0)
        for frame_index in frame_indices:
            img = Image.fromarray(vr[frame_index].asnumpy())
            images_group.append(img)
        torch_imgs = self.transform(images_group)
        return torch_imgs
227
+
228
    def read_gif(self, video_path, bound=None, fps=25):
        """Decode a GIF and return the sampled frames as a stacked tensor.

        NOTE(review): assumes a nominal 25 fps for GIFs when computing
        time-bounded indices — confirm against the dataset's GIF sources.
        """
        gif = imageio.get_reader(video_path)
        max_frame = len(gif) - 1

        images_group = list()
        frame_indices = self.get_index(bound, fps, max_frame, first_idx=0)
        # GIF readers are sequential: iterate all frames, keep sampled ones.
        for index, frame in enumerate(gif):
            if index in frame_indices:
                # GIF frames carry an alpha channel; drop it for RGB models.
                img = cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB)
                img = Image.fromarray(img)
                images_group.append(img)
        torch_imgs = self.transform(images_group)
        return torch_imgs
241
+
242
    def read_frame(self, video_path, bound=None, fps=3):
        """Load pre-extracted JPEG frames from a directory and sample them.

        Frame files are expected to be named ``00001.jpg``, ``00002.jpg``, …
        (1-based), hence ``first_idx=1`` below.
        """
        max_frame = len(os.listdir(video_path))
        images_group = list()
        frame_indices = self.get_index(bound, fps, max_frame, first_idx=1)  # frame_idx starts from 1
        for frame_index in frame_indices:
            img = Image.open(os.path.join(video_path, f'{frame_index:05d}.jpg'))
            images_group.append(img)
        torch_imgs = self.transform(images_group)
        return torch_imgs
251
+
252
    def save_video_frames(self, imgs, video_name, frames):
        """Split the stacked frame tensor and persist one image file per frame.

        A per-video lock file serializes concurrent workers so the same
        frames are not written twice. Returns the list of frame paths.
        """
        frame_paths = self.frame_paths(video_name)
        flag = np.all([osp.exists(p) for p in frame_paths])

        if not flag:
            # Lock file is named after the video so locking is per-sample.
            lock_path = osp.join(self.frame_root, f'{video_name}.lock')
            with portalocker.Lock(lock_path, 'w', timeout=30):
                # Re-check inside the lock to avoid duplicate writes.
                if not np.all([osp.exists(p) for p in frame_paths]):
                    block_size = imgs.size(0) // frames
                    split_tensors = torch.split(imgs, block_size)
                    to_pil = transforms.ToPILImage()
                    images = [to_pil(arr) for arr in split_tensors]
                    for im, pth in zip(images, frame_paths):
                        if not osp.exists(pth):
                            im.save(pth)

        return frame_paths
272
+
273
+ def qa_template(self, data):
274
+ question = f"Question: {data['question']}\n"
275
+ question += 'Options:\n'
276
+ answer = data['answer']
277
+ answer_idx = -1
278
+ for idx, c in enumerate(eval(data['candidates'])):
279
+ question += f"({chr(ord('A') + idx)}) {c}\n"
280
+ if c == answer:
281
+ answer_idx = idx
282
+ question = question.rstrip()
283
+ answer = f"({chr(ord('A') + answer_idx)}) {answer}"
284
+ return question, answer
285
+
286
    def load_into_video_and_process(self, line):
        """Normalize one sample into an MP4 file suitable for video LLMs.

        GIF/webm sources and frame directories are transcoded to MP4 (cached
        on disk); when the sample has a time ``bound``, the clip is further
        trimmed to [start, end]. Returns the path of the final video.
        """
        try:
            from moviepy.editor import VideoFileClip, ImageSequenceClip
        except:
            # NOTE(review): bare except also masks non-import errors raised
            # by moviepy's own initialization.
            raise ImportError(
                'MoviePy is not installed, please install it by running "pip install moviepy==1.0.3"'
            )
        video_path = os.path.join(self.data_root, line['prefix'], line['video'])

        if line['data_type'] in ['gif'] or os.path.splitext(video_path)[1] in ['.webm']:
            processed_video_path = video_path.replace(os.path.splitext(video_path)[1], '.mp4')
            if not os.path.exists(processed_video_path):
                # using MoviePy to transform GIF, webm into mp4 format
                gif_clip = VideoFileClip(video_path)
                gif_clip.write_videofile(processed_video_path, codec='libx264')
                gif_clip.close()
        elif line['data_type'] in ['frame']:
            input_images = os.path.join(video_path, '*.jpg')
            processed_video_path = f'{video_path}.mp4'
            if not os.path.exists(processed_video_path):
                # using MoviePy to transform images into mp4
                image_files = sorted(glob.glob(input_images))
                image_clip = ImageSequenceClip(image_files, fps=self.frame_fps)
                image_clip.write_videofile(processed_video_path, codec='libx264')
                image_clip.close()
        else:
            # Already a playable container (e.g. mp4): use as-is.
            processed_video_path = video_path

        if line['bound']:
            base_name, suffix = os.path.splitext(processed_video_path)
            output_video_path = f'{base_name}_processed{suffix}'
            if not os.path.exists(output_video_path):
                video_clip = VideoFileClip(processed_video_path)
                # Clamp the end to the clip duration to avoid subclip errors.
                clip = video_clip.subclip(line['start'], min(line['end'], video_clip.duration))
                clip.write_videofile(output_video_path)
                clip.close()
        else:
            output_video_path = processed_video_path

        return output_video_path
326
+
327
    def save_video_into_images(self, line):
        """Decode one sample with the type-appropriate reader and dump its
        sampled frames to disk; returns the list of frame image paths."""
        bound = None
        if line['bound']:
            bound = (
                line['start'],
                line['end'],
            )
        video_path = os.path.join(self.data_root, line['prefix'], line['video'])
        # Dispatch on 'video' / 'gif' / 'frame' via the table built in
        # prepare_dataset.
        decord_method = self.decord_method[line['data_type']]
        self.num_segments = self.nframe
        torch_imgs = decord_method(video_path, bound)
        img_frame_paths = self.save_video_frames(torch_imgs, line['video'], self.num_segments)
        return img_frame_paths
340
+
341
    def build_prompt(self, line, video_llm):
        """Build the multimodal message list for one sample.

        ``video_llm=True`` attaches a normalized MP4; otherwise sampled
        frames are attached as individual images. ``line`` may be a row or
        an integer row index.
        """
        # This loader is frame-count based only; fps sampling belongs to
        # the MP4 variant of the dataset.
        if self.fps > 0:
            raise ValueError('MVBench does not support fps setting, please transfer to MVBench_MP4!')
        if isinstance(line, int):
            assert line < len(self)
            line = self.data.iloc[line]

        question, answer = self.qa_template(line)
        message = [dict(type='text', value=self.SYS, role='system')]
        if video_llm:
            new_video_path = self.load_into_video_and_process(line)
            message.append(dict(type='video', value=new_video_path))
        else:
            img_frame_paths = self.save_video_into_images(line)
            for im in img_frame_paths:
                message.append(dict(type='image', value=im))
        message.append(dict(type='text', value=question))
        message.append(dict(type='text', value='\nOnly give the best option.'))
        # Assistant-side prefix nudging the model to emit just an option letter.
        message.append(dict(type='text', value='Best option:(', role='assistant'))
        return message
361
+
362
    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        # NOTE(review): declared @classmethod but the first parameter is
        # named `self`; it receives the class object.
        """Score predictions (exact matching or an LLM judge) and dump
        a score file plus a per-dimension rating JSON."""

        assert get_file_extension(eval_file) in ['xlsx', 'json', 'tsv'], 'data file should be an supported format (xlsx/json/tsv) file'  # noqa: E501

        tmp_file = get_intermediate_file_path(eval_file, '_tmp', 'pkl')
        tgt_file = get_intermediate_file_path(eval_file, '_rating', 'json')
        score_file = get_intermediate_file_path(eval_file, '_score')

        if not osp.exists(score_file):
            model = judge_kwargs.setdefault('model', 'chatgpt-0125')
            assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125']

            # Fall back to exact matching when no working judge is available.
            if model == 'exact_matching':
                model = None
            elif gpt_key_set():
                model = build_judge(**judge_kwargs)
                if not model.working():
                    warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
                    warnings.warn(DEBUG_MESSAGE)
                    model = None
            else:
                warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
                model = None
            # Drop cached entries that recorded an API failure.
            res = {} if not osp.exists(tmp_file) else load(tmp_file)
            res = {k: v for k, v in res.items() if FAIL_MSG not in v}

            data = load(eval_file)
            data_un = data[~pd.isna(data['prediction'])]

            for idx in data_un['index']:
                ans = data.loc[data['index'] == idx, 'answer'].values[0]
                pred = data.loc[data['index'] == idx, 'prediction'].values[0]
                options = eval(data.loc[data['index'] == idx, 'candidates'].values[0])
                answer_idx = -1
                for id, c in enumerate(options):
                    if c == ans:
                        answer_idx = id
                # Reformat the GT answer as a lettered option, e.g. "(B) cat".
                ans = f"({chr(ord('A') + answer_idx)}) {ans}"
                input_item = data.loc[data['index'] == idx].to_dict(orient='records')[0]
                # Expose each option under its letter and convert the answer
                # field to that letter for the judge prompt.
                for id, option_content in enumerate(eval(input_item['candidates'])):
                    input_item[chr(ord('A') + id)] = option_content
                    if option_content == input_item['answer']:
                        input_item['answer'] = chr(ord('A') + id)

                if FAIL_MSG in pred:
                    # Inference failure: -1 (excluded from VALID rating).
                    data.loc[idx, 'score'] = -1
                else:
                    # NOTE(review): `data.loc[idx, ...]` indexes by DataFrame
                    # label while `idx` comes from the 'index' column — this
                    # assumes the two coincide; verify against eval_file.
                    data.loc[idx, 'score'] = int(check_ans_with_model(
                        pred, ans, model,
                        input_item,
                        'MVBench'
                    ))

            rejected = [x for x in data['score'] if x == -1]

            print(
                f'Among {len(data)} questions, failed to obtain prediction for {len(data) - len(data_un)} questions, '
                f'failed to obtain the score for another {len(rejected)} questions. '
                f'Those questions will be counted as -1 score in ALL rating, and will not be counted in VALID rating.'
            )

            dump(data, score_file)

        rating = get_dimension_rating(score_file)
        dump(rating, tgt_file)
        return rating
429
+
430
+
431
class MVBench_MP4(VideoBaseDataset):
    """MVBench served as raw MP4 clips (repo branch ``video``).

    Unlike the frame/gif-based ``MVBench`` loader, every sample here is a
    single MP4 file, so both fixed-frame-count (``nframe``) and fixed-fps
    (``fps``) sampling are supported.
    """

    # MD5 checksum of the generated TSV index; used for cache integrity.
    MP4_MD5 = '5c8c6f8b7972c2de65a629590f7c42f5'
    # System prompt prepended (as a system message) to every question.
    SYS = """Carefully watch the video and pay attention to the cause and sequence of events, \
the detail and movement of objects, and the action and pose of persons. \
Based on your observations, select the best option that accurately addresses the question.
"""
    TYPE = 'Video-MCQ'

    def __init__(self, dataset='MVBench_MP4', nframe=0, fps=-1):
        super().__init__(dataset=dataset, nframe=nframe, fps=fps)

    @classmethod
    def supported_datasets(cls):
        return ['MVBench_MP4']

    def prepare_dataset(self, dataset_name='MVBench_MP4', repo_id='OpenGVLab/MVBench'):
        """Download (or reuse a cached copy of) the dataset and build its TSV
        index; returns dict(root=..., data_file=...)."""
        def check_integrity(pth):
            # Cache is valid only if the TSV exists, matches the known MD5,
            # and every referenced video file is present on disk.
            data_file = osp.join(pth, f'{dataset_name}.tsv')

            if not os.path.exists(data_file):
                return False

            if md5(data_file) != self.MP4_MD5:
                return False

            data = load(data_file)
            for idx, item in data.iterrows():
                if not osp.exists(osp.join(pth, item['prefix'], item['video'])):
                    return False
            return True

        if modelscope_flag_set():
            repo_id = 'modelscope/MVBench'

        cache_path = get_cache_path(repo_id, branch='video')
        if cache_path is not None and check_integrity(cache_path):
            dataset_path = cache_path
        else:
            def generate_tsv(pth):
                # Flatten test.json (task -> samples) into one TSV row per
                # sample; skip if an up-to-date TSV already exists.
                data_file = osp.join(pth, f'{dataset_name}.tsv')
                if os.path.exists(data_file) and md5(data_file) == self.MP4_MD5:
                    return
                json_data_path = os.path.join(dataset_path, 'test.json')
                json_data = load(json_data_path)
                root_data_dict = json_data['root']
                self.data_list = []
                for k, v in json_data['meta'].items():
                    for item in v:
                        self.data_list.append({
                            'task_type': k,
                            'prefix': root_data_dict[k],
                            'video': item['video'],
                            'question': item['question'],
                            'answer': item['answer'],
                            'candidates': item['candidates']
                        })
                data_df = pd.DataFrame(self.data_list)
                data_df = data_df.assign(index=range(len(data_df)))
                data_df.to_csv(data_file, sep='\t', index=False)

            if modelscope_flag_set():
                from modelscope import dataset_snapshot_download
                dataset_path = dataset_snapshot_download(dataset_id=repo_id, revision='video')
            else:
                hf_token = os.environ.get('HUGGINGFACE_TOKEN')
                huggingface_hub.login(hf_token)
                dataset_path = snapshot_download(repo_id=repo_id, repo_type='dataset', revision='video')
            generate_tsv(dataset_path)

        data_file = osp.join(dataset_path, f'{dataset_name}.tsv')

        # transform: stack sampled PIL frames into one torch tensor.
        self.transform = T.Compose([
            Stack(),
            ToTorchFormatTensor()
        ])

        return dict(root=dataset_path, data_file=data_file)

    def qa_template(self, data):
        """Format a row into (question text with lettered options, lettered
        answer). Candidates are a stringified list decoded with eval()."""
        question = f"Question: {data['question']}\n"
        question += 'Options:\n'
        answer = data['answer']
        answer_idx = -1
        for idx, c in enumerate(eval(data['candidates'])):
            question += f"({chr(ord('A') + idx)}) {c}\n"
            if c == answer:
                answer_idx = idx
        question = question.rstrip()
        answer = f"({chr(ord('A') + answer_idx)}) {answer}"
        return question, answer

    def get_index_by_frame(self, max_frame):
        # Evenly spaced indices: midpoint of each of num_segments segments.
        seg_size = float(max_frame) / self.num_segments
        frame_indices = np.array([
            int((seg_size / 2) + np.round(seg_size * idx))
            for idx in range(self.num_segments)
        ])
        return frame_indices

    def get_index_by_fps(self, vid, fps):
        # Sample frames at a fixed target fps; also records the resulting
        # frame count into self.num_segments as a side effect.
        total_frames = len(vid)
        video_fps = vid.get_avg_fps()
        total_duration = total_frames / video_fps
        required_frames = int(total_duration * fps)
        step_size = video_fps / fps
        frame_indices = np.array([int(i * step_size) for i in range(required_frames)])
        self.num_segments = len(frame_indices)
        return frame_indices

    def read_video(self, video_path):
        """Decode the video with decord and return sampled frames as a
        stacked tensor; sampling mode depends on self.fps."""
        from decord import VideoReader, cpu
        vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
        max_frame = len(vr) - 1

        images_group = list()
        if self.fps < 0:
            frame_indices = self.get_index_by_frame(max_frame)
        else:
            frame_indices = self.get_index_by_fps(vr, self.fps)

        for frame_index in frame_indices:
            img = Image.fromarray(vr[frame_index].asnumpy())
            images_group.append(img)
        torch_imgs = self.transform(images_group)
        return torch_imgs

    def save_video_frames(self, imgs, video_name, frames):
        """Split the stacked frame tensor and persist one image per frame,
        guarded by a per-video lock file; returns the frame paths."""
        if self.fps > 0:
            frame_paths = self.frame_paths_fps(video_name, frames)
        else:
            frame_paths = self.frame_paths(video_name)
        flag = np.all([osp.exists(p) for p in frame_paths])

        if not flag:
            lock_path = osp.join(self.frame_root, f'{video_name}.lock')
            with portalocker.Lock(lock_path, 'w', timeout=30):
                # Re-check inside the lock to avoid duplicate writes.
                if not np.all([osp.exists(p) for p in frame_paths]):
                    block_size = imgs.size(0) // frames
                    split_tensors = torch.split(imgs, block_size)
                    to_pil = transforms.ToPILImage()
                    images = [to_pil(arr) for arr in split_tensors]
                    for im, pth in zip(images, frame_paths):
                        if not osp.exists(pth):
                            im.save(pth)

        return frame_paths

    def save_video_into_images(self, line):
        """Decode one sample and dump its sampled frames to disk."""
        video_path = os.path.join(self.data_root, line['prefix'], line['video'])
        if self.fps <= 0:
            self.num_segments = self.nframe
        else:
            # Placeholder; get_index_by_fps sets the real count in read_video.
            self.num_segments = 0
        torch_imgs = self.read_video(video_path)
        img_frame_paths = self.save_video_frames(torch_imgs, line['video'], self.num_segments)
        return img_frame_paths

    def build_prompt(self, line, video_llm):
        """Build the multimodal message list for one sample; raw video for
        video LLMs, sampled frame images otherwise."""
        if isinstance(line, int):
            assert line < len(self)
            line = self.data.iloc[line]

        question, answer = self.qa_template(line)
        message = [dict(type='text', value=self.SYS, role='system')]
        video_path = os.path.join(self.data_root, line['prefix'], line['video'])
        if video_llm:
            message.append(dict(type='video', value=video_path))
        else:
            img_frame_paths = self.save_video_into_images(line)
            for im in img_frame_paths:
                message.append(dict(type='image', value=im))
        message.append(dict(type='text', value=question))
        message.append(dict(type='text', value='\nOnly give the best option.'))
        # Assistant-side prefix nudging the model to emit an option letter.
        message.append(dict(type='text', value='Best option:(', role='assistant'))
        return message

    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        # NOTE(review): declared @classmethod but the first parameter is
        # named `self`; it receives the class object.
        """Score predictions (exact matching or an LLM judge) and dump a
        score file plus a per-dimension rating JSON."""

        assert get_file_extension(eval_file) in ['xlsx', 'json', 'tsv'], 'data file should be an supported format (xlsx/json/tsv) file'  # noqa: E501

        tmp_file = get_intermediate_file_path(eval_file, '_tmp', 'pkl')
        tgt_file = get_intermediate_file_path(eval_file, '_rating', 'json')
        score_file = get_intermediate_file_path(eval_file, '_score')

        if not osp.exists(score_file):
            model = judge_kwargs.setdefault('model', 'chatgpt-0125')
            assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125']

            # Fall back to exact matching when no working judge is available.
            if model == 'exact_matching':
                model = None
            elif gpt_key_set():
                model = build_judge(**judge_kwargs)
                if not model.working():
                    warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
                    warnings.warn(DEBUG_MESSAGE)
                    model = None
            else:
                warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
                model = None
            # Drop cached entries that recorded an API failure.
            res = {} if not osp.exists(tmp_file) else load(tmp_file)
            res = {k: v for k, v in res.items() if FAIL_MSG not in v}

            data = load(eval_file)
            data_un = data[~pd.isna(data['prediction'])]

            for idx in data_un['index']:
                ans = data.loc[data['index'] == idx, 'answer'].values[0]
                pred = data.loc[data['index'] == idx, 'prediction'].values[0]
                options = eval(data.loc[data['index'] == idx, 'candidates'].values[0])
                answer_idx = -1
                for id, c in enumerate(options):
                    if c == ans:
                        answer_idx = id
                # Reformat the GT answer as a lettered option, e.g. "(B) cat".
                ans = f"({chr(ord('A') + answer_idx)}) {ans}"
                input_item = data.loc[data['index'] == idx].to_dict(orient='records')[0]
                for id, option_content in enumerate(eval(input_item['candidates'])):
                    input_item[chr(ord('A') + id)] = option_content
                    if option_content == input_item['answer']:
                        input_item['answer'] = chr(ord('A') + id)

                if FAIL_MSG in pred:
                    # Inference failure: -1 (excluded from VALID rating).
                    data.loc[idx, 'score'] = -1
                else:
                    # NOTE(review): `data.loc[idx, ...]` indexes by DataFrame
                    # label while `idx` comes from the 'index' column — this
                    # assumes the two coincide; verify against eval_file.
                    data.loc[idx, 'score'] = int(check_ans_with_model(
                        pred, ans, model,
                        input_item,
                        'MVBench_MP4'
                    ))

            rejected = [x for x in data['score'] if x == -1]

            print(
                f'Among {len(data)} questions, failed to obtain prediction for {len(data) - len(data_un)} questions, '
                f'failed to obtain the score for another {len(rejected)} questions. '
                f'Those questions will be counted as -1 score in ALL rating, and will not be counted in VALID rating.'
            )

            dump(data, score_file)

        rating = get_dimension_rating(score_file)
        dump(rating, tgt_file)
        return rating
VLMEvalKit-sudoku/vlmeval/dataset/sfebench.py ADDED
@@ -0,0 +1,223 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import string
2
+ from vlmeval import *
3
+ from ..smp import *
4
+ from ..smp.file import get_intermediate_file_path
5
+ from .image_vqa import ImageVQADataset
6
+ from .utils.judge_util import build_judge
7
+ from ..utils import track_progress_rich
8
+
9
+ EVAL_TEMPLATE = """
10
+ You are a strict evaluator assessing answer correctness. You must score the model's prediction on a scale from 0 to 9.
11
+ 0 represents an entirely incorrect answer and 9 indicates a highly correct answer.
12
+
13
+ # Input
14
+ Question
15
+ {question}
16
+ Ground Truth Answer
17
+ {answer}
18
+ Model Prediction
19
+ {prediction}
20
+
21
+ # Evaluation Rules
22
+ - The model prediction may contain the reasoning process, you should spot the final answer
23
+ from it.
24
+ - For multiple-choice questions: Assign a higher score if the predicted answer matches the
25
+ ground truth, either by option letters or content. Include partial credit for answers that are
26
+ close in content.
27
+ - For exact match and open-ended questions:
28
+ - Assign a high score if the prediction matches the answer semantically, considering variations in format.
29
+ - Deduct points for partially correct answers or those with incorrect additional information.
30
+ - Ignore minor differences in formatting, capitalization, or spacing since the model may explain in a different way.
31
+ - Treat numerical answers as correct if they match within reasonable precision
32
+ - For questions requiring units, both value and unit must be correct
33
+
34
+ # Scoring Guide
35
+ Provide a single integer from 0 to 9 to reflect your judgment of the answer's correctness.
36
+ # Strict Output format example
37
+ 4
38
+ """
39
+
40
+
41
def report_score(df):
    """Aggregate 0-9 judge scores into percentage accuracies.

    Produces one row per split (a single 'none' split is synthesized when
    the frame has no 'split' column — note this mutates ``df`` in place),
    with an 'Overall' column plus one column per 'category' value.
    """

    def pct(frame, sp):
        # Mean score within split `sp`, rescaled from 0-9 to 0-100.
        return np.mean(frame[frame['split'] == sp]['score']) / 9 * 100

    res = defaultdict(list)

    if 'split' in df:
        res['split'] = list(set(df['split']))
    else:
        # No split info: place every row in a single pseudo-split.
        df['split'] = ['none'] * len(df)
        res['split'] = ['none']

    res['Overall'] = [pct(df, sp) for sp in res['split']]
    if 'category' in df:
        for ability in sorted(set(df['category'])):
            sub = df[df['category'] == ability]
            res[ability] = [pct(sub, sp) for sp in res['split']]
    return pd.DataFrame(res)
64
+
65
+
66
def make_prompt(line):
    """Fill the judge template with one sample's question, ground-truth
    answer, and model prediction."""
    return EVAL_TEMPLATE.format(
        question=line['question'],
        answer=line['answer'],
        prediction=line['prediction'],
    )
76
+
77
+
78
def SFE_auxeval(model, data):
    """Judge prediction(s) with an LLM, returning a 0-9 score.

    ``data`` is either a single row (Series/dict) or a multi-row DataFrame
    (grouped samples); for a group the per-row scores are averaged. Each
    judge call is retried up to 3 times with increasing temperature until
    the model emits a bare integer in [0, 9].

    Returns dict(score=..., log=...); score defaults to 0 on judge failure.
    """
    retry = 3
    if isinstance(data, pd.DataFrame) and len(data) > 1:
        lt = len(data)
        # Bug fix: the accumulator was previously reset inside the loop, so
        # the "average" reflected only the last row's score divided by lt.
        total_score = 0
        for i in range(lt):
            item = data.iloc[i]
            prompt = make_prompt(item)
            for j in range(retry):
                output = model.generate(prompt, temperature=0.5 * j)
                if output.isdigit() and 0 <= int(output) <= 9:
                    total_score += int(output)
                    break
        avg_score = total_score / lt
        return dict(score=avg_score, log='Success to Judge')
    else:
        item = data.iloc[0] if isinstance(data, pd.DataFrame) else data
        prompt = make_prompt(item)
        for i in range(retry):
            output = model.generate(prompt, temperature=0.5 * i)
            if output.isdigit() and 0 <= int(output) <= 9:
                return dict(score=int(output), log='Success to Judge')
        return dict(score=0, log='Fail to Judge')
102
+
103
+
104
class SFE(ImageVQADataset):
    """SFE benchmark: multi-discipline VQA whose free-form predictions are
    graded on a 0-9 scale by an LLM judge (see ``SFE_auxeval``)."""

    DATASET_URL = {
        'SFE': 'https://opencompass.openxlab.space/utils/VLMEval/SFE.tsv',
        'SFE-zh': 'https://opencompass.openxlab.space/utils/VLMEval/SFE-zh.tsv'
    }

    DATASET_MD5 = {
        'SFE': 'd4601425e7c9a62446b63a1faee17da5',
        'SFE-zh': '3e0250b7f30da55bf8f7b95eace66d82'
    }

    # Per-question-type instruction prefixes; {discipline} is filled from
    # the sample's 'category' column.
    MCQ_PROMPT = (
        "You are an expert in {discipline} and need to solve the following question. "
        + "The question is a multiple-choice question. "
        + "Answer with the option letter from the given choices."
    )

    EXACT_MATCH_PROMPT = (
        "You are an expert in {discipline} and need to solve the following question. "
        + "The question is an exact match question. Answer the question using a single word or phrase."
    )

    OPEN_QUESTION_PROMPT = (
        "You are an expert in {discipline} and need to solve the following question. "
        + "The question is an open-ended question. Answer the question using a phrase."
    )

    def build_prompt(self, line):
        """Build an interleaved text/image message list for one sample.

        The question text may contain '<image>' placeholders; their count
        must equal the number of dumped images (asserted below).
        """
        if isinstance(line, int):
            line = self.data.iloc[line]
        tgt_path = self.dump_image(line)

        question_type = line['question_type']
        field = line['category']
        question = line['question']

        if question_type == 'exact_match':
            prompt = self.EXACT_MATCH_PROMPT.format(discipline=field)
            question = prompt + " " + question
        elif question_type == 'mcq':
            prompt = self.MCQ_PROMPT.format(discipline=field)
            question = prompt + " " + question
            if not pd.isna(line['A']):
                question += '\nChoices are:\n'
                # Options live in columns 'A'..'O'; stop at the first gap.
                for ch in string.ascii_uppercase[:15]:
                    if not pd.isna(line[ch]):
                        question += f'{ch}. {line[ch]}\n'
                    else:
                        break
        elif question_type == 'open_ended':
            prompt = self.OPEN_QUESTION_PROMPT.format(discipline=field)
            question = prompt + " " + question

        # Interleave text segments with their corresponding images.
        prompt_segs = question.split('<image>')
        assert len(prompt_segs) == len(tgt_path) + 1
        msgs = []
        for i in range(len(tgt_path)):
            text = prompt_segs[i].strip()
            if text != '':
                msgs.append(dict(type='text', value=text))
            msgs.append(dict(type='image', value=tgt_path[i]))
        text = prompt_segs[-1].strip()
        if text != '':
            msgs.append(dict(type='text', value=text))
        return msgs

    def evaluate(self, eval_file, **judge_kwargs):
        """Grade predictions with an LLM judge and report percentage scores.

        Judged rows are cached in a '_judge' file; per-item judgments are
        checkpointed in a '_tmp' pickle so interrupted runs can resume.
        """
        data = load(eval_file)
        _ = self.dataset_name
        assert 'answer' in data and 'prediction' in data
        data['prediction'] = [str(x) for x in data['prediction']]
        data['answer'] = [str(x) for x in data['answer']]
        storage = get_intermediate_file_path(eval_file, '_judge')
        tmp_file = get_intermediate_file_path(eval_file, '_tmp', 'pkl')
        nproc = judge_kwargs.pop('nproc', 4)
        if not osp.exists(storage):
            ans_map = {} if not osp.exists(tmp_file) else load(tmp_file)

            model = judge_kwargs.pop('model', 'gpt-4o-1120')
            # NOTE(review): when no judge is available (model is None), the
            # code below still reads ans_map for every index — it only works
            # if a previous run already judged everything.
            if model == 'exact_matching':
                model = None
            elif gpt_key_set():
                model = build_judge(model=model, **judge_kwargs)
                if not model.working():
                    warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
                    model = None
            else:
                model = None
                warnings.warn('OPENAI_API_KEY is not working properly, will use exact matching for evaluation')

            if model is not None:
                if 'g_index' not in data:
                    # One judge call per row, skipping checkpointed indices.
                    lines = [data.iloc[i] for i in range(len(data))]
                    indices = [x['index'] for x in lines if x['index'] not in ans_map]
                    lines = [x for x in lines if x['index'] not in ans_map]
                    tups = [(model, line) for line in lines]
                else:
                    # Grouped samples: judge each g_index group as one unit
                    # and keep only each group's main row.
                    main_data = data[[x == y for x, y in zip(data['index'], data['g_index'])]]
                    lines = [data[data['g_index'] == x] for x in main_data['index']]
                    indices = [x.iloc[0]['g_index'] for x in lines if x.iloc[0]['g_index'] not in ans_map]
                    lines = [x for x in lines if x.iloc[0]['g_index'] not in ans_map]
                    tups = [(model, x) for x in lines]
                    data = main_data

                if len(lines):
                    res = track_progress_rich(
                        SFE_auxeval, tups, nproc=nproc, chunksize=nproc, keys=indices, save=tmp_file)
                    for k, v in zip(indices, res):
                        ans_map[k] = v

            judge_results = [ans_map[x] for x in data['index']]
            data['score'] = [x['score'] for x in judge_results]
            dump(data, storage)
        data = load(storage)
        score = report_score(data)

        score_file = get_intermediate_file_path(eval_file, '_score', 'csv')
        dump(score, score_file)
        return score
VLMEvalKit-sudoku/vlmeval/dataset/slidevqa.py ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ import math
3
+ from typing import List
4
+
5
+ from vlmeval.dataset.utils.judge_util import build_judge
6
+ from vlmeval.smp import *
7
+ from .image_base import ImageBaseDataset
8
+ from .mmlongbench import concat_images, MMLongBench_auxeval, anls_compute
9
+ from ..smp.file import get_intermediate_file_path
10
+
11
+
12
+ FAIL_MSG = 'Failed to obtain answer via API.'
13
+
14
+
15
def get_f1(gt, pred):
    """Token-overlap F1 between ground-truth and prediction strings.

    Tokens are whitespace-separated; the overlap count is taken over the
    prediction's tokens (with multiplicity) that occur in the ground truth.
    Returns 0.0 when either side is empty or there is no overlap.
    """
    gt_tokens = gt.strip().split()
    pred_tokens = pred.strip().split()
    if not gt_tokens or not pred_tokens:
        return 0.0

    overlap = sum(1 for tok in pred_tokens if tok in gt_tokens)
    recall = overlap / len(gt_tokens)
    precision = overlap / len(pred_tokens)
    if recall + precision <= 1e-4:
        return 0.0
    return 2 * recall * precision / (recall + precision)
24
+
25
+
26
def SlideVQA_acc(result_file):
    """Compute ANLS / exact-match / token-F1 metrics over a judged result file.

    Writes per-item 'anls'/'em'/'f1' columns back into the file and returns
    a small summary DataFrame with per-metric averages.
    """
    data = load(result_file)
    anls_list, em_list, f1_list = list(), list(), list()
    for i in range(len(data)):
        item = data.iloc[i]
        # NaN answers mark unanswerable questions.
        if isinstance(item['answer'], float) and math.isnan(item['answer']):
            item['answer'] = 'Not answerable'

        # Normalize: strip newlines, lowercase both sides before comparison.
        item['answer'] = re.sub('\n', '', item['answer']).lower()
        item['pred'] = str(item['pred']).lower()
        anls_score = anls_compute(item['answer'], item['pred'])
        em_score = (item['answer'].strip() == item['pred'].strip())
        f1_score = get_f1(item['answer'], item['pred'])
        anls_list.append(anls_score)
        em_list.append(em_score)
        f1_list.append(f1_score)
        print('---------------------')
        print(item['answer'], item['pred'], anls_score, em_score, f1_score)

    data['anls'] = anls_list
    data['em'] = em_list
    data['f1'] = f1_list
    # Persist per-item metrics alongside the predictions.
    dump(data, result_file)

    res = dict()
    res['category'], res['num'] = ['anls', 'EM', 'F1'], [len(data), len(data), len(data)]
    res['avg'] = [sum(anls_list) / len(data), sum(em_list) / len(data), sum(f1_list) / len(data)]
    res = pd.DataFrame(res)
    return res
55
+
56
+
57
class SlideVQA(ImageBaseDataset):
    """SlideVQA: VQA over multi-page slide decks.

    Pages may be tiled into composite images before inference, with a
    per-model (concat_num, column_num) layout from ``SUPPORTED_MODELS``.
    """

    TYPE = 'VQA'

    DATASET_URL = {
        'SLIDEVQA_MINI': 'https://opencompass.openxlab.space/utils/VLMEval/SLIDEVQA_MINI.tsv',
        'SLIDEVQA': 'https://opencompass.openxlab.space/utils/VLMEval/SLIDEVQA.tsv',
    }
    DATASET_MD5 = {
        'SLIDEVQA_MINI': '6d9a8d8814fa5b7669deb2af3a3208eb',
        'SLIDEVQA': '5e822c2f800e94c1e23badfd478326b6',
    }

    # model name -> (concat_num, column_num) used when tiling page images;
    # column_num == -1 means "concatenate everything into one image".
    SUPPORTED_MODELS = {
        'GPT4': (1, 1),
        'GPT4V': (1, 1),
        'GPT4V_HIGH': (1, 1),
        'GPT4o': (1, 1),
        'GPT4o_HIGH': (1, 1),
        'GPT4o_MINI': (1, 1),
        'XComposer2d5': (1, -1),
        'XComposer2_4KHD': (1, -1),
        'MiniCPM-Llama3-V-2_5': (1, 5),
        'InternVL-Chat-V1-5': (5, 2),
    }

    def __init__(self, dataset, **kwargs):
        """Requires kwargs['model'] to be one of SUPPORTED_MODELS; raises
        AssertionError otherwise."""
        self.model_list = list(self.SUPPORTED_MODELS.keys())
        model_name = kwargs['model']
        if not listinstr(self.model_list, model_name):
            raise AssertionError("{} doesn't support the evaluation on SlideVQA.".format(model_name))
        super(SlideVQA, self).__init__(dataset)

        # API models receive the original page images (no concatenation).
        self.is_api = True if listinstr(['GPT4'], model_name) else False
        self.max_pages = 120
        concat_num, column_num = self.SUPPORTED_MODELS.get(model_name)
        self.concat_num = concat_num
        self.column_num = column_num

    def dump_image(self, origin_line):
        """Materialize a sample's page images on disk and (for non-API
        models) tile them into composite images; returns the image paths."""
        os.makedirs(self.img_root, exist_ok=True)

        line = origin_line.copy()
        if not isinstance(line['image_path'], List):
            line['image_path'] = [line['image_path']]
        # Cap the number of decoded pages per deck.
        line['image_path'] = line['image_path'][:self.max_pages]

        if 'image' in line:
            if isinstance(line['image'], list):
                tgt_path = []
                assert 'image_path' in line
                for img, im_name in zip(line['image'], line['image_path']):
                    path = osp.join(self.img_root, im_name)
                    if not read_ok(path):
                        decode_base64_to_image_file(img, path)
                    tgt_path.append(path)
            else:
                tgt_path = osp.join(self.img_root, f"{line['index']}.jpg")
                if not read_ok(tgt_path):
                    decode_base64_to_image_file(line['image'], tgt_path)
                tgt_path = [tgt_path]
        else:
            assert 'image_path' in line
            tgt_path = toliststr(line['image_path'])

        if self.concat_num > 0 and not self.is_api:
            concatenated_images = concat_images(tgt_path, max_concat=self.concat_num, column_num=self.column_num)

            old_tgt_path = tgt_path
            assert isinstance(old_tgt_path, list)
            # Derive cache paths for the composite images from the first
            # page's filename stem.
            if self.column_num != -1:
                tgt_path = [
                    '_'.join(old_tgt_path[0].split('_')[:-1]) + '_concat{}_{}.jpg'.format(self.concat_num, i)
                    for i in range(len(concatenated_images))
                ]
            else:
                tgt_path = ['_'.join(old_tgt_path[0].split('_')[:-1]) + '_concat_all.jpg']

            for path, concatenated_image in zip(tgt_path, concatenated_images):
                if not read_ok(path):
                    decode_base64_to_image_file(encode_image_to_base64(concatenated_image), path)
                    num_images, image_size = len(old_tgt_path), concatenated_image.size
                    print('concat {} images to a new one with size {}. save at {}'.format(num_images, image_size, path))
        return tgt_path

    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        # NOTE(review): declared @classmethod but the first parameter is
        # named `self`; it receives the class object.
        """Extract answers with an LLM judge, then score with
        ANLS / exact-match / F1 via SlideVQA_acc."""
        logger = get_logger('Evaluation')
        model = judge_kwargs['model']

        storage = get_intermediate_file_path(eval_file, f'_{model}')
        tmp_file = get_intermediate_file_path(eval_file, f'_{model}', 'pkl')

        if osp.exists(storage):
            logger.warning(f'GPT scoring file {storage} already exists, will reuse it in SlideVQA_eval. ')
        else:
            data = load(eval_file)
            model = build_judge(max_tokens=128, **judge_kwargs)
            lt = len(data)
            lines = [data.iloc[i] for i in range(lt)]
            tups = [(model, line) for line in lines]
            indices = [line['index'] for line in lines]

            # Resume from checkpoint: skip indices already judged.
            ans = {}
            if osp.exists(tmp_file):
                ans = load(tmp_file)
                tups = [x for x, i in zip(tups, indices) if i not in ans]
                indices = [i for i in indices if i not in ans]

            if len(indices):
                new_results = list()
                for model, line in tqdm(tups):
                    res = MMLongBench_auxeval(model, line)
                    new_results.append(res)

                # NOTE(review): `new_results` only covers the *remaining*
                # indices, but is zipped against ALL indices here — when a
                # partial tmp checkpoint exists, results are associated with
                # the wrong rows and checkpointed answers are ignored.
                # Verify against the upstream track_progress_rich version.
                log_map, res_map, pred_map = {}, {}, {}
                all_inds = [line['index'] for line in lines]
                for k, v in zip(all_inds, new_results):
                    log_map[k] = v['log']
                    res_map[k] = v['res']
                    pred_map[k] = v['pred']
                data['res'] = [res_map[idx] for idx in data['index']]
                data['log'] = [log_map[idx] for idx in data['index']]
                data['pred'] = [pred_map[idx] for idx in data['index']]
                dump(data, storage)

        score = SlideVQA_acc(storage)
        score_pth = get_intermediate_file_path(storage, '_score', 'csv')

        dump(score, score_pth)
        logger.info(f'SlideVQA successfully finished evaluating {eval_file}, results saved in {score_pth}')
        logger.info('Score: ')
        logger.info(score)
VLMEvalKit-sudoku/vlmeval/dataset/tamperbench.py ADDED
@@ -0,0 +1,537 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import huggingface_hub
2
+ from huggingface_hub import snapshot_download
3
+ from ..smp import *
4
+ from ..smp.file import get_intermediate_file_path, get_file_extension
5
+ from .video_base import VideoBaseDataset
6
+ from .utils import build_judge, DEBUG_MESSAGE
7
+ import torchvision.transforms as T
8
+ from torchvision import transforms
9
+ import imageio
10
+ import cv2
11
+ import zipfile
12
+ import os
13
+ import glob
14
+ from .utils.tamperbench import *
15
+ import warnings
16
+
17
+ # constants
18
+ FAIL_MSG = 'Failed to obtain answer via API.'
19
+
20
+
21
class MVTamperBench(VideoBaseDataset):
    """Video-MCQ benchmark probing robustness of video LLMs to tampered clips.

    Reuses the MVBench task suite; every sample additionally carries a
    ``tamper_type`` describing how (or whether) the clip was manipulated.
    """

    # Stem used in `evaluate` to recover the model name from score-file paths.
    BASENAME = "MVTamperBench"
    # Expected MD5 checksum of the generated TSV for each dataset variant.
    MD5 = {
        'MVTamperBench': '3557260881ba47db8add440c5edb742a',
        'MVTamperBenchStart': 'c1d3c299ddbff6000f0d9cad820187b8',
        'MVTamperBenchEnd': 'aa2c19dd02e1b006ee2d4be9f6f2b62b',
    }
    # System prompt prepended to every question in `build_prompt`.
    SYS = """Carefully watch the video and pay attention to the cause and sequence of events, \
"""

    TYPE = 'Video-MCQ'

    def __init__(self, dataset='MVTamperBench', nframe=0, fps=-1):
        self.dataset_name = dataset
        # task name -> (annotation json, video path prefix, data type, needs start/end bound).
        # 'your_data_path' is rewritten to the local 'video' folder when the TSV
        # is generated in `prepare_dataset`.
        self.type_data_list = {
            'Action Sequence': ('action_sequence.json',
                                'your_data_path/star/Charades_v1_480/', 'video', False),  # has start & end
            'Action Prediction': ('action_prediction.json',
                                  'your_data_path/star/Charades_v1_480/', 'video', False),  # has start & end
            'Action Antonym': ('action_antonym.json',
                               'your_data_path/ssv2_video/', 'video', False),
            'Fine-grained Action': ('fine_grained_action.json',
                                    'your_data_path/Moments_in_Time_Raw/videos/', 'video', False),
            'Unexpected Action': ('unexpected_action.json',
                                  'your_data_path/FunQA_test/test/', 'video', False),
            'Object Existence': ('object_existence.json',
                                 'your_data_path/clevrer/video_validation/', 'video', False),
            'Object Interaction': ('object_interaction.json',
                                   'your_data_path/star/Charades_v1_480/', 'video', False),  # has start & end
            'Object Shuffle': ('object_shuffle.json',
                               'your_data_path/perception/videos/', 'video', False),
            'Moving Direction': ('moving_direction.json',
                                 'your_data_path/clevrer/video_validation/', 'video', False),
            'Action Localization': ('action_localization.json',
                                    'your_data_path/sta/sta_video/', 'video', False),  # has start & end
            'Scene Transition': ('scene_transition.json',
                                 'your_data_path/scene_qa/video/', 'video', False),
            'Action Count': ('action_count.json',
                             'your_data_path/perception/videos/', 'video', False),
            'Moving Count': ('moving_count.json',
                             'your_data_path/clevrer/video_validation/', 'video', False),
            'Moving Attribute': ('moving_attribute.json',
                                 'your_data_path/clevrer/video_validation/', 'video', False),
            'State Change': ('state_change.json',
                             'your_data_path/perception/videos/', 'video', False),
            'Character Order': ('character_order.json',
                                'your_data_path/perception/videos/', 'video', False),
            'Egocentric Navigation': ('egocentric_navigation.json',
                                      'your_data_path/vlnqa/', 'video', False),
            'Episodic Reasoning': ('episodic_reasoning.json',
                                   'your_data_path/tvqa/frames_fps3/', 'video', False),  # has start & end
            'Counterfactual Inference': ('counterfactual_inference.json',
                                         'your_data_path/clevrer/video_validation/', 'video', False),
        }
        super().__init__(dataset=dataset, nframe=nframe, fps=fps)
77
+
78
+ @classmethod
79
+ def supported_datasets(cls):
80
+ return ['MVTamperBench', 'MVTamperBenchStart', 'MVTamperBenchEnd']
81
+
82
    def prepare_dataset(self, dataset_name='MVTamperBench', repo_id=None):
        """Download (if needed) and index the dataset; return root dir and TSV path.

        Args:
            dataset_name (str): Variant name; overridden by ``repo_id`` when given.
            repo_id (str, optional): HuggingFace dataset repo id; defaults to
                ``Srikant86/<dataset_name>``.

        Returns:
            dict: ``{'root': dataset_path, 'data_file': <path to TSV index>}``.
        """
        if repo_id:
            # Derive the local dataset name from the repo id.
            dataset_name = repo_id.split('/')[-1]
        else:
            repo_id = f'Srikant86/{dataset_name}'

        def check_integrity(pth):
            """
            Verifies the completeness and consistency of the dataset located at the specified path.

            Args:
                path_to_dataset (str): The directory path where the dataset is stored.

            Returns:
                bool: True if the dataset is intact, False otherwise.
            """
            # Construct the full path to the data file
            data_file = osp.join(pth, f'{dataset_name}.tsv')

            # Check if the data file exists
            if not os.path.exists(data_file):
                # If the data file doesn't exist, immediately return False
                return False
            # Verify the integrity of the data file by checking its MD5 hash
            if md5(data_file) != self.MD5[dataset_name]:
                return False
            # Load the data from the data file
            data = load(data_file)
            # Every referenced video file must be present on disk.
            for idx, item in data.iterrows():
                if not osp.exists(osp.join(pth, item['prefix'], item['video'])):
                    return False
            # If all checks pass, the dataset is considered intact
            return True

        cache_path = get_cache_path(repo_id, branch='main')
        if cache_path is not None and check_integrity(cache_path):
            dataset_path = cache_path
        else:
            def unzip_hf_zip(pth):
                # Extract every zip archive under the 'video/' folder in place.
                pth = os.path.join(pth, 'video/')
                for filename in os.listdir(pth):
                    if filename.endswith('.zip'):
                        # Build the full path to the archive
                        zip_path = os.path.join(pth, filename)

                        # Extract the ZIP archive
                        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                            zip_ref.extractall(pth)

            def generate_tsv(pth):
                # Build the TSV index from the per-task json annotations, keeping
                # only samples whose video file actually exists locally.
                data_file = osp.join(pth, f'{dataset_name}.tsv')
                if os.path.exists(data_file) and md5(data_file) == self.MD5[dataset_name]:
                    return
                json_data_dir = os.path.join(dataset_path, 'json')
                self.data_list = []
                for k, v in self.type_data_list.items():
                    with open(os.path.join(json_data_dir, v[0]), 'r') as f:
                        json_data = json.load(f)
                        for data in json_data:
                            if os.path.exists(
                                    os.path.join(dataset_path, v[1].replace('your_data_path', 'video'), data['video'])):
                                self.data_list.append({
                                    'task_type': k,
                                    'prefix': v[1].replace('your_data_path', 'video'),
                                    'data_type': v[2],
                                    'bound': v[3],
                                    'start': data['start'] if 'start' in data.keys() else None,
                                    'end': data['end'] if 'end' in data.keys() else None,
                                    'video': data['video'],
                                    'question': data['question'],
                                    'answer': data['answer'],
                                    'candidates': data['candidates'],
                                    'tamper_type': data['tamper_type'],
                                    'task_tamper_type': f"{k}_{data['tamper_type']}"
                                })

                data_df = pd.DataFrame(self.data_list)
                data_df = data_df.assign(index=range(len(data_df)))
                data_df.to_csv(data_file, sep='\t', index=False)

            def move_files(pth):
                # special for mvbench/data0613 supplementary data
                src_folder = os.path.join(pth, 'video/data0613')
                if not os.path.exists(src_folder):
                    return
                for subdir in os.listdir(src_folder):
                    subdir_path = os.path.join(src_folder, subdir)
                    if os.path.isdir(subdir_path):
                        for subsubdir in os.listdir(subdir_path):
                            subsubdir_path = os.path.join(subdir_path, subsubdir)
                            if os.path.isdir(subsubdir_path):
                                for item in os.listdir(subsubdir_path):
                                    item_path = os.path.join(subsubdir_path, item)
                                    target_folder = os.path.join(pth, 'video', subdir, subsubdir)
                                    if not os.path.exists(os.path.join(target_folder, item)):
                                        shutil.move(item_path, os.path.join(target_folder, item))

                # Flatten the perception videos one level up.
                src_folder = os.path.join(pth, 'video/perception')
                if not os.path.exists(src_folder):
                    return
                for subdir in os.listdir(src_folder):
                    subdir_path = os.path.join(src_folder, subdir)
                    if os.path.isdir(subdir_path):
                        for subsubdir in os.listdir(subdir_path):
                            subsubdir_path = os.path.join(subdir_path, subsubdir)
                            if os.path.isdir(subsubdir_path):
                                # NOTE(review): this re-check of src_folder looks
                                # redundant (it was verified above) — confirm intent.
                                if not os.path.exists(src_folder):
                                    return
                                for item in os.listdir(subsubdir_path):
                                    item_path = os.path.join(subsubdir_path, item)
                                    target_folder = os.path.join(pth, 'video/perception', subdir)
                                    if not os.path.exists(os.path.join(target_folder, item)):
                                        shutil.move(item_path, target_folder)

            # NOTE(review): login is attempted even when HUGGINGFACE_TOKEN is
            # unset (hf_token would be None) — confirm this is intended.
            hf_token = os.environ.get('HUGGINGFACE_TOKEN')
            huggingface_hub.login(hf_token)
            dataset_path = snapshot_download(repo_id=repo_id, repo_type='dataset')
            unzip_hf_zip(dataset_path)
            move_files(dataset_path)
            generate_tsv(dataset_path)

        data_file = osp.join(dataset_path, f'{dataset_name}.tsv')

        # Dispatch table: data type -> frame decoding routine.
        self.decord_method = {
            'video': self.read_video,
            'gif': self.read_gif,
            'frame': self.read_frame,
        }

        self.nframe = 8
        self.frame_fps = 3

        # transform
        self.transform = T.Compose([
            Stack(),
            ToTorchFormatTensor()
        ])

        return dict(root=dataset_path, data_file=data_file)
221
+
222
+ def get_index(self, bound, fps, max_frame, first_idx=0):
223
+ start, end = bound if bound else (-100000, 100000)
224
+ start_idx = max(first_idx, round(start * fps))
225
+ end_idx = min(round(end * fps), max_frame)
226
+ seg_size = (end_idx - start_idx) / self.num_segments
227
+ mid_seg_size = seg_size / 2
228
+ indices = np.arange(self.num_segments)
229
+ frame_indices = start_idx + mid_seg_size + np.round(seg_size * indices)
230
+ return frame_indices.astype(int)
231
+
232
+ def read_video(self, video_path, bound=None):
233
+ from decord import VideoReader, cpu
234
+ vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
235
+ max_frame = len(vr) - 1
236
+ fps = float(vr.get_avg_fps())
237
+
238
+ images_group = list()
239
+ frame_indices = self.get_index(bound, fps, max_frame, first_idx=0)
240
+ for frame_index in frame_indices:
241
+ img = Image.fromarray(vr[frame_index].asnumpy())
242
+ images_group.append(img)
243
+ torch_imgs = self.transform(images_group)
244
+ return torch_imgs
245
+
246
+ def read_gif(self, video_path, bound=None, fps=25):
247
+ gif = imageio.get_reader(video_path)
248
+ max_frame = len(gif) - 1
249
+
250
+ images_group = list()
251
+ frame_indices = self.get_index(bound, fps, max_frame, first_idx=0)
252
+ for index, frame in enumerate(gif):
253
+ if index in frame_indices:
254
+ img = cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB)
255
+ img = Image.fromarray(img)
256
+ images_group.append(img)
257
+ torch_imgs = self.transform(images_group)
258
+ return torch_imgs
259
+
260
+ def read_frame(self, video_path, bound=None, fps=3):
261
+ """
262
+ Reads frames from a video directory, processes them, and returns a tensor of images.
263
+
264
+ Args:
265
+ video_path (str): Path to the directory containing video frames.
266
+ bound (tuple, optional): A tuple specifying the range of frames to read. Defaults to None.
267
+ fps (int, optional): Frames per second to sample from the video. Defaults to 3.
268
+
269
+ Returns:
270
+ torch.Tensor: A tensor containing the processed images.
271
+ """
272
+ max_frame = len(os.listdir(video_path))
273
+ images_group = list()
274
+ frame_indices = self.get_index(bound, fps, max_frame, first_idx=1) # frame_idx starts from 1
275
+ for frame_index in frame_indices:
276
+ img = Image.open(os.path.join(video_path, f'{frame_index:05d}.jpg'))
277
+ images_group.append(img)
278
+ torch_imgs = self.transform(images_group)
279
+ return torch_imgs
280
+
281
+ def save_video_frames(self, imgs, video_name, frames):
282
+
283
+ frame_paths = self.frame_paths(video_name)
284
+ flag = np.all([osp.exists(p) for p in frame_paths])
285
+
286
+ if not flag:
287
+ lock_path = osp.join(self.frame_root, f'{video_name}.lock')
288
+ with portalocker.Lock(lock_path, 'w', timeout=30):
289
+ if not np.all([osp.exists(p) for p in frame_paths]):
290
+ block_size = imgs.size(0) // frames
291
+ split_tensors = torch.split(imgs, block_size)
292
+ to_pil = transforms.ToPILImage()
293
+ images = [to_pil(arr) for arr in split_tensors]
294
+ for im, pth in zip(images, frame_paths):
295
+ if not osp.exists(pth):
296
+ im.save(pth)
297
+
298
+ return frame_paths
299
+
300
+ def qa_template(self, data):
301
+ question = f"Question: {data['question']}\n"
302
+ question += 'Options:\n'
303
+ answer = data['answer']
304
+ answer_idx = -1
305
+ for idx, c in enumerate(eval(data['candidates'])):
306
+ question += f"({chr(ord('A') + idx)}) {c}\n"
307
+ if c == answer:
308
+ answer_idx = idx
309
+ question = question.rstrip()
310
+ answer = f"({chr(ord('A') + answer_idx)}) {answer}"
311
+ return question, answer
312
+
313
+ def load_into_video_and_process(self, line):
314
+ """
315
+ Loads a video or image sequence, processes it, and returns the path to the processed video.
316
+
317
+ Args:
318
+ line (dict): A dictionary containing the following keys:
319
+ - 'prefix' (str): The prefix path to the video or image sequence.
320
+ - 'video' (str): The video file name or directory containing image frames.
321
+ - 'data_type' (str): The type of data, either 'gif', 'webm', or 'frame'.
322
+ - 'bound' (bool): Whether to process a subclip of the video.
323
+ - 'start' (float): The start time of the subclip (if 'bound' is True).
324
+ - 'end' (float): The end time of the subclip (if 'bound' is True).
325
+
326
+ Returns:
327
+ str: The path to the processed video file.
328
+
329
+ Raises:
330
+ ImportError: If MoviePy is not installed.
331
+ """
332
+ try:
333
+ from moviepy.editor import VideoFileClip, ImageSequenceClip
334
+ except:
335
+ raise ImportError(
336
+ 'MoviePy is not installed, please install it by running "pip install moviepy==1.0.3"'
337
+ )
338
+ video_path = os.path.join(self.data_root, line['prefix'], line['video'])
339
+
340
+ if line['data_type'] in ['gif'] or os.path.splitext(video_path)[1] in ['.webm']:
341
+ processed_video_path = video_path.replace(os.path.splitext(video_path)[1], '.mp4')
342
+ if not os.path.exists(processed_video_path):
343
+ # using MoviePy to transform GIF, webm into mp4 format
344
+ gif_clip = VideoFileClip(video_path)
345
+ gif_clip.write_videofile(processed_video_path, codec='libx264')
346
+ gif_clip.close()
347
+ elif line['data_type'] in ['frame']:
348
+ input_images = os.path.join(video_path, '*.jpg')
349
+ processed_video_path = f'{video_path}.mp4'
350
+ if not os.path.exists(processed_video_path):
351
+ # using MoviePy to transform images into mp4
352
+ image_files = sorted(glob.glob(input_images))
353
+ image_clip = ImageSequenceClip(image_files, fps=self.frame_fps)
354
+ image_clip.write_videofile(processed_video_path, codec='libx264')
355
+ image_clip.close()
356
+ else:
357
+ processed_video_path = video_path
358
+
359
+ if line['bound']:
360
+ base_name, suffix = os.path.splitext(processed_video_path)
361
+ output_video_path = f'{base_name}_processed{suffix}'
362
+ if not os.path.exists(output_video_path):
363
+ video_clip = VideoFileClip(processed_video_path)
364
+ clip = video_clip.subclip(line['start'], min(line['end'], video_clip.duration))
365
+ clip.write_videofile(output_video_path)
366
+ clip.close()
367
+ else:
368
+ output_video_path = processed_video_path
369
+
370
+ return output_video_path
371
+
372
+ def save_video_into_images(self, line):
373
+ bound = None
374
+ if line['bound']:
375
+ bound = (
376
+ line['start'],
377
+ line['end'],
378
+ )
379
+ video_path = os.path.join(self.data_root, line['prefix'], line['video'])
380
+ decord_method = self.decord_method[line['data_type']]
381
+ self.num_segments = self.nframe
382
+ torch_imgs = decord_method(video_path, bound)
383
+ img_frame_paths = self.save_video_frames(torch_imgs, line['video'], self.num_segments)
384
+ return img_frame_paths
385
+
386
+ def build_prompt(self, line, video_llm):
387
+ """
388
+ Builds a prompt for a language model based on the provided data and settings.
389
+
390
+ Args:
391
+ line (int or dict): Either an integer index into the dataset or dictionary representing a single data point.
392
+ video_llm (bool): Whether to use a video-based language model or process individual frames as images.
393
+
394
+ Returns:
395
+ list: A list of dictionaries representing the constructed prompt, where each dictionary contains the type
396
+ and value of the prompt element.
397
+
398
+ Raises:
399
+ ValueError: If the frame rate (fps) is greater than zero, indicating that this method
400
+ is not compatible with MVBench's requirements.
401
+ """
402
+ # Ensure that the frame rate is not set, as MVBench does not support it
403
+ if self.fps > 0:
404
+ raise ValueError('MVBench does not support fps setting, please transfer to MVBench_MP4!')
405
+
406
+ # If line is an integer, retrieve the corresponding data point from the d
407
+ if isinstance(line, int):
408
+ assert line < len(self)
409
+ line = self.data.iloc[line]
410
+
411
+ # Generate the question and answer pair based on the current data point
412
+ question, answer = self.qa_template(line)
413
+ # Initialize the prompt with a system message
414
+ message = [dict(type='text', value=self.SYS, role='system')]
415
+ # Add the generated question to the prompt
416
+ message.append(dict(type='text', value=question))
417
+ # Process the video data according to the specified mode
418
+ if video_llm:
419
+ # Load the video and process it for the video-based langua
420
+ new_video_path = self.load_into_video_and_process(line)
421
+ message.append(dict(type='video', value=new_video_path))
422
+ else:
423
+ # Save the video as individual image frames for processing
424
+ img_frame_paths = self.save_video_into_images(line)
425
+ for im in img_frame_paths:
426
+ message.append(dict(type='image', value=im))
427
+ # Add instructions to the prompt
428
+ message.append(dict(type='text', value='\nOnly give the best option.'))
429
+ # Indicate the start of the assistant's response
430
+ message.append(dict(type='text', value='Best option:(', role='assistant'))
431
+ return message
432
+
433
    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """
        Evaluates the given evaluation file and generates ratings based on different dimensions.

        Args:
            eval_file (str): Path to the evaluation file. The file should be in a supported format (xlsx/json/tsv).
            **judge_kwargs: Additional keyword arguments for the judge model.

        Returns:
            dict: A dictionary containing ratings for task type, tamper type, and task-tamper type.

        Raises:
            AssertionError: If the eval_file is not a supported format.
            Warning: If the OPENAI API is not working properly or the API key is not set,
                exact matching will be used for evaluation.

        Notes:
            - The function generates temporary files and score files based on the eval_file name.
            - If the score file already exists, it will be used directly.
            - The function processes the data, evaluates predictions, and calculates scores.
            - Ratings are generated for different dimensions and saved to respective files.
        """

        assert get_file_extension(eval_file) in ['xlsx', 'json', 'tsv'], 'data file should be an supported format (xlsx/json/tsv) file'  # noqa: E501

        # Derived output paths, all siblings of eval_file.
        tmp_file = get_intermediate_file_path(eval_file, '_tmp', 'pkl')
        tgt_task_type_file = get_intermediate_file_path(eval_file, '_task_type_rating', 'json')
        tgt_tamper_type_file = get_intermediate_file_path(eval_file, '_tamper_type_rating', 'json')
        tgt_task_tamper_type_file = get_intermediate_file_path(eval_file, '_task_tamper_type_rating', 'json')
        score_file = get_intermediate_file_path(eval_file, '_score')
        score_metrics_file = get_intermediate_file_path(eval_file, '_score_f1')
        action_metrics_file = get_intermediate_file_path(eval_file, '_action_f1')

        if not osp.exists(score_file):
            model = judge_kwargs.setdefault('model', 'chatgpt-0125')
            assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125']

            # Resolve the judge: None means fall back to exact matching.
            if model == 'exact_matching':
                model = None
            elif gpt_key_set():
                model = build_judge(**judge_kwargs)
                if not model.working():
                    warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
                    warnings.warn(DEBUG_MESSAGE)
                    model = None
            else:
                warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
                model = None
            # NOTE(review): `res` is loaded and filtered here but never used
            # afterwards in this method — confirm whether it can be removed.
            res = {} if not osp.exists(tmp_file) else load(tmp_file)
            res = {k: v for k, v in res.items() if FAIL_MSG not in v}

            data = load(eval_file)
            # Rows with an actual prediction (non-NaN).
            data_un = data[~pd.isna(data['prediction'])]

            for idx in data_un['index']:
                ans = data.loc[data['index'] == idx, 'answer'].values[0]
                pred = data.loc[data['index'] == idx, 'prediction'].values[0]
                options = eval(data.loc[data['index'] == idx, 'candidates'].values[0])
                answer_idx = -1
                for id, c in enumerate(options):
                    if c == ans:
                        answer_idx = id
                # Ground truth rendered as "(X) <answer text>".
                ans = f"({chr(ord('A') + answer_idx)}) {ans}"
                input_item = data.loc[data['index'] == idx].to_dict(orient='records')[0]
                # Expose options as 'A', 'B', ... keys and map the answer to its letter.
                for id, option_content in enumerate(eval(input_item['candidates'])):
                    input_item[chr(ord('A') + id)] = option_content
                    if option_content == input_item['answer']:
                        input_item['answer'] = chr(ord('A') + id)

                # NOTE(review): assignments below use the 'index' column *value*
                # as a .loc row label; this assumes it coincides with the
                # DataFrame's index labels — verify for re-indexed inputs.
                if FAIL_MSG in pred:
                    data.loc[idx, 'score'] = -1
                else:
                    data.loc[idx, 'score'] = int(check_ans_with_model(
                        pred, ans, model,
                        input_item,
                        'MVTamperBench'
                    ))

            rejected = [x for x in data['score'] if x == -1]

            print(
                f'Among {len(data)} questions, failed to obtain prediction for {len(data) - len(data_un)} questions, '
                f'failed to obtain the score for another {len(rejected)} questions. '
                f'Those questions will be counted as -1 score in ALL rating, and will not be counted in VALID rating.'
            )

            dump(data, score_file)

        # Recover the model name from the score-file path (prefix before _<BASENAME>).
        model_name = score_file.split(f"_{self.BASENAME}")[0].split("/")[-1]

        score_metrics = process_results(score_file, model_name)
        dump(score_metrics, score_metrics_file)

        action_metrics = aggregate_metrics_with_macro_average(score_file)
        dump(action_metrics, action_metrics_file)

        # Per-dimension ratings, each persisted to its own json file.
        rating_task_type = get_dimension_rating(score_file, 'task_type')
        dump(rating_task_type, tgt_task_type_file)
        rating_tamper_type = get_dimension_rating(score_file, 'tamper_type')
        dump(rating_tamper_type, tgt_tamper_type_file)
        rating_task_tamper_type = get_dimension_rating(score_file, 'task_tamper_type')
        dump(rating_task_tamper_type, tgt_task_tamper_type_file)
        rating = {**rating_task_type, **rating_tamper_type, **rating_task_tamper_type}
        return rating
VLMEvalKit-sudoku/vlmeval/dataset/tempcompass.py ADDED
@@ -0,0 +1,646 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import huggingface_hub
2
+ from huggingface_hub import snapshot_download
3
+ from ..smp import *
4
+ from .video_concat_dataset import ConcatVideoDataset
5
+ from .video_base import VideoBaseDataset
6
+ from .utils import build_judge, DEBUG_MESSAGE
7
+ from ..utils import track_progress_rich
8
+ import torchvision.transforms as T
9
+ from torchvision import transforms
10
+ from torchvision.transforms.functional import InterpolationMode
11
+ from .utils.tempcompass import *
12
+
13
+
14
+ FAIL_MSG = 'Failed to obtain answer via API.'
15
+
16
+
17
class TempCompass(ConcatVideoDataset):
    """Aggregate TempCompass dataset combining the MCQ, captioning and yes/no splits."""

    def __init__(self, dataset='TempCompass', nframe=0, fps=-1):
        # Register the three sub-datasets this concat dataset is built from.
        self.DATASET_SETS[dataset] = ['TempCompass_MCQ', 'TempCompass_Captioning', 'TempCompass_YorN']
        super().__init__(dataset=dataset, nframe=nframe, fps=fps)

    @classmethod
    def supported_datasets(cls):
        return ['TempCompass']

    def evaluate(self, eval_file, **judge_kwargs):
        """Evaluate all splits, then append per-dim / per-task / overall accuracy rows.

        Returns the result DataFrame and also dumps it to an ``_acc.csv`` sibling
        of ``eval_file``.
        """
        result = super().evaluate(eval_file=eval_file, **judge_kwargs)
        # The concat evaluator indexes rows as '<dim>. <task_type>'; expose that as a column.
        result = result.reset_index().rename(columns={'index': 'dim.task_type'})
        score_file = get_intermediate_file_path(eval_file, '_acc', 'csv')
        # Accumulate success/overall counts per dim, per task_type, and overall.
        avg_dict = {}
        for idx, item in result.iterrows():
            dim, task_type = item['dim.task_type'].split('. ')
            if dim not in avg_dict:
                avg_dict[dim] = {'success': 0.0, 'overall': 0.0}
            if task_type not in avg_dict:
                avg_dict[task_type] = {'success': 0.0, 'overall': 0.0}
            if 'overall' not in avg_dict:
                avg_dict['overall'] = {'success': 0.0, 'overall': 0.0}
            avg_dict[dim]['success'] += item['success']
            avg_dict[dim]['overall'] += item['overall']
            avg_dict[task_type]['success'] += item['success']
            avg_dict[task_type]['overall'] += item['overall']
            avg_dict['overall']['success'] += item['success']
            avg_dict['overall']['overall'] += item['overall']
            result.loc[idx, 'acc'] = round(item['success'] / item['overall'] * 100, 2)
        for key, value in avg_dict.items():
            # Append one aggregate summary row per accumulated key via .loc.
            result.loc[len(result)] = {
                'dim.task_type': key,
                'success': value['success'],
                'overall': value['overall'],
                'acc': round(value['success'] / value['overall'] * 100, 2)
            }
        dump(result, score_file)
        return result
56
+
57
+
58
class TempCompass_MCQ(VideoBaseDataset):
    """Multiple-choice split of TempCompass (multi-choice + caption-matching tasks)."""

    # Expected MD5 checksum of the generated TSV index.
    MD5 = '7efbb9e6d9dabacd22daf274852691dd'
    TYPE = 'Video-MCQ'

    def __init__(self, dataset='TempCompass_MCQ', nframe=0, fps=-1):
        # task name -> (annotation json, video path prefix, video file suffix).
        self.type_data_list = {
            'multi-choice': ('multi-choice.json', './videos', '.mp4'),
            'caption_matching': ('caption_matching.json', './videos', '.mp4'),
        }
        super().__init__(dataset=dataset, nframe=nframe, fps=fps)
69
+
70
+ @classmethod
71
+ def supported_datasets(cls):
72
+ return ['TempCompass_MCQ']
73
+
74
    def prepare_dataset(self, dataset_name='TempCompass_MCQ', repo_id='lmms-lab/TempCompass'):
        """Download (if needed) and index the MCQ split; return root dir and TSV path.

        Returns:
            dict: ``{'root': dataset_path, 'data_file': <path to TSV index>}``.
        """
        def check_integrity(pth):
            # The cached copy is valid iff the TSV exists, its MD5 matches, and
            # every referenced video file is present.
            data_file = osp.join(pth, f'{dataset_name}.tsv')

            if not osp.exists(data_file):
                return False

            if md5(data_file) != self.MD5:
                return False

            data = load(data_file)
            for idx, item in data.iterrows():
                if not osp.exists(osp.join(pth, item['prefix'], item['video'] + item['suffix'])):
                    return False
            return True

        cache_path = get_cache_path(repo_id)
        if cache_path is not None and check_integrity(cache_path):
            dataset_path = cache_path
        else:
            def read_parquet(pth):
                # Convert the HF parquet annotations to per-task json files.
                import pandas as pd
                for task_name in self.type_data_list.keys():
                    if not osp.exists(osp.join(pth, f'{task_name}.json')):
                        data = pd.read_parquet(osp.join(pth, task_name, 'test-00000-of-00001.parquet'))
                        data.to_json(osp.join(pth, f'{task_name}.json'), orient='records', lines=False)

            def unzip_videos(pth):
                # Extract the bundled video archive once.
                import zipfile
                if not osp.exists(osp.join(pth, 'videos')):
                    zip_file = osp.join(pth, 'tempcompass_videos.zip')
                    with zipfile.ZipFile(zip_file, 'r') as zip_ref:
                        zip_ref.extractall(pth)

            def generate_tsv(pth):
                # Build the TSV index from the per-task json annotations.
                data_file = osp.join(pth, f'{dataset_name}.tsv')
                if osp.exists(data_file) and md5(data_file) == self.MD5:
                    return
                self.data_list = []
                for k, v in self.type_data_list.items():
                    with open(osp.join(pth, v[0]), 'r') as f:
                        json_data = json.load(f)
                        for data in json_data:
                            # The question field bundles the options after the
                            # first newline; split them apart here.
                            self.data_list.append({
                                'task_type': k,
                                'prefix': v[1],
                                'suffix': v[2],
                                'video': data['video_id'],
                                'question': data['question'].split('\n')[0],
                                'answer': data['answer'],
                                'dim': data['dim'],
                                'candidates': data['question'].split('\n')[1:],
                            })

                data_df = pd.DataFrame(self.data_list)
                data_df = data_df.assign(index=range(len(data_df)))
                data_df.to_csv(data_file, sep='\t', index=False)

            # Prefer the ModelScope mirror when configured.
            if modelscope_flag_set():
                from modelscope import dataset_snapshot_download
                dataset_path = dataset_snapshot_download(dataset_id=repo_id)
            else:
                dataset_path = snapshot_download(repo_id=repo_id, repo_type='dataset')
            read_parquet(dataset_path)
            unzip_videos(dataset_path)
            generate_tsv(dataset_path)

        data_file = osp.join(dataset_path, f'{dataset_name}.tsv')
        return dict(root=dataset_path, data_file=data_file)
144
+ def qa_template(self, data):
145
+ question = data['question'] + '\n' + '\n'.join(eval(data['candidates']))
146
+ answer = data['answer']
147
+ return question, answer
148
+
149
+ def save_video_frames(self, line):
150
+ vid_path = osp.join(self.data_root, line['prefix'], line['video'] + line['suffix'])
151
+ import decord
152
+ vid = decord.VideoReader(vid_path)
153
+ video_info = {
154
+ 'fps': vid.get_avg_fps(),
155
+ 'n_frames': len(vid),
156
+ }
157
+ if self.nframe > 0 and self.fps < 0:
158
+ step_size = len(vid) / (self.nframe + 1)
159
+ indices = [int(i * step_size) for i in range(1, self.nframe + 1)]
160
+ frame_paths = self.frame_paths(line['video'])
161
+ elif self.fps > 0:
162
+ # not constrained by num_frames, get frames by fps
163
+ total_duration = video_info['n_frames'] / video_info['fps']
164
+ required_frames = int(total_duration * self.fps)
165
+ step_size = video_info['fps'] / self.fps
166
+ indices = [int(i * step_size) for i in range(required_frames)]
167
+ frame_paths = self.frame_paths_fps(line['video'], len(indices))
168
+
169
+ flag = np.all([osp.exists(p) for p in frame_paths])
170
+
171
+ if not flag:
172
+ lock_path = osp.splitext(vid_path)[0] + '.lock'
173
+ with portalocker.Lock(lock_path, 'w', timeout=30):
174
+ if not np.all([osp.exists(p) for p in frame_paths]):
175
+ images = [vid[i].asnumpy() for i in indices]
176
+ images = [Image.fromarray(arr) for arr in images]
177
+ for im, pth in zip(images, frame_paths):
178
+ if not osp.exists(pth):
179
+ im.save(pth)
180
+
181
+ return frame_paths
182
+
183
+ def save_video_into_images(self, line):
184
+ frame_paths = self.save_video_frames(line)
185
+ return frame_paths
186
+
187
+ def build_prompt(self, line, video_llm):
188
+ if isinstance(line, int):
189
+ assert line < len(self)
190
+ line = self.data.iloc[line]
191
+
192
+ question, answer = self.qa_template(line)
193
+ message = []
194
+ video_path = osp.join(self.data_root, line['prefix'], line['video'] + line['suffix'])
195
+ if video_llm:
196
+ message.append(dict(type='video', value=video_path))
197
+ else:
198
+ img_frame_paths = self.save_video_into_images(line)
199
+ for im in img_frame_paths:
200
+ message.append(dict(type='image', value=im))
201
+ message.append(dict(type='text', value=question))
202
+ message.append(dict(type='text', value='\nPlease directly give the best option:'))
203
+ return message
204
+
205
+ @classmethod
206
+ def evaluate(self, eval_file, **judge_kwargs):
207
+ model = judge_kwargs.get('model', 'exact_matching')
208
+ assert model in ['chatgpt-1106', 'exact_matching']
209
+ judge_kwargs.update({
210
+ "max_tokens": 128,
211
+ "temperature": 1.0,
212
+ "top_p": 1,
213
+ "presence_penalty": 1,
214
+ })
215
+
216
+ score_file = get_intermediate_file_path(eval_file, f'_{model}_score')
217
+ tmp_file = get_intermediate_file_path(eval_file, f'_{model}', 'pkl')
218
+ nproc = judge_kwargs.pop('nproc', 4)
219
+
220
+ if not osp.exists(score_file):
221
+ data = load(eval_file)
222
+ if model != 'exact_matching':
223
+ model = build_judge(system_prompt=sys_prompt, **judge_kwargs)
224
+ else:
225
+ model = None
226
+
227
+ lt = len(data)
228
+ lines = [data.iloc[i] for i in range(lt)]
229
+ tups = [(model, line) for line in lines]
230
+ indices = [line['index'] for line in lines]
231
+
232
+ ans = {}
233
+ if osp.exists(tmp_file):
234
+ ans = load(tmp_file)
235
+ tups = [x for x, i in zip(tups, indices) if i not in ans]
236
+ indices = [i for i in indices if i not in ans]
237
+
238
+ if len(indices):
239
+ _ = track_progress_rich(
240
+ evaluate_tempcompass_mcq,
241
+ tups,
242
+ nproc=nproc,
243
+ chunksize=nproc,
244
+ keys=indices,
245
+ save=tmp_file,
246
+ )
247
+ ans = load(tmp_file)
248
+ for idx, item in data.iterrows():
249
+ data.loc[idx, 'score'] = ans[idx]['rating']
250
+ dump(data, score_file)
251
+
252
+ rating = get_dimension_rating(score_file)
253
+ return rating
254
+
255
+
256
class TempCompass_Captioning(VideoBaseDataset):
    """TempCompass captioning split: open-ended caption-style QA over short videos."""

    # md5 of the generated TSV, used to validate a cached download
    MD5 = '35be9bf2581ea7767f02e9a8f37ae1ab'
    TYPE = 'Video-VQA'

    def __init__(self, dataset='TempCompass_Captioning', nframe=0, fps=-1):
        # task name -> (annotation json, video sub-dir, video extension)
        self.type_data_list = {
            'captioning': ('captioning.json', './videos', '.mp4'),
        }
        super().__init__(dataset=dataset, nframe=nframe, fps=fps)

    @classmethod
    def supported_datasets(cls):
        return ['TempCompass_Captioning']

    def prepare_dataset(self, dataset_name='TempCompass_Captioning', repo_id='lmms-lab/TempCompass'):
        """Download (if needed) and materialize the dataset; return root + tsv path."""
        def check_integrity(pth):
            # cached copy is valid only if the tsv exists, matches MD5,
            # and every referenced video file is present
            data_file = osp.join(pth, f'{dataset_name}.tsv')

            if not osp.exists(data_file):
                return False

            if md5(data_file) != self.MD5:
                return False

            data = load(data_file)
            for idx, item in data.iterrows():
                if not osp.exists(osp.join(pth, item['prefix'], item['video'] + item['suffix'])):
                    return False
            return True

        cache_path = get_cache_path(repo_id)
        if cache_path is not None and check_integrity(cache_path):
            dataset_path = cache_path
        else:
            def read_parquet(pth):
                # convert the hub parquet files into per-task json once
                import pandas as pd
                for task_name in self.type_data_list.keys():
                    if not osp.exists(osp.join(pth, f'{task_name}.json')):
                        data = pd.read_parquet(osp.join(pth, task_name, 'test-00000-of-00001.parquet'))
                        data.to_json(osp.join(pth, f'{task_name}.json'), orient='records', lines=False)

            def unzip_videos(pth):
                import zipfile
                if not osp.exists(osp.join(pth, 'videos')):
                    zip_file = osp.join(pth, 'tempcompass_videos.zip')
                    with zipfile.ZipFile(zip_file, 'r') as zip_ref:
                        zip_ref.extractall(pth)

            def generate_tsv(pth):
                # flatten the per-task json annotations into one indexed tsv
                data_file = osp.join(pth, f'{dataset_name}.tsv')
                if osp.exists(data_file) and md5(data_file) == self.MD5:
                    return
                self.data_list = []
                for k, v in self.type_data_list.items():
                    with open(osp.join(pth, v[0]), 'r') as f:
                        json_data = json.load(f)
                    for data in json_data:
                        self.data_list.append({
                            'task_type': k,
                            'prefix': v[1],
                            'suffix': v[2],
                            'video': data['video_id'],
                            'question': data['question'],
                            'answer': data['answer'],
                            'dim': data['dim'],
                            'mc_question': data['mc_question'],
                            'mc_answer': data['mc_answer'],
                        })

                data_df = pd.DataFrame(self.data_list)
                data_df = data_df.assign(index=range(len(data_df)))
                data_df.to_csv(data_file, sep='\t', index=False)

            if modelscope_flag_set():
                from modelscope import dataset_snapshot_download
                dataset_path = dataset_snapshot_download(dataset_id=repo_id)
            else:
                dataset_path = snapshot_download(repo_id=repo_id, repo_type='dataset')
            read_parquet(dataset_path)
            unzip_videos(dataset_path)
            generate_tsv(dataset_path)

        data_file = osp.join(dataset_path, f'{dataset_name}.tsv')
        return dict(root=dataset_path, data_file=data_file)

    def qa_template(self, data):
        """Return ``(question, answer)``; captioning has no options to append."""
        question = data['question']
        answer = data['answer']
        return question, answer

    def save_video_frames(self, line):
        """Sample frames from the video of `line`, cache them, return their paths.

        ``nframe > 0`` with ``fps < 0`` samples a fixed count uniformly;
        ``fps > 0`` samples at a fixed rate. NOTE(review): with ``nframe == 0``
        and ``fps < 0`` neither branch binds `frame_paths` -> NameError.
        """
        vid_path = osp.join(self.data_root, line['prefix'], line['video'] + line['suffix'])
        import decord
        vid = decord.VideoReader(vid_path)
        video_info = {
            'fps': vid.get_avg_fps(),
            'n_frames': len(vid),
        }
        if self.nframe > 0 and self.fps < 0:
            step_size = len(vid) / (self.nframe + 1)
            indices = [int(i * step_size) for i in range(1, self.nframe + 1)]
            frame_paths = self.frame_paths(line['video'])
        elif self.fps > 0:
            # not constrained by num_frames, get frames by fps
            total_duration = video_info['n_frames'] / video_info['fps']
            required_frames = int(total_duration * self.fps)
            step_size = video_info['fps'] / self.fps
            indices = [int(i * step_size) for i in range(required_frames)]
            frame_paths = self.frame_paths_fps(line['video'], len(indices))

        flag = np.all([osp.exists(p) for p in frame_paths])

        if not flag:
            # file lock so concurrent workers do not decode the same video twice
            lock_path = osp.splitext(vid_path)[0] + '.lock'
            with portalocker.Lock(lock_path, 'w', timeout=30):
                if not np.all([osp.exists(p) for p in frame_paths]):
                    images = [vid[i].asnumpy() for i in indices]
                    images = [Image.fromarray(arr) for arr in images]
                    for im, pth in zip(images, frame_paths):
                        if not osp.exists(pth):
                            im.save(pth)

        return frame_paths

    def save_video_into_images(self, line):
        # thin alias kept for interface parity with the other splits
        frame_paths = self.save_video_frames(line)
        return frame_paths

    def build_prompt(self, line, video_llm):
        """Build the message list: video (or frames) followed by the question."""
        if isinstance(line, int):
            assert line < len(self)
            line = self.data.iloc[line]

        question, answer = self.qa_template(line)
        message = []
        video_path = osp.join(self.data_root, line['prefix'], line['video'] + line['suffix'])
        if video_llm:
            message.append(dict(type='video', value=video_path))
        else:
            img_frame_paths = self.save_video_into_images(line)
            for im in img_frame_paths:
                message.append(dict(type='image', value=im))
        message.append(dict(type='text', value=question))
        return message

    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Judge captions with ChatGPT; return the per-dimension rating.

        NOTE(review): @classmethod whose first parameter is named `self`
        (bound to the class). Only the 'chatgpt-1106' judge is accepted,
        so the exact_matching branch below is effectively dead.
        """
        model = judge_kwargs.setdefault('model', 'chatgpt-1106')
        assert model in ['chatgpt-1106']
        judge_kwargs.update({
            "max_tokens": 128,
            "temperature": 1.0,
            "top_p": 1,
            "presence_penalty": 1,
        })

        score_file = get_intermediate_file_path(eval_file, f'_{model}_score')
        tmp_file = get_intermediate_file_path(eval_file, f'_{model}', 'pkl')
        nproc = judge_kwargs.pop('nproc', 4)

        if not osp.exists(score_file):
            data = load(eval_file)
            if model != 'exact_matching':
                model = build_judge(system_prompt=sys_prompt, **judge_kwargs)
            else:
                model = None

            lt = len(data)
            lines = [data.iloc[i] for i in range(lt)]
            tups = [(model, line) for line in lines]
            indices = [line['index'] for line in lines]

            # resume: drop samples already scored in a previous partial run
            ans = {}
            if osp.exists(tmp_file):
                ans = load(tmp_file)
                tups = [x for x, i in zip(tups, indices) if i not in ans]
                indices = [i for i in indices if i not in ans]

            if len(indices):
                _ = track_progress_rich(
                    evaluate_tempcompass_captioning,
                    tups,
                    nproc=nproc,
                    chunksize=nproc,
                    keys=indices,
                    save=tmp_file,
                )
            ans = load(tmp_file)
            for idx, item in data.iterrows():
                data.loc[idx, 'score'] = ans[idx]['rating']
            dump(data, score_file)

        rating = get_dimension_rating(score_file)
        return rating
451
+
452
+
453
class TempCompass_YorN(VideoBaseDataset):
    """TempCompass yes/no split: binary questions on temporal aspects of videos."""

    # md5 of the generated TSV, used to validate a cached download
    MD5 = 'c72c046d7fa0e82c8cd7462f2e844ea8'
    TYPE = 'Video-Y/N'

    def __init__(self, dataset='TempCompass_YorN', nframe=0, fps=-1):
        # task name -> (annotation json, video sub-dir, video extension)
        self.type_data_list = {
            'yes_no': ('yes_no.json', './videos', '.mp4'),
        }
        super().__init__(dataset=dataset, nframe=nframe, fps=fps)

    @classmethod
    def supported_datasets(cls):
        return ['TempCompass_YorN']

    def prepare_dataset(self, dataset_name='TempCompass_YorN', repo_id='lmms-lab/TempCompass'):
        """Download (if needed) and materialize the dataset; return root + tsv path."""
        def check_integrity(pth):
            # cached copy is valid only if the tsv exists, matches MD5,
            # and every referenced video file is present
            data_file = osp.join(pth, f'{dataset_name}.tsv')

            if not osp.exists(data_file):
                return False

            if md5(data_file) != self.MD5:
                return False

            data = load(data_file)
            for idx, item in data.iterrows():
                if not osp.exists(osp.join(pth, item['prefix'], item['video'] + item['suffix'])):
                    return False
            return True

        cache_path = get_cache_path(repo_id)
        if cache_path is not None and check_integrity(cache_path):
            dataset_path = cache_path
        else:
            def read_parquet(pth):
                # convert the hub parquet files into per-task json once
                import pandas as pd
                for task_name in self.type_data_list.keys():
                    if not osp.exists(osp.join(pth, f'{task_name}.json')):
                        data = pd.read_parquet(osp.join(pth, task_name, 'test-00000-of-00001.parquet'))
                        data.to_json(osp.join(pth, f'{task_name}.json'), orient='records', lines=False)

            def unzip_videos(pth):
                import zipfile
                if not osp.exists(osp.join(pth, 'videos')):
                    zip_file = osp.join(pth, 'tempcompass_videos.zip')
                    with zipfile.ZipFile(zip_file, 'r') as zip_ref:
                        zip_ref.extractall(pth)

            def generate_tsv(pth):
                # flatten the per-task json annotations into one indexed tsv
                data_file = osp.join(pth, f'{dataset_name}.tsv')
                if osp.exists(data_file) and md5(data_file) == self.MD5:
                    return
                self.data_list = []
                for k, v in self.type_data_list.items():
                    with open(osp.join(pth, v[0]), 'r') as f:
                        json_data = json.load(f)
                    for data in json_data:
                        self.data_list.append({
                            'task_type': k,
                            'prefix': v[1],
                            'suffix': v[2],
                            'video': data['video_id'],
                            # keep only the first line of the raw question text
                            'question': data['question'].split('\n')[0],
                            'answer': data['answer'],
                            'dim': data['dim']
                        })

                data_df = pd.DataFrame(self.data_list)
                data_df = data_df.assign(index=range(len(data_df)))
                data_df.to_csv(data_file, sep='\t', index=False)

            if modelscope_flag_set():
                from modelscope import dataset_snapshot_download
                dataset_path = dataset_snapshot_download(dataset_id=repo_id)
            else:
                dataset_path = snapshot_download(repo_id=repo_id, repo_type='dataset')
            read_parquet(dataset_path)
            unzip_videos(dataset_path)
            generate_tsv(dataset_path)

        data_file = osp.join(dataset_path, f'{dataset_name}.tsv')
        return dict(root=dataset_path, data_file=data_file)

    def qa_template(self, data):
        """Return ``(question, answer)``; yes/no has no options to append."""
        question = data['question']
        answer = data['answer']
        return question, answer

    def save_video_frames(self, line):
        """Sample frames from the video of `line`, cache them, return their paths.

        ``nframe > 0`` with ``fps < 0`` samples a fixed count uniformly;
        ``fps > 0`` samples at a fixed rate. NOTE(review): with ``nframe == 0``
        and ``fps < 0`` neither branch binds `frame_paths` -> NameError.
        """
        vid_path = osp.join(self.data_root, line['prefix'], line['video'] + line['suffix'])
        import decord
        vid = decord.VideoReader(vid_path)
        video_info = {
            'fps': vid.get_avg_fps(),
            'n_frames': len(vid),
        }
        if self.nframe > 0 and self.fps < 0:
            step_size = len(vid) / (self.nframe + 1)
            indices = [int(i * step_size) for i in range(1, self.nframe + 1)]
            frame_paths = self.frame_paths(line['video'])
        elif self.fps > 0:
            # not constrained by num_frames, get frames by fps
            total_duration = video_info['n_frames'] / video_info['fps']
            required_frames = int(total_duration * self.fps)
            step_size = video_info['fps'] / self.fps
            indices = [int(i * step_size) for i in range(required_frames)]
            frame_paths = self.frame_paths_fps(line['video'], len(indices))

        flag = np.all([osp.exists(p) for p in frame_paths])

        if not flag:
            # file lock so concurrent workers do not decode the same video twice
            lock_path = osp.splitext(vid_path)[0] + '.lock'
            with portalocker.Lock(lock_path, 'w', timeout=30):
                if not np.all([osp.exists(p) for p in frame_paths]):
                    images = [vid[i].asnumpy() for i in indices]
                    images = [Image.fromarray(arr) for arr in images]
                    for im, pth in zip(images, frame_paths):
                        if not osp.exists(pth):
                            im.save(pth)

        return frame_paths

    def save_video_into_images(self, line):
        # thin alias kept for interface parity with the other splits
        frame_paths = self.save_video_frames(line)
        return frame_paths

    def build_prompt(self, line, video_llm):
        """Build the message list: video (or frames), question, yes/no instruction."""
        if isinstance(line, int):
            assert line < len(self)
            line = self.data.iloc[line]

        question, answer = self.qa_template(line)
        message = []
        video_path = osp.join(self.data_root, line['prefix'], line['video'] + line['suffix'])
        if video_llm:
            message.append(dict(type='video', value=video_path))
        else:
            img_frame_paths = self.save_video_into_images(line)
            for im in img_frame_paths:
                message.append(dict(type='image', value=im))
        message.append(dict(type='text', value=question))
        message.append(dict(type='text', value='\nPlease answer yes or no:'))
        return message

    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Score yes/no predictions; return the per-dimension rating.

        NOTE(review): @classmethod whose first parameter is named `self`
        (bound to the class). Judging is exact matching by default, or a
        ChatGPT judge when requested.
        """
        model = judge_kwargs.get('model', 'exact_matching')
        assert model in ['chatgpt-1106', 'exact_matching']
        judge_kwargs.update({
            "max_tokens": 128,
            "temperature": 1.0,
            "top_p": 1,
            "presence_penalty": 1,
        })

        score_file = get_intermediate_file_path(eval_file, f'_{model}_score')
        tmp_file = get_intermediate_file_path(eval_file, f'_{model}', 'pkl')
        nproc = judge_kwargs.pop('nproc', 4)

        if not osp.exists(score_file):
            data = load(eval_file)
            if model != 'exact_matching':
                model = build_judge(system_prompt=sys_prompt, **judge_kwargs)
            else:
                model = None

            lt = len(data)
            lines = [data.iloc[i] for i in range(lt)]
            tups = [(model, line) for line in lines]
            indices = [line['index'] for line in lines]

            # resume: drop samples already scored in a previous partial run
            ans = {}
            if osp.exists(tmp_file):
                ans = load(tmp_file)
                tups = [x for x, i in zip(tups, indices) if i not in ans]
                indices = [i for i in indices if i not in ans]

            if len(indices):
                _ = track_progress_rich(
                    evaluate_tempcompass_YorN,
                    tups,
                    nproc=nproc,
                    chunksize=nproc,
                    keys=indices,
                    save=tmp_file,
                )
            ans = load(tmp_file)
            for idx, item in data.iterrows():
                data.loc[idx, 'score'] = ans[idx]['rating']
            dump(data, score_file)

        rating = get_dimension_rating(score_file)
        return rating
VLMEvalKit-sudoku/vlmeval/dataset/text_mcq.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .text_base import TextBaseDataset
2
+ from .utils import build_judge, DEBUG_MESSAGE
3
+ from ..smp import *
4
+ from ..smp.file import get_intermediate_file_path
5
+
6
+
7
class TextMCQDataset(TextBaseDataset):
    """Text-only multiple-choice QA dataset (no images in the prompt)."""

    TYPE = 'MCQ'

    # filled by subclasses / registry with per-dataset download info
    DATASET_URL = {}

    DATASET_MD5 = {}

    def build_prompt(self, line):
        """Render one row into a hint/question/options text prompt."""

        if isinstance(line, int):
            line = self.data.iloc[line]

        question = line['question']
        # candidate options live in single-letter columns A, B, C, ...
        options = {
            cand: line[cand]
            for cand in string.ascii_uppercase
            if cand in line and not pd.isna(line[cand])
        }
        options_prompt = 'Options:\n'
        for key, item in options.items():
            options_prompt += f'{key}. {item}\n'
        hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
        prompt = ''
        if hint is not None:
            prompt += f'Hint: {hint}\n'
        prompt += f'Question: {question}\n'
        if len(options):
            prompt += options_prompt
            prompt += 'Please select the correct answer from the options above. \n'

        msgs = []

        msgs.append(dict(type='text', value=prompt))

        return msgs

    def evaluate(self, eval_file, **judge_kwargs):
        """Score predictions with exact matching or an LLM judge; return accuracy.

        Falls back to exact matching whenever the judge model cannot be
        built or the OpenAI key is missing/broken.
        """
        from .utils.multiple_choice import report_acc, report_acc_MMT, mcq_circular_eval, mcq_vanilla_eval
        # assert dataset is not None
        dataset_map = {
            'MMBench_TEST_EN': 'MMBench', 'MMBench_TEST_EN_V11': 'MMBench_V11',
            'MMBench_TEST_CN': 'MMBench_CN', 'MMBench_TEST_CN_V11': 'MMBench_CN_V11'
        }
        dataset = self.dataset_name
        if dataset in dataset_map:
            dataset = dataset_map[dataset]
        nproc = judge_kwargs.pop('nproc', 4)

        # circular evaluation is not used for the text-only variant
        circular = False
        model = judge_kwargs.get('model', 'exact_matching')
        assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125']
        name_str_map = {'chatgpt-0125': 'openai', 'gpt-4-0125': 'gpt4'}
        name_str = name_str_map[model] if model in name_str_map else model

        if model == 'exact_matching':
            model = None
        elif gpt_key_set():
            model = build_judge(**judge_kwargs)
            if not model.working():
                warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
                warnings.warn(DEBUG_MESSAGE)
                model = None
        else:
            warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
            model = None

        result_file = get_intermediate_file_path(eval_file, f'_{name_str}_result', 'pkl')

        data = load(eval_file)
        data = data.sort_values(by='index')
        data['prediction'] = [str(x) for x in data['prediction']]
        # If not choice label, then use lower case
        for k in data.keys():
            data[k.lower() if k not in list(string.ascii_uppercase) else k] = data.pop(k)

        # sanity check: every evaluated index must exist in the dataset meta
        meta = self.data
        meta_q_map = {x: y for x, y in zip(meta['index'], meta['question'])}
        data_map = {x: y for x, y in zip(data['index'], data['question'])}
        for k in data_map:
            assert k in meta_q_map, (
                f'eval_file should be the same as or a subset of dataset {self.dataset_name}'
            )

        if circular:
            data = mcq_circular_eval(model, data, meta, nproc, result_file, self.dataset_name)
        else:
            data = mcq_vanilla_eval(model, data, meta, nproc, result_file, self.dataset_name)

        # load split
        eval_name_result = get_intermediate_file_path(eval_file, f'_{name_str}_result')
        dump(data, eval_name_result)
        data = load(eval_name_result)

        # May have different report acc functions for different datasets
        if 'MMT' in dataset:
            acc = report_acc_MMT(data)
        else:
            acc = report_acc(data)

        score_file = get_intermediate_file_path(eval_file, '_acc', 'csv')
        dump(acc, score_file)

        return acc
110
+
111
+
112
class CustomTextMCQDataset(TextMCQDataset):
    """User-supplied text MCQ dataset loaded from a local TSV under LMUDataRoot."""

    def load_data(self, dataset):
        """Load `<root>/<dataset>.tsv`, localizing oversized files (> 1 GB) first."""
        data_path = osp.join(LMUDataRoot(), f'{dataset}.tsv')

        if file_size(data_path, 'GB') <= 1:
            return load(data_path)

        # large file: rewrite it into a localized copy once, then read that
        local_path = data_path.replace('.tsv', '_local.tsv')
        if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL', None):
            from ..tools import LOCALIZE
            LOCALIZE(data_path, local_path)
        return load(local_path)
VLMEvalKit-sudoku/vlmeval/dataset/utils/ccocr_evaluator/__init__.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .kie_evaluator import KieEvaluator
2
+ from .doc_parsing_evaluator import ParsingEvaluator
3
+ from .ocr_evaluator import OcrEvaluator
4
+ from .common import summary
5
+
6
+
7
+ evaluator_map_info = {
8
+ "kie": KieEvaluator("kie"),
9
+ "doc_parsing": ParsingEvaluator("doc_parsing"),
10
+ "multi_lan_ocr": OcrEvaluator("multi_lan_ocr"),
11
+ "multi_scene_ocr": OcrEvaluator("multi_scene_ocr")
12
+ }
VLMEvalKit-sudoku/vlmeval/dataset/utils/ccocr_evaluator/common.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import time
4
+ import sys
5
+ from abc import abstractmethod
6
+ from tabulate import tabulate
7
+
8
+
9
def pick_response_text(json_path):
    """Pull the plain response text out of one model-output json file.

    The file must provide "model_name" and "response"; the response layout
    differs per provider, so extraction is dispatched on the model-name
    prefix. Returns None (after logging) when the file or payload is unusable.
    """
    try:
        with open(json_path, "r") as f:
            payload = json.load(f)
    except Exception as e:
        print("--> file error: msg: {}, path: {}".format(e, json_path))
        return None

    for required_key in ["model_name", "response"]:
        if required_key not in payload:
            print("--> required key not exists, name: {}, path: {}".format(required_key, json_path))
            return None

    model = payload["model_name"]
    body = payload["response"]

    text = None
    if model.startswith(("gpt", "o1")):
        # OpenAI-style envelope produced by the request wrapper
        text = body.get("data", {}).get("response", {}).get("choices", [{}])[0].get("message", {}).get("content", None)  # noqa: E501
    elif model.startswith("local_"):
        # local models store the raw text directly
        text = body
    else:
        # providers that wrap the text in a content list
        if model.startswith("claude"):
            parts = body.get("content", None)
        elif model.startswith("gemini"):
            parts = body.get("candidates", [{}])[0].get("content", {}).get("parts", None)
        elif model.startswith("qwen"):
            parts = body.get("output", {}).get("choices", [{}])[0].get("message", {}).get("content", None)  # noqa: E501
        else:
            raise NotImplementedError("The pick_response_text NOT implemented for model: {}".format(model))

        if isinstance(parts, list) and len(parts) > 0:
            text = parts[0].get("text", None)

    if text is None:
        print("--> [error][{}] text pick error, path: {}".format(model, json_path))
    return text
48
+
49
+
50
def load_response_from_dir(res_dir):
    """Collect response texts from every ``*.json`` file in *res_dir*.

    Returns a dict mapping file stem (name without extension) to the
    extracted response text; non-json files and files whose text cannot
    be extracted are skipped with a log line.
    """
    response_info = {}
    for file_name in os.listdir(res_dir):
        file_path = os.path.abspath(os.path.join(res_dir, file_name))
        if not file_name.endswith(".json"):
            print("--> skip: result file should be a json: but got: {}".format(file_path))
            continue

        response_text = pick_response_text(file_path)
        if response_text is None:
            continue

        # key by the stem so entries can be matched against gt keys
        # (previously also bound an unused `ext` variable)
        file_name_wo_ext = os.path.splitext(file_name)[0]
        response_info[file_name_wo_ext] = response_text
    return response_info
67
+
68
+
69
class BaseMetric(object):
    """Base class for CC-OCR metrics.

    Subclasses implement :meth:`evaluate`; an instance is called with the
    prediction source (a dict or a result directory) plus the gt dict and
    returns ``(meta_info, eval_info)``.
    """

    def __init__(self, group_name, **kwargs):
        self.group_name = group_name
        self.kwargs = kwargs

    def response_post_func(self, response_text, **kwargs):
        """Hook to normalize a raw response; returning None drops the sample."""
        return response_text

    @abstractmethod
    # Given the prediction and gt, return the evaluation results in the format of a dictionary
    # results should contain a 'summary' key, for example:
    # {
    #     "summary": {
    #         "f1-score": 99.99,
    #         "metric_name": "metric_value"  # used for summary, only metric info could be placed in this dict.
    #     },
    #     "your other info": "xxx"
    # }
    def evaluate(self, response_info, gt_info, normalize_func=None, **kwargs):
        pass

    def __call__(self, pdt_res_dir, gt_info, with_response_ratio=True, **kwargs):
        """Post-process predictions and run :meth:`evaluate`.

        Args:
            pdt_res_dir: either an already-loaded ``{name: response}`` dict
                or a directory of per-sample json result files.
            gt_info: ground-truth dict keyed like the predictions.
            with_response_ratio: append ``response_success_ratio`` to the
                evaluation summary.

        Raises:
            ValueError: if `pdt_res_dir` is neither a dict nor a directory.
        """
        if isinstance(pdt_res_dir, dict):
            raw_response_info = pdt_res_dir
        elif os.path.exists(pdt_res_dir) and os.path.isdir(pdt_res_dir):
            raw_response_info = load_response_from_dir(pdt_res_dir)
        else:
            # BUGFIX: this previously `return`ed the ValueError instance
            # instead of raising it, silently handing callers an exception
            # object in place of the (meta_info, eval_info) tuple.
            raise ValueError("invalid input: response dict or folder are required, but got {}".format(pdt_res_dir))

        post_error_list, response_info = [], {}
        # samples present in gt but entirely missing from the predictions
        response_error_list = list(gt_info.keys() - raw_response_info.keys())
        for file_name, single_pdt_str in raw_response_info.items():
            single_pdt_str = self.response_post_func(single_pdt_str, **kwargs)
            if single_pdt_str is None:
                post_error_list.append(file_name)
                continue
            response_info[file_name] = single_pdt_str

        meta_info = {
            "gt_total_num": len(gt_info), "pdt_total_num": len(response_info),
            "post_error_list": post_error_list, "response_error_list": response_error_list,
        }
        eval_info = self.evaluate(response_info, gt_info, **kwargs)

        # add response_success_ratio: post-processing failures still count as
        # "responded"; 1e-9 guards against an empty gt set
        if "summary" in eval_info and with_response_ratio:
            success_ratio = (len(response_info) + len(post_error_list)) / (len(gt_info) + 1e-9)
            eval_info["summary"].update({"response_success_ratio": success_ratio})
        return meta_info, eval_info
120
+
121
+
122
def summary(index_path, exp_dir_base, is_weighted_sum=False):
    """Summarize all released datasets listed in *index_path* into one report.

    Reads the dataset index json, keeps entries whose "release" flag is
    not False, and delegates table generation to :func:`summary_multi_exp`.
    Returns the path of the written summary file.
    """
    with open(index_path, "r") as f:
        data_list = json.load(f)

    released_info = {}
    for entry in data_list:
        if not entry.get("release", True):
            continue
        released_info[entry["dataset"]] = entry

    return summary_multi_exp(exp_dir_base, list(released_info.keys()), is_weighted_sum=is_weighted_sum)
137
+
138
+
139
def summary_multi_exp(exp_dir_base, dataset_list=None, is_weighted_sum=False):
    """Aggregate per-experiment ``status.json`` metrics into markdown tables.

    Each sub-directory of *exp_dir_base* containing a ``status.json`` is one
    experiment. One markdown table is emitted per metric name, with a final
    summary column (plain mean, or sample-weighted mean when
    *is_weighted_sum* is True). The report is written to
    ``<exp_dir_base>/summary.md`` and its absolute path is returned.
    """
    if dataset_list is None:
        # derive the dataset column order from every status.json found
        all_dataset_name = []
        for exp_name in os.listdir(exp_dir_base):
            dir_status_path = os.path.join(exp_dir_base, exp_name, "status.json")
            if not os.path.exists(dir_status_path):
                continue
            with open(dir_status_path, "r") as f:
                data_status_info = json.load(f)
            all_dataset_name.extend(data_status_info.keys())
        dataset_list = sorted(set(all_dataset_name))

    # summary main code
    # layout: metric_name -> exp_name -> data_name -> (metric_value, total_num)
    all_evaluate_info = {}
    for exp_name in os.listdir(exp_dir_base):
        dir_status_path = os.path.join(exp_dir_base, exp_name, "status.json")
        if not os.path.exists(dir_status_path):
            print("--> skip: status.json not exist: {}".format(dir_status_path))
            continue

        with open(dir_status_path, "r") as f:
            all_status_info = json.load(f)

        for data_name in dataset_list:
            total_num = all_status_info.get(data_name, {}).get("config", {}).get("num", "-1")
            summary_info = all_status_info.get(data_name, {}).get("evaluation", {}).get("summary", {})
            for metric_name, metric_value in summary_info.items():
                exp_map = all_evaluate_info.setdefault(metric_name, {})
                exp_map.setdefault(exp_name, {})[data_name] = (metric_value, total_num)

    all_table_md = []
    for metric_name, metric_info in all_evaluate_info.items():
        formatted_time = time.strftime("%Y-%m-%d %H:%M", time.localtime(time.time()))
        summary_line_list = []
        summary_key_name = "summary(weighted)" if is_weighted_sum else "summary"
        summary_head = [f"exp_name({metric_name}_{formatted_time})"] + dataset_list + [summary_key_name]
        for exp_name, data_eval_info in metric_info.items():
            summary_line = [exp_name, ]

            all_metric_value = 0
            is_summary_valid, all_total_num, all_weighted_metric = True, 0, 0
            for data_name in dataset_list:
                # ("-1", "-1") marks a dataset missing from this experiment
                metric_value, total_num = data_eval_info.get(data_name, ("-1", "-1"))
                summary_line.append("{:.2f}".format(float(metric_value) * 100))
                # BUGFIX: the second condition previously re-tested
                # metric_value; a missing sample count must also invalidate
                # the summary (it would corrupt the weighted average).
                if str(metric_value) == "-1" or str(total_num) == "-1":
                    is_summary_valid = False
                    continue

                all_total_num += float(total_num)
                all_weighted_metric += float(total_num) * float(metric_value)
                all_metric_value += float(metric_value)

            summary_value_valid = ((all_weighted_metric / (all_total_num + 1e-9)) * 100) if is_weighted_sum \
                else (all_metric_value / (len(dataset_list) + 1e-9) * 100)
            summary_value = "-" if not is_summary_valid else "{:.2f}".format(summary_value_valid)
            summary_line.append(summary_value)
            summary_line_list.append(summary_line)

        md_table_info = tabulate(summary_line_list, headers=summary_head, tablefmt='pipe')
        all_table_md.append(md_table_info)

    print("\n\n".join(all_table_md))
    summary_path = os.path.abspath(os.path.join(exp_dir_base, "summary.md"))
    with open(summary_path, "w") as f:
        f.write("\n\n".join(all_table_md))
    return summary_path
210
+
211
+
212
if __name__ == '__main__':
    # CLI entry: summarize every experiment directory under exp_base_dir.
    if len(sys.argv) != 2:
        print("Usage: python {} exp_base_dir".format(__file__))
        exit(-1)
    else:
        print('--> info: {}'.format(sys.argv))
        exp_base_dir = sys.argv[1]

    summary_path = summary_multi_exp(exp_base_dir, dataset_list=None, is_weighted_sum=False)
    print("--> info: summary saved at : {}".format(summary_path))
    print("happy coding.")
VLMEvalKit-sudoku/vlmeval/dataset/utils/ccocr_evaluator/kie_evaluator.py ADDED
@@ -0,0 +1,385 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ """
3
+ Donut
4
+ Copyright (c) 2022-present NAVER Corp.
5
+ MIT License
6
+ """
7
+ import json
8
+ import os
9
+ import sys
10
+ import re
11
+ import time
12
+ from typing import Any, Dict, List, Tuple, Union
13
+
14
+ import zss
15
+ from zss import Node
16
+ from collections import Counter
17
+ from nltk import edit_distance
18
+
19
+ # local import
20
+ from .common import BaseMetric
21
+
22
+
23
def flatten(data: dict):
    """
    Convert a nested dictionary into a flat list of (dotted_key, leaf_value) pairs.
    Example:
        input(dict)
            {
                "menu": [
                    {"name" : ["cake"], "count" : ["2"]},
                    {"name" : ["juice"], "count" : ["1"]},
                ]
            }
        output(list)
            [
                ("menu.name", "cake"),
                ("menu.count", "2"),
                ("menu.name", "juice"),
                ("menu.count", "1"),
            ]
    """
    pairs = []

    def _walk(node, prefix=""):
        # Exact type checks (not isinstance) preserve the original strict handling
        # of dict/list subclasses: anything else is treated as a leaf value.
        if type(node) is dict:
            for name, child in node.items():
                _walk(child, f"{prefix}.{name}" if prefix else name)
        elif type(node) is list:
            # Lists do not extend the key path; each element keeps the parent key.
            for element in node:
                _walk(element, prefix)
        else:
            pairs.append((prefix, node))

    _walk(data)
    return pairs
56
+
57
+
58
def update_cost(node1: Node, node2: Node):
    """
    Update cost for tree edit distance.
    If both are leaf node, calculate string edit distance between two labels (special token '<leaf>' will be ignored).
    If one of them is leaf node, cost is length of string in leaf node + 1.
    If neither are leaf node, cost is 0 if label1 is same with label2 otherwise 1
    """
    label1 = node1.label
    label2 = node2.label
    # '<leaf>' is a tag prefix marking value nodes (see construct_tree_from_dict).
    label1_leaf = "<leaf>" in label1
    label2_leaf = "<leaf>" in label2
    if label1_leaf and label2_leaf:
        # leaf vs leaf: character-level Levenshtein distance on the raw values
        return edit_distance(label1.replace("<leaf>", ""), label2.replace("<leaf>", ""))
    elif not label1_leaf and label2_leaf:
        # structural node vs leaf: delete-one + retype-the-value cost
        return 1 + len(label2.replace("<leaf>", ""))
    elif label1_leaf and not label2_leaf:
        return 1 + len(label1.replace("<leaf>", ""))
    else:
        # structural vs structural: free if labels match, else a flat 1
        return int(label1 != label2)
77
+
78
+
79
def insert_and_remove_cost(node: Node):
    """
    Insert and remove cost for tree edit distance.
    A leaf node costs the length of its text (the '<leaf>' tag excluded);
    any structural node costs a flat 1.
    """
    text = node.label
    if "<leaf>" not in text:
        return 1
    return len(text.replace("<leaf>", ""))
90
+
91
+
92
def normalize_dict(data: Union[Dict, List, Any]):
    """
    Canonicalize a parsed KIE structure so predictions and ground truth compare fairly.

    Dict keys are visited in (length, lexicographic) order; empty values are dropped;
    scalar values are wrapped into single-element lists; scalars become stripped strings.
    """
    # if not data:
    #     return {}

    if isinstance(data, dict):
        new_data = dict()
        # deterministic key order: shorter keys first, ties broken alphabetically
        for key in sorted(data.keys(), key=lambda k: (len(k), k)):
            value = normalize_dict(data[key])
            if value:
                # every kept value is list-wrapped so downstream flatten() is uniform
                if not isinstance(value, list):
                    value = [value]
                new_data[key] = value

    elif isinstance(data, list):
        if all(isinstance(item, dict) for item in data):
            # homogeneous list of dicts: recurse, dropping empty results
            new_data = []
            for item in data:
                item = normalize_dict(item)
                if item:
                    new_data.append(item)
        else:
            # mixed/scalar list: keep only non-empty str/int/float items, as stripped strings
            new_data = [str(item).strip() for item in data if type(item) in {str, int, float} and str(item).strip()]
    else:
        # scalar leaf: stringified and stripped
        new_data = [str(data).strip()]
    return new_data
120
+
121
+
122
def cal_f1_all(preds, answers):
    """
    Calculate global F1 accuracy score (field-level, micro-averaged) by counting all true positives,
    false negatives and false positives.

    Returns (global_f1, per_field_metric_info, per_sample_error_info).
    F1 here is computed as tp / (tp + (fn + fp) / 2), which equals the usual
    harmonic-mean F1 when expanded.
    """
    metric_info, error_info = {}, {}
    total_tp, total_fn_or_fp = 0, 0
    for file_name, answer in answers.items():
        sample_error_info = {"fp": [], "fn": [], "tp": []}
        # missing predictions count as empty -> all GT fields become false negatives
        pred = preds.get(file_name, {})
        pred, answer = flatten(normalize_dict(pred)), flatten(normalize_dict(answer))
        for field in pred:
            field_name = field[0]
            if field_name not in metric_info:
                metric_info[field_name] = {"total_tp": 0, "total_fn_or_fp": 0}
            if field in answer:
                total_tp += 1
                metric_info[field_name]["total_tp"] += 1
                sample_error_info["tp"].append(field)
                # consume the matched GT field so each one is matched at most once
                answer.remove(field)
            else:
                total_fn_or_fp += 1
                metric_info[field_name]["total_fn_or_fp"] += 1
                sample_error_info["fp"].append(field)

        # whatever remains in `answer` was never predicted: false negatives
        total_fn_or_fp += len(answer)
        for field in answer:
            field_name = field[0]
            if field_name not in metric_info:
                metric_info[field_name] = {"total_tp": 0, "total_fn_or_fp": 0}
            metric_info[field_name]["total_fn_or_fp"] += 1
            sample_error_info["fn"].append(field)

        # record per-sample diagnostics only when the sample has at least one error
        sample_error_num = sum([len(v) for k, v in sample_error_info.items() if k != "tp"])
        if sample_error_num > 0:
            sample_error_info["error_num"] = sample_error_num
            error_class_list = ["counter_" + x[0] for x in (sample_error_info["fn"] + sample_error_info["fp"])]
            counter = Counter(error_class_list)
            sample_error_info["error_info"] = dict(counter)
            error_info[file_name] = sample_error_info

    # summary: per-field F1 with a small epsilon against zero division
    for field_name, field_info in metric_info.items():
        field_tp, field_fn_or_fp = field_info["total_tp"], field_info["total_fn_or_fp"]
        metric_info[field_name]["acc"] = field_tp / (field_tp + field_fn_or_fp / 2 + 1e-6)

    print("donut_evaluator: total_tp: {}, total_fn_or_fp: {}, ptd_num: {}, gt_num: {}".format(total_tp, total_fn_or_fp,
                                                                                              len(preds), len(answers)))
    # sort diagnostics so the worst offenders come first
    error_info = {k: v for k, v in
                  sorted(error_info.items(), key=lambda item: item[1].get("error_num", 0), reverse=True)}
    metric_info = {k: v for k, v in
                   sorted(metric_info.items(), key=lambda item: item[1].get("total_fn_or_fp", 0), reverse=True)}
    return total_tp / (total_tp + total_fn_or_fp / 2 + 1e-6), metric_info, error_info
175
+
176
+
177
def construct_tree_from_dict(data: Union[Dict, List], node_name: str = None):
    """
    Convert Dictionary into Tree

    Example:
        input(dict)

            {
                "menu": [
                    {"name" : ["cake"], "count" : ["2"]},
                    {"name" : ["juice"], "count" : ["1"]},
                ]
            }

        output(tree)
                                 <root>
                                   |
                                 menu
                                /    \
                         <subtree>  <subtree>
                        /      |     |      \
                     name    count  name    count
                    /         |     |         \
              <leaf>cake  <leaf>2  <leaf>juice  <leaf>1
    """
    if node_name is None:
        node_name = "<root>"

    node = Node(node_name)

    if isinstance(data, dict):
        # each key becomes a labeled child subtree
        for key, value in data.items():
            kid_node = construct_tree_from_dict(value, key)
            node.addkid(kid_node)
    elif isinstance(data, list):
        if all(isinstance(item, dict) for item in data):
            # list of dicts: anonymous <subtree> wrapper per element
            for item in data:
                kid_node = construct_tree_from_dict(
                    item,
                    "<subtree>",
                )
                node.addkid(kid_node)
        else:
            # list of scalars: each becomes a tagged leaf node
            for item in data:
                node.addkid(Node(f"<leaf>{item}"))
    else:
        # inputs must be pre-normalized (see normalize_dict); scalars at this
        # level indicate a malformed structure
        raise Exception(data, node_name)
    return node
225
+
226
+
227
def cal_acc(pred: dict, answer: dict):
    """
    Calculate normalized tree edit distance(nTED) based accuracy.
    1) Construct tree from dict,
    2) Get tree distance with insert/remove/update cost,
    3) Divide distance with GT tree size (i.e., nTED),
    4) Calculate nTED based accuracy. (= max(1 - nTED, 0 ).
    """
    pred = construct_tree_from_dict(normalize_dict(pred))
    answer = construct_tree_from_dict(normalize_dict(answer))
    # edit distance between prediction and ground truth trees
    val1 = zss.distance(
        pred,
        answer,
        get_children=zss.Node.get_children,
        insert_cost=insert_and_remove_cost,
        remove_cost=insert_and_remove_cost,
        update_cost=update_cost,
        return_operations=False,
    )
    # distance from an empty tree to the GT tree, used as the normalizer
    val2 = zss.distance(
        construct_tree_from_dict(normalize_dict({})),
        answer,
        get_children=zss.Node.get_children,
        insert_cost=insert_and_remove_cost,
        remove_cost=insert_and_remove_cost,
        update_cost=update_cost,
        return_operations=False,
    )
    # clamp at 0: a prediction can be worse than predicting nothing
    return max(0, 1 - val1 / val2)
256
+
257
+
258
def cal_acc_all(pred_info, answer_info):
    """
    Average the nTED-based accuracy over every ground-truth sample.

    Samples missing from pred_info are scored against an empty prediction.
    Returns (mean_accuracy, error_info) where error_info holds every sample
    with acc < 1.0, sorted worst-first.
    """
    acc_info, error_info = {}, {}
    for file_name, answer in answer_info.items():
        # if file_name not in pred_info:
        #     print("---> error: pdt not found: {}".format(file_name))
        #     continue
        pred = pred_info.get(file_name, {})
        acc = cal_acc(pred, answer)
        acc_info[file_name] = acc
        if acc < 1.0:
            error_info[file_name] = {"acc": acc, "pred": pred, "answer": answer}

    error_info = {k: v for k, v in sorted(error_info.items(), key=lambda item: item[1].get("acc", 0))}
    # epsilon guards against an empty answer_info
    acc_averge = sum(list(acc_info.values())) / (len(acc_info) + 1e-6)
    return acc_averge, error_info
273
+
274
+
275
def normalize_values_of_nested_dict(d, normalize_func):
    """
    Apply *normalize_func* to string values reachable through nested dicts.

    Dicts are rebuilt recursively. For lists, only dict elements are recursed
    into — non-dict list elements (including bare strings) are kept untouched,
    matching the original traversal rules. Non-string scalars pass through as-is.
    """
    if isinstance(d, dict):
        return {key: normalize_values_of_nested_dict(val, normalize_func)
                for key, val in d.items()}
    if isinstance(d, list):
        rebuilt = []
        for element in d:
            if isinstance(element, dict):
                rebuilt.append(normalize_values_of_nested_dict(element, normalize_func))
            else:
                rebuilt.append(element)
        return rebuilt
    if isinstance(d, str):
        return normalize_func(d)
    return d
286
+
287
+
288
def eval_donut(pdt_info, gt_info, normalize_func=None, data_name=None):
    """
    Run the full Donut-style KIE evaluation: field-level F1 plus nTED accuracy.

    If normalize_func is given, every string value in both predictions and
    ground truth is normalized first (e.g. fullwidth-to-halfwidth cleanup).
    data_name is only used for the progress printout.
    """
    if normalize_func is not None:
        print("--> info: normalize_func executed.")
        pdt_info = normalize_values_of_nested_dict(pdt_info, normalize_func)
        gt_info = normalize_values_of_nested_dict(gt_info, normalize_func)

    f1_score, class_eval_info, error_info = cal_f1_all(pdt_info, gt_info)
    acc_average, acc_error_info = cal_acc_all(pdt_info, gt_info)
    eval_info = {"f1_score": f1_score, "acc": acc_average, "class_f1_score": class_eval_info,
                 "f1_error_info": error_info, "acc_error_info": acc_error_info}
    print(data_name, "f1_score", f1_score, "acc", acc_average)
    return eval_info
302
+
303
+
304
def post_process_to_json(qwen_info_str, file_name=None):
    """
    Parse a model response into a JSON object.

    Handles both bare JSON and JSON wrapped in a markdown ```json fence,
    repairing a missing closing fence (truncated generations).
    Returns the parsed object, or None on any parse failure.
    """
    try:
        if "```json" in qwen_info_str:
            # Bug fix: the original check was `if "```" not in qwen_info_str`,
            # which is always False when "```json" is present (it contains
            # "```"), so truncated fences were never repaired. Append a
            # closing fence only when the opening one is unmatched.
            if qwen_info_str.count("```") < 2:
                qwen_info_str += "```"
            qwen_info_group = re.search(r'```json(.*?)```', qwen_info_str, re.DOTALL)
            json_str = qwen_info_group.group(1).strip().replace("\n", "")
        else:
            json_str = qwen_info_str.strip().replace("\n", "")
        json_data = json.loads(json_str)
        return json_data
    except Exception as err:  # noqa: F841
        return None
317
+
318
+
319
def fullwidth_to_halfwidth(text):
    """Convert fullwidth characters to their halfwidth (ASCII) equivalents.

    The ideographic space (U+3000) maps to an ASCII space; the fullwidth
    ASCII block (U+FF01..U+FF5E) shifts down by 0xFEE0; finally the Chinese
    enumeration comma is normalized to an ASCII comma.
    """
    converted = []
    for ch in text:
        code_point = ord(ch)
        if code_point == 0x3000:
            # fullwidth space -> ASCII space
            code_point = 0x0020
        elif 0xFF01 <= code_point <= 0xFF5E:
            # fullwidth ASCII variants -> ASCII
            code_point -= 0xFEE0
        converted.append(chr(code_point))
    return ''.join(converted).replace("、", ",")
333
+
334
+
335
def remove_unnecessary_spaces(text):
    """Normalize spacing in mixed Chinese/English text.

    Removes spaces between CJK characters and at CJK/ASCII boundaries,
    tightens punctuation spacing (outside digit contexts), inserts a space
    at digit<->letter boundaries, and collapses whitespace runs.
    """
    rules = (
        # drop spaces between two CJK characters
        (r'(?<=[\u4e00-\u9fff])\s+(?=[\u4e00-\u9fff])', ''),
        # drop spaces between CJK and ASCII alphanumerics (both directions)
        (r'(?<=[\u4e00-\u9fff])\s+(?=[a-zA-Z0-9])', ''),
        (r'(?<=[a-zA-Z0-9])\s+(?=[\u4e00-\u9fff])', ''),
        # tighten punctuation not preceded by a digit; keep one trailing space
        (r'(?<![0-9])\s*([,.!?:;])\s*', r'\1 '),
        # insert a space at digit<->letter boundaries
        (r'(?<=[0-9])(?=[a-zA-Z])', ' '),
        (r'(?<=[a-zA-Z])(?=[0-9])', ' '),
        # collapse remaining whitespace runs into single spaces
        (r'\s+', ' '),
    )
    for pattern, replacement in rules:
        text = re.sub(pattern, replacement, text)
    return text
348
+
349
+
350
class KieEvaluator(BaseMetric):
    """KIE (key information extraction) metric: field-level F1 plus nTED accuracy."""

    def response_post_func(self, response_text, **kwargs):
        # Parse the raw model response (possibly markdown-fenced) into a JSON
        # structure; returns None on parse failure.
        response_text = post_process_to_json(response_text, file_name=kwargs.get('file_name', None))
        return response_text

    def normalize_func(self, text, **kwargs):
        # Canonicalize a single string value: fullwidth->halfwidth, then fix
        # CJK/ASCII spacing, so predictions and GT compare fairly.
        halfwidth_text = fullwidth_to_halfwidth(str(text))
        cleaned_text = remove_unnecessary_spaces(halfwidth_text)
        return cleaned_text

    def evaluate(self, response_info, gt_info, **kwargs):
        """
        response_info: dict: {"file_name_1": response, "file_name_2": gt}
        gt_info: dict: {"file_name_1": gt, "file_name_2": gt}
        kwargs: dataset index config: {'dataset': 'kie_benchmark_POIE', 'group': 'kie', 'op': 'poie', 'num': 250}
        """
        # gt should be a dict for kie task, fix for VLMEvalKit
        # (labels arrive JSON-encoded; decode them in place)
        for image_name, label_content in gt_info.items():
            if isinstance(label_content, str):
                gt_info[image_name] = json.loads(label_content)

        # normalize every string value on both sides before scoring
        response_info = normalize_values_of_nested_dict(response_info, self.normalize_func)
        gt_info = normalize_values_of_nested_dict(gt_info, self.normalize_func)

        f1_score, class_eval_info, error_info = cal_f1_all(response_info, gt_info)
        acc_average, acc_error_info = cal_acc_all(response_info, gt_info)

        # summary info
        summary_info = {"f1_score": f1_score, "acc": acc_average}
        eval_info = {"summary": summary_info, "class_f1_score": class_eval_info,
                     "f1_error_info": error_info, "acc_error_info": acc_error_info}
        return eval_info
382
+
383
+
384
+ if __name__ == '__main__':
385
+ pass
VLMEvalKit-sudoku/vlmeval/dataset/utils/judge_util.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from ...smp import load_env
3
+
4
+ INTERNAL = os.environ.get('INTERNAL', 0)
5
+
6
+
7
def build_judge(**kwargs):
    """Instantiate the judge LLM named by kwargs['model'].

    Short aliases are resolved to full model versions unless the LOCAL_LLM
    environment variable overrides the version entirely. The wrapper class is
    chosen from the alias: SiliconFlow for qwen/deepseek, HFChatModel for
    llama31-8b, OpenAIWrapper otherwise. Remaining kwargs are forwarded to
    the wrapper constructor.
    """
    from ...api import OpenAIWrapper, SiliconFlowAPI, HFChatModel
    model = kwargs.pop('model', None)
    kwargs.pop('nproc', None)  # not a judge-constructor argument; discard
    load_env()
    LOCAL_LLM = os.environ.get('LOCAL_LLM', None)
    if LOCAL_LLM is None:
        # alias -> concrete model version
        model_map = {
            'gpt-4-turbo': 'gpt-4-1106-preview',
            'gpt-4-0613': 'gpt-4-0613',
            'gpt-4-0125': 'gpt-4-0125-preview',
            'gpt-4-0409': 'gpt-4-turbo-2024-04-09',
            'chatgpt-1106': 'gpt-3.5-turbo-1106',
            'chatgpt-0125': 'gpt-3.5-turbo-0125',
            'gpt-4o': 'gpt-4o-2024-05-13',
            'gpt-4o-0806': 'gpt-4o-2024-08-06',
            'gpt-4o-1120': 'gpt-4o-2024-11-20',
            'gpt-4o-mini': 'gpt-4o-mini-2024-07-18',
            'qwen-7b': 'Qwen/Qwen2.5-7B-Instruct',
            'qwen-72b': 'Qwen/Qwen2.5-72B-Instruct',
            'deepseek': 'deepseek-ai/DeepSeek-V3',
            'llama31-8b': 'meta-llama/Llama-3.1-8B-Instruct',
        }
        # unknown aliases pass through unchanged
        model_version = model_map[model] if model in model_map else model
    else:
        model_version = LOCAL_LLM

    if model in ['qwen-7b', 'qwen-72b', 'deepseek']:
        model = SiliconFlowAPI(model_version, **kwargs)
    elif model == 'llama31-8b':
        model = HFChatModel(model_version, **kwargs)
    else:
        model = OpenAIWrapper(model_version, **kwargs)
    return model
41
+
42
+
43
+ DEBUG_MESSAGE = """
44
+ To debug the OpenAI API, you can try the following scripts in python:
45
+ ```python
46
+ from vlmeval.api import OpenAIWrapper
47
+ model = OpenAIWrapper('gpt-4o', verbose=True)
48
+ msgs = [dict(type='text', value='Hello!')]
49
+ code, answer, resp = model.generate_inner(msgs)
50
+ print(code, answer, resp)
51
+ ```
52
+ You cam see the specific error if the API call fails.
53
+ """
VLMEvalKit-sudoku/vlmeval/dataset/utils/llavabench.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pandas as pd
3
+ from ...smp import *
4
+
5
+ rule_dict = {
6
+ 'llava_bench_conv': {'role': 'Assistant', 'prompt': 'We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with a few sentences describing the image. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment.'}, # noqa: E501
7
+ 'llava_bench_detail': {'role': 'Assistant', 'prompt': 'We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with a few sentences describing the image. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment.'}, # noqa: E501
8
+ 'llava_bench_complex': {'role': 'Assistant', 'prompt': 'We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with a few sentences describing the image. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment.'} # noqa: E501
9
+ }
10
+
11
+ rule_dict_ko = {
12
+ 'llava_bench_conv': {'role': '어시스턴트', 'prompt': '두 인공지능 어시스턴트의 성능을 [질문]에 대한 응답에 기반하여 평가하세요. 해당 [질문]은 특정 이미지를 보고 생성되었습니다. `유용성`, `관련성`, `정확성`, `세부 수준`, `한국어 생성능력`을 기준으로 응답을 평가하세요. 각각의 어시스턴트에게 1에서 10까지의 전반적인 점수를 부여하며, 높은 점수일수록 더 나은 전반적인 성능을 나타냅니다.\n\n# 단계\n1. 제공된 이미지 [설명]을 검토하세요.\n2. 각 어시스턴트의 응답을 다음 기준으로 분석하세요:\n - `유용성`: 응답이 사용자의 질문을 얼마나 잘 해결하는가?\n - `관련성`: 응답이 사용자의 질문에 얼마나 적절한가?\n - `정확성`: 응답에서 제공한 정보가 얼마나 정확한가?\n - `세부 수준`: 응답이 과하지 않게 충분히 자세한가?\n - `한국어 생성능력`: 생성된 한국어 문장이 자연스럽고 문법적으로 올바른가?\n3. 분석에 기반하여 각 어시스턴트에게 1에서 10까지의 점수를 부여하세요.\n4. 두 점수를 공백으로 구분하여 한 줄로 제공하세요.\n5. 점수에 대한 이유를 강조하면서 포괄적인 평가를 제공하고, 편견을 피하며 응답의 순서가 판단에 영향을 미치지 않도록 하세요.\n\n# 출력 형식\n- 첫 번째 줄: `어시스턴트1_점수 어시스턴트2_점수` (예: `8 9`)\n- 두 번째 줄: `유용성`, `관련성`, `정확성`, `세부 수준`, `한국어 생성능력` 기준으로 점수를 설명하는 자세한 문단을 제공합니다.\n\n# 주의사항\n- 평가 시 잠재적 편견을 방지하여 객관성을 확보하세요.\n- 분석과 설명에서 일관성과 명확성을 유지하세요.'}, # noqa: E501
13
+ 'llava_bench_detail': {'role': '어시스턴트', 'prompt': '두 인공지능 어시스턴트의 성능을 [질문]에 대한 응답에 기반하여 평가하세요. 해당 [질문]은 특정 이미지를 보고 생성되었습니다. `유용성`, `관련성`, `정확성`, `세부 수준`, `한국어 생성능력`을 기준으로 응답을 평가하세요. 각각의 어시스턴트에게 1에서 10까지의 전반적인 점수를 부여하며, 높은 점수일수록 더 나은 전반적인 성능을 나타냅니다.\n\n# 단계\n1. 제공된 이미지 [설명]을 검토하세요.\n2. 각 어시스턴트의 응답을 다음 기준으로 분석하세요:\n - `유용성`: 응답이 사용자의 질문을 얼마나 잘 해결하는가?\n - `관련성`: 응답이 사용자의 질문에 얼마나 적절한가?\n - `정확성`: 응답에서 제공한 정보가 얼마나 정확한가?\n - `세부 수준`: 응답이 과하지 않게 충분히 자세한가?\n - `한국어 생성능력`: 생성된 한국어 문장이 자연스럽고 문법적으로 올바른가?\n3. 분석에 기반하여 각 어시스턴트에게 1에서 10까지의 점수를 부여하세요.\n4. 두 점수를 공백으로 구분하여 한 줄로 제공하세요.\n5. 점수에 대한 이유를 강조하면서 포괄적인 평가를 제공하고, 편견을 피하며 응답의 순서가 판단에 영향을 미치지 않도록 하세요.\n\n# 출력 형식\n- 첫 번째 줄: `어시스턴트1_점수 어시스턴트2_점수` (예: `8 9`)\n- 두 번째 줄: `유용성`, `관련성`, `정확성`, `세부 수준`, `한국어 생성능력` 기준으로 점수를 설명하는 자세한 문단을 제공합니다.\n\n# 주의사항\n- 평가 시 잠재적 편견을 방지하여 객관성을 확보하세요.\n- 분석과 설명에서 일관성과 명확성을 유지하세요.'}, # noqa: E501
14
+ 'llava_bench_complex': {'role': '어시스턴트', 'prompt': '두 인공지능 어시스턴트의 성능을 [질문]에 대한 응답에 기반하여 평가하세요. 해당 [질문]은 특정 이미지를 보고 생성되었습니다. `유용성`, `관련성`, `정확성`, `세부 수준`, `한국어 생성능력`을 기준으로 응답을 평가하세요. 각각의 어시스턴트에게 1에서 10까지의 전반적인 점수를 부여하며, 높은 점수일수록 더 나은 전반적인 성능을 나타냅니다.\n\n# 단계\n1. 제공된 이미지 [설명]을 검토하세요.\n2. 각 어시스턴트의 응답을 다음 기준으로 분석하세요:\n - `유용성`: 응답이 사용자의 질문을 얼마나 잘 해결하는가?\n - `관련성`: 응답이 사용자의 질문에 얼마나 적절한가?\n - `정확성`: 응답에서 제공한 정보가 얼마나 정확한가?\n - `세부 수준`: 응답이 과하지 않게 충분히 자세한가?\n - `한국어 생성능력`: 생성된 한국어 문장이 자연스럽고 문법적으로 올바른가?\n3. 분석에 기반하여 각 어시스턴트에게 1에서 10까지의 점수를 부여하세요.\n4. 두 점수를 공백으로 구분하여 한 줄로 제공하세요.\n5. 점수에 대한 이유를 강조하면서 포괄적인 평가를 제공하고, 편견을 피하며 응답의 순서가 판단에 영향을 미치지 않도록 하세요.\n\n# 출력 형식\n- 첫 번째 줄: `어시스턴트1_점수 어시스턴트2_점수` (예: `8 9`)\n- 두 번째 줄: `유용성`, `관련성`, `정확성`, `세부 수준`, `한국어 생성능력` 기준으로 점수를 설명하는 자세한 문단을 제공합니다.\n\n# 주의사항\n- 평가 시 잠재적 편견을 방지하여 객관성을 확보하세요.\n- 분석과 설명에서 일관성과 명확성을 유지하세요.'} # noqa: E501
15
+ }
16
+
17
+
18
def get_eval(judge, content):
    """Send one evaluation prompt to the judge model and return its raw reply."""
    return judge.generate(content)
20
+
21
+
22
def parse_score(review):
    """Parse the 'score1 score2' pair from the first line of a judge review.

    Returns [score1, score2] as floats, or [-1, -1] when the first line does
    not contain exactly two space-separated values or parsing fails.
    """
    logger = get_logger('Evaluation')
    try:
        score_pair = review.split('\n')[0]
        score_pair = score_pair.replace(',', ' ')
        sp = score_pair.split(' ')
        if len(sp) == 2:
            return [float(sp[0]), float(sp[1])]
        else:
            # Fix: the original `logger.error('error', review)` passed the
            # review as a %-argument with no placeholder, which makes the
            # logging module raise a formatting error instead of logging.
            logger.error('error parsing score pair from review: %s', review)
            return [-1, -1]
    except Exception as e:
        # Fix: the original passed the exception object as the format string.
        logger.error('%s, error parsing review: %s', e, review)
        return [-1, -1]
36
+
37
+
38
def build_prompt(line):
    """Compose the pairwise-comparison judge prompt for one LLaVABench sample.

    Assistant 1 is the GPT-4 reference answer and Assistant 2 the evaluated
    model's prediction; the image content is conveyed via its caption text.
    The category-specific instructions come from rule_dict.
    """
    cap_str = line['caption']
    question = line['question']
    ans1 = line['gpt4_ans']
    ans2 = line['prediction']
    category = 'llava_bench_' + line['category']
    rule = rule_dict[category]
    role, prompt = rule['role'], rule['prompt']

    content = (f'[Context]\n{cap_str}\n\n'
               f'[Question]\n{question}\n\n'
               f'[{role} 1]\n{ans1}\n\n[End of {role} 1]\n\n'
               f'[{role} 2]\n{ans2}\n\n[End of {role} 2]\n\n'
               f'[System]\n{prompt}\n\n')
    return content
53
+
54
+
55
def build_prompt_ko(line):
    """Korean-judge variant of build_prompt: same layout, rule_dict_ko prompts."""
    cap_str = line['caption']
    question = line['question']
    ans1 = line['gpt4_ans']
    ans2 = line['prediction']
    category = 'llava_bench_' + line['category']
    rule = rule_dict_ko[category]
    role, prompt = rule['role'], rule['prompt']

    content = (f'[설명]\n{cap_str}\n\n'
               f'[질문]\n{question}\n\n'
               f'[{role} 1]\n{ans1}\n\n[{role} 1 끝]\n\n'
               f'[{role} 2]\n{ans2}\n\n[{role} 2 끝]\n\n'
               f'[System]\n{prompt}\n\n')
    return content
70
+
71
+
72
def LLaVABench_atomeval(model, prompt):
    """Judge a single sample: query the judge model, then parse the score pair."""
    review = get_eval(model, prompt)
    scores = parse_score(review)
    return scores
76
+
77
+
78
def LLaVABench_score(data):
    """Aggregate judge scores into per-category and overall LLaVABench metrics.

    For each split: the relative score (VLM mean / GPT-4 mean * 100), plus
    both raw means rescaled from the 1-10 judge range to 0-100.
    """
    cates = ['overall'] + list(set(data['category']))
    ret = defaultdict(list)

    for c in cates:
        ret['split'].append(c)
        sub = data[data['category'] == c] if c != 'overall' else data
        ret['Relative Score (main)'].append(np.mean(sub['score']) / np.mean(sub['gpt4_score']) * 100)
        ret['VLM Score'].append(np.mean(sub['score']) * 10)
        ret['GPT4 Score'].append(np.mean(sub['gpt4_score']) * 10)
    return pd.DataFrame(ret)
VLMEvalKit-sudoku/vlmeval/dataset/utils/mathv.py ADDED
@@ -0,0 +1,179 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ...smp import *
2
+ from ...utils import can_infer
3
+ import timeout_decorator
4
+ try:
5
+ from latex2sympy2 import latex2sympy
6
+ except Exception as e:
7
+ logging.critical(f'{type(e)}: {e}')
8
+ logging.critical('Please install latex2sympy2 by running "pip install latex2sympy2"')
9
+
10
+
11
+ FAIL_MSG = 'Failed to obtain answer via API.'
12
+
13
+
14
+ @timeout_decorator.timeout(30)
15
+ def is_equal(asw: str, gt_asw: str) -> bool:
16
+ if not isinstance(asw, str) != str or not isinstance(gt_asw, str):
17
+ print('Warning: input is not string')
18
+ print(asw, gt_asw)
19
+ asw = str(asw).lower().strip()
20
+ gt_asw = str(gt_asw).lower().strip()
21
+ if gt_asw == asw:
22
+ return True
23
+ try:
24
+ a = eval(gt_asw)
25
+ b = eval(asw)
26
+ if abs(a - b) < 1e-6:
27
+ return True
28
+ except:
29
+ pass
30
+ try:
31
+ a = latex2sympy(gt_asw)
32
+ b = latex2sympy(asw)
33
+ if abs(eval(str(a)) - eval(str(b))) < 1e-6:
34
+ return True
35
+ if abs(a - b) < 1e-6:
36
+ return True
37
+ except:
38
+ pass
39
+ return False
40
+
41
+
42
+ def get_gpt4_ICE():
43
+ example_1 = """
44
+ Hint: Please answer the question and provide the final answer at the end.\n
45
+ Question: Which number is missing?\n
46
+ Model response: The number missing in the sequence is 14.\n
47
+ Extracted answer: 14
48
+ """
49
+
50
+ example_2 = """
51
+ Hint: Please answer the question and provide the final answer at the end.\n
52
+ Question: What is the fraction of females facing the camera?\n
53
+ Model response: The fraction of females facing the camera is 0.6,
54
+ which means that six out of ten females in the group are facing the camera.\n
55
+ Extracted answer: 0.6
56
+ """
57
+
58
+ example_3 = """
59
+ Hint: Please answer the question and provide the final answer at the end.\n
60
+ Question: How much money does Luca need to buy a sour apple candy and a butter-scotch candy? (Unit: $)\n
61
+ Model response: Luca needs $1.45 to buy a sour apple candy and a butterscotch candy.\n
62
+ Extracted answer: 1.45
63
+ """
64
+
65
+ example_4 = """
66
+ Hint: Please answer the question and provide the final answer at the end.\n
67
+ Question: Between which two years does the line graph saw its maximum peak?\n
68
+ Model response: The line graph saw its maximum peak between 2007 and 2008.\n
69
+ Extracted answer: [2007, 2008]
70
+ """
71
+
72
+ example_5 = """
73
+ Hint: Please answer the question and provide the correct option letter, e.g., A, B, C, D, at the end.\n
74
+ Question: What fraction of the shape is blue?\n
75
+ Choices: (A) 3/11 (B) 8/11 (C) 6/11 (D) 3/5\n
76
+ Model response: The correct answer is (B) 8/11.\n
77
+ Extracted answer: B
78
+ """
79
+
80
+ return [example_1, example_2, example_3, example_4, example_5]
81
+
82
+
83
+ def build_mathv_gpt4_prompt(line):
84
+ task_description = """
85
+ Please read the following example.
86
+ Then extract the answer from the model response and type it at the end of the prompt.\n
87
+ """
88
+ question = line['question']
89
+ prediction = str(line['prediction'])
90
+ prompt = task_description
91
+ examples = get_gpt4_ICE()
92
+ for example in examples:
93
+ prompt += example + '\n'
94
+ prompt += question + '\n'
95
+ prompt += 'Model respone: ' + prediction
96
+ prompt += 'Extracted answer:'
97
+ return prompt
98
+
99
+
100
+ def list_to_dict(lst):
101
+ return {chr(65 + i): val for i, val in enumerate(lst)}
102
+
103
+
104
+ def post_check(line, prefetch=False):
105
+ res = None
106
+ ans = line['answer']
107
+ response = line['prediction'] if prefetch else line['res']
108
+ try:
109
+ if len(eval(line['choices'])) > 0:
110
+ ans = line['answer']
111
+ choices = list_to_dict(eval(line['choices']))
112
+ res = can_infer(response, choices)
113
+ if prefetch:
114
+ return res
115
+ else:
116
+ res = str(response)
117
+ ans = str(ans)
118
+ except ValueError:
119
+ pass
120
+
121
+ try:
122
+ if is_equal(res, ans):
123
+ return res if prefetch else True
124
+ else:
125
+ return False
126
+ except Exception as err:
127
+ logging.warning(f'{type(err)}: {err}')
128
+ return False
129
+
130
+
131
+ def MATH_V_auxeval(model, line):
132
+ prompt = build_mathv_gpt4_prompt(line)
133
+ log = ''
134
+ retry = 5
135
+ if post_check(line, prefetch=True):
136
+ res = post_check(line, prefetch=True)
137
+ return dict(log='Prefetch succeed', res=res)
138
+ for i in range(retry):
139
+ prediction = line['prediction']
140
+ res = model.generate(prompt, temperature=i * 0.5)
141
+
142
+ if FAIL_MSG in res:
143
+ log += f'Try {i}: output is {prediction}, failed to parse.\n'
144
+ else:
145
+ log += 'Succeed'
146
+ return dict(log=log, res=res)
147
+ log += 'All 5 retries failed.\n'
148
+ return dict(log=log, res='')
149
+
150
+
151
+ def MATH_V_acc(result_file):
152
+ data = load(result_file)
153
+ tot = defaultdict(lambda: 0)
154
+ fetch = defaultdict(lambda: 0)
155
+ hit = defaultdict(lambda: 0)
156
+ lt = len(data)
157
+ from tqdm import tqdm
158
+ for i in tqdm(range(lt)):
159
+ item = data.iloc[i]
160
+ cate = item['category']
161
+ tot['Overall'] += 1
162
+ tot[cate] += 1
163
+ if item['log'] == 'Prefetch succeed':
164
+ fetch['Overall'] += 1
165
+ fetch[cate] += 1
166
+ if post_check(item, prefetch=False):
167
+ hit['Overall'] += 1
168
+ hit[cate] += 1
169
+
170
+ res = defaultdict(list)
171
+ for k in tot.keys():
172
+ res['Subject'].append(k)
173
+ res['tot'].append(tot[k])
174
+ res['prefetch'].append(fetch[k])
175
+ res['hit'].append(hit[k])
176
+ res['prefetch_rate'].append(fetch[k] / tot[k] * 100)
177
+ res['acc'].append(hit[k] / tot[k] * 100)
178
+ res = pd.DataFrame(res).sort_values('Subject', ignore_index=True)
179
+ return res
VLMEvalKit-sudoku/vlmeval/dataset/utils/mlvu.py ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ...smp import *
2
+ from .multiple_choice import extract_answer_from_item
3
+ from PIL import Image, ImageOps
4
+ import numpy as np
5
+
6
# Sentinel substring emitted when a judge-API call fails; callers check for
# its presence in a response to detect failure.
FAIL_MSG = 'Failed to obtain answer via API.'

# Judge system prompt for MLVU open-ended 'sub_scene' items: scores accuracy
# (1-5, against scoring points) and relevance (1-5), and must end with a JSON
# dict — parsed downstream by extract_scores_sub_scene.
system_prompt_sub_scene = """
##TASK DESCRIPTION:
You are required to evaluate a respondent's answer based on a provided question, some scoring points, and the respondent's answer. You should provide two scores. The first is the accuracy score, which should range from 1 to 5. The second is the relevance score, which should also range from 1 to 5. Below are the criteria for each scoring category.
##ACCURACY Scoring Criteria:
Evaluate the respondent's answer against specific scoring points as follows:
Score 1: The response completely misses the scoring point.
Score 3: The response mentions content related to the scoring point but is not entirely correct.
Score 5: The response accurately addresses the scoring point.
Calculate the average score across all scoring points to determine the final accuracy score.
##RELEVANCE Scoring Criteria:
Assess how the respondent's answer relates to the original question:
Score 1: The response is completely off-topic from the question.
Score 2: The response is partially related to the question but contains a significant amount of irrelevant content.
Score 3: The response primarily addresses the question, but the respondent seems uncertain about their own answer.
Score 4: The response mostly addresses the question and the respondent appears confident in their answer.
Score 5: The response is fully focused on addressing the question with no irrelevant content and demonstrates complete certainty.
----
##INSTRUCTION:
1. Evaluate Accuracy: First, assess and score each scoring point based on the respondent's answer. Calculate the average of these scores to establish the final accuracy score. Provide a detailed rationale before assigning your score.
2. Evaluate RELEVANCE: Assess the relevance of the respondent’s answer to the question. Note that when evaluating relevance, the correctness of the answer is not considered; focus solely on how relevant the answer is to the question. Provide a comprehensive rationale before assigning your score.
3. Output Scores in JSON Format: Present the scores in JSON format as follows:
{'score_accuracy': score_acc, 'score_relevance': score_rele, 'total_score': score_acc + score_rele}
""" # noqa

# Judge system prompt for the MLVU 'summary' task: scores completeness (1-5)
# and reliability (1-5) of a video summary, ending with a JSON dict — parsed
# downstream by extract_scores_summary.
system_prompt_summary = """
##TASK DESCRIPTION:
You are required to evaluate the performance of the respondent in the video summarization task based on the standard answer and the respondent's answer. You should provide two scores. The first is the COMPLETENESS score, which should range from 1 to 5. The second is the RELIABILITY score, which should also range from 1 to 5. Below are the criteria for each scoring category:
##COMPLETENESS Scoring Criteria:
The completeness score focuses on whether the summary covers all key points and main information from the video.
Score 1: The summary hardly covers any of the main content or key points of the video.
Score 2: The summary covers some of the main content and key points but misses many.
Score 3: The summary covers most of the main content and key points.
Score 4: The summary is very comprehensive, covering most to nearly all of the main content and key points.
Score 5: The summary completely covers all the main content and key points of the video.
##RELIABILITY Scoring Criteria:
The reliability score evaluates the correctness and clarity of the video summary. It checks for factual errors, misleading statements, and contradictions with the video content. If the respondent's answer includes details that are not present in the standard answer, as long as these details do not conflict with the correct answer and are reasonable, points should not be deducted.
Score 1: Contains multiple factual errors and contradictions; presentation is confusing.
Score 2: Includes several errors and some contradictions; needs clearer presentation.
Score 3: Generally accurate with minor errors; minimal contradictions; reasonably clear presentation.
Score 4: Very accurate with negligible inaccuracies; no contradictions; clear and fluent presentation.
Score 5: Completely accurate with no errors or contradictions; presentation is clear and easy to understand.
----
##INSTRUCTION:
1. Evaluate COMPLETENESS: First, analyze the respondent's answer according to the scoring criteria, then provide an integer score between 1 and 5 based on sufficient evidence.
2. Evaluate RELIABILITY: First, analyze the respondent's answer according to the scoring criteria, then provide an integer score between 1 and 5 based on sufficient evidence.
3. Output Scores in JSON Format: Present the scores in JSON format as follows:
{'score_completeness': score_comp, 'score_reliability': score_reli, 'total_score': score_comp + score_reli}
""" # noqa
56
+
57
+
58
def check_ans_with_model(pred, gt, model, item, dataset_name='MLVU_MCQ'):
    """Check an MCQ prediction against ground truth, falling back to the judge.

    First compares the option letters extracted from `pred` and `gt`; when
    they do not match, asks the judge model to extract the chosen option from
    the full item and compares that against item['answer'].
    """
    # Ground-truth option letter sits between the first '(' and ')',
    # e.g. "(A) some text" -> "A". Raises ValueError if gt lacks parentheses.
    gt_option = gt[gt.index("(") + 1: gt.index(")")]

    # If the prediction contains ')', take the single character right before
    # it as the predicted option letter (empty string when ')' is first).
    close_pos = pred.find(")")
    if close_pos != -1:
        pred = pred[close_pos - 1: close_pos]

    if pred == gt_option:
        return True
    # Letter match failed: let the judge model extract the chosen option.
    extracted = extract_answer_from_item(model, item, dataset_name)
    return extracted['opt'] == item['answer']
74
+
75
+
76
def extract_scores_summary(text):
    """Extract the completeness and reliability scores from a judge response.

    Searches *text* (typically a JSON-like dict emitted by the judge model)
    for the `score_completeness` and `score_reliability` keys and returns the
    numeric values found, in that order. Keys that are absent are skipped.

    Bug fix: the original located the value by slicing up to the next comma,
    which raised an uncaught ValueError when the score was the last key and
    later free text contained a comma (e.g. "...: 5} Thanks, bye"), and only
    worked for a trailing value by accident of the closing '}' being sliced
    off. A regex match on the number itself handles both cases.

    Returns:
        list[float]: zero, one, or two scores.
    """
    import re

    scores = []
    for key in ("score_completeness", "score_reliability"):
        # Match the key (optionally quoted), a colon, then a (possibly
        # negative / fractional) number. Only the first occurrence counts,
        # mirroring the original str.find behavior.
        match = re.search(rf"{key}['\"]?\s*:\s*(-?\d+(?:\.\d+)?)", text)
        if match is None:
            continue  # key not present in the response
        scores.append(float(match.group(1)))
    return scores
96
+
97
+
98
def check_ans_with_model_summary(pred, gt, model, item, dataset_name='MLVU_OpenEnded'):
    """Score a video-summary answer with the LLM judge.

    Builds a user prompt from the standard answer *gt* and the respondent's
    answer *pred*, queries *model*, then parses the completeness and
    reliability scores out of the response and returns their sum.

    NOTE(review): `item` and `dataset_name` are unused here — presumably kept
    for interface parity with `check_ans_with_model`; confirm with callers.

    Returns:
        numpy scalar: sum of the parsed scores (0.0 when none were parsed,
        since np.sum([]) == 0.0).
    """
    user_prompt = f"""
    Please score the respondent's answer according to the steps in the Instructions. You must end with a JSON dict to store the scores.
    Standard Answer: {gt}
    Respondent's Answer: {pred}
    """ # noqa
    result = model.generate(user_prompt)
    # Pull the two numeric scores out of the JSON-ish judge response.
    result = extract_scores_summary(result)
    result = np.sum(result)
    return result
108
+
109
+
110
def extract_scores_sub_scene(text):
    """Extract the accuracy and relevance scores from a judge response.

    Searches *text* (typically a JSON-like dict emitted by the judge model)
    for the `score_accuracy` and `score_relevance` keys and returns the
    numeric values found, in that order. Keys that are absent are skipped.

    Bug fix: the original located the value by slicing up to the next comma,
    which raised an uncaught ValueError when the score was the last key and
    later free text contained a comma, and only handled a trailing value by
    accident of the closing '}' being sliced off. A regex match on the number
    itself handles both cases.

    Returns:
        list[float]: zero, one, or two scores.
    """
    import re

    scores = []
    for key in ("score_accuracy", "score_relevance"):
        # Match the key (optionally quoted), a colon, then a (possibly
        # negative / fractional) number. Only the first occurrence counts,
        # mirroring the original str.find behavior.
        match = re.search(rf"{key}['\"]?\s*:\s*(-?\d+(?:\.\d+)?)", text)
        if match is None:
            continue  # key not present in the response
        scores.append(float(match.group(1)))
    return scores
130
+
131
+
132
def check_ans_with_model_sub_scene(pred, gt, model, item, dataset_name='MLVU_OpenEnded'):
    """Score a sub-scene open-ended answer with the LLM judge.

    Builds a user prompt from the item's question and scoring points plus the
    respondent's answer *pred*, queries *model*, then parses the accuracy and
    relevance scores out of the response and returns their sum.

    NOTE(review): `gt` and `dataset_name` are unused here — the ground truth
    enters via item['scoring_points']; confirm with callers.

    Returns:
        numpy scalar: sum of the parsed scores (0.0 when none were parsed,
        since np.sum([]) == 0.0).
    """
    user_prompt = f"""
    Please score the respondent's answer according to the steps in the Instructions. You must end with a JSON dict to store the scores.
    Question: {item['question']}
    Scoring Points: {item['scoring_points']}
    Respondent's Answer: {pred}
    """ # noqa
    result = model.generate(user_prompt)
    # Pull the two numeric scores out of the JSON-ish judge response.
    result = extract_scores_sub_scene(result)
    result = np.sum(result)
    return result
143
+
144
+
145
def MLVU_OpenEnded_generate(model, line):
    """Build the judge prompt for an MLVU open-ended record and query the judge.

    Args:
        model: judge model exposing ``generate(prompt)``.
        line: record with 'task_type' ('summary' or 'sub_scene') plus the
            fields that task needs ('answer'/'prediction' for summary;
            'question'/'scoring_points'/'prediction' for sub_scene).

    Returns:
        The raw judge-model response string.

    Raises:
        AssertionError: if 'task_type' is neither 'summary' nor 'sub_scene'.
    """
    task_type = line['task_type']
    if task_type == 'summary':
        user_prompt = (
            f"Please score the respondent's answer according to the steps in the Instructions. "
            f"You must end with a JSON dict to store the scores.\n"
            f"Standard Answer: {line['answer']}\n"
            f"Respondent's Answer: {line['prediction']}\n"
        )
    elif task_type == 'sub_scene':
        user_prompt = (
            f"Please score the respondent's answer according to the steps in the Instructions. "
            f"You must end with a JSON dict to store the scores.\n"
            f"Question: {line['question']}\n"
            f"Scoring Points: {line['scoring_points']}\n"
            f"Respondent's Answer: {line['prediction']}\n"
        )
    else:
        # BUG FIX: the original constructed an AssertionError but never raised
        # it, so an unknown task_type fell through to a confusing NameError on
        # `user_prompt` below. Raise it explicitly.
        raise AssertionError(f'MLVU don\'t have {task_type} open ended task!')
    result = model.generate(user_prompt)
    return result
166
+
167
+
168
def MLVU_OpenEnded_extract(gpt_generate_data, org_data):
    """Parse the judge responses and write a per-row 'score' column.

    For each row of *org_data*, picks the parser matching the row's task_type,
    parses the corresponding judge response from *gpt_generate_data* (indexed
    by the row index), and stores the summed scores in the 'score' column.
    Mutates and returns *org_data*.
    """
    parsers = {
        'sub_scene': extract_scores_sub_scene,
        'summary': extract_scores_summary,
    }
    for row_idx, row in org_data.iterrows():
        parse = parsers[row['task_type']]
        response = gpt_generate_data[row_idx]
        org_data.loc[row_idx, 'score'] = np.sum(parse(response))

    return org_data
179
+
180
+
181
def get_dimension_rating(data_path):
    """Sum scores and item counts per task type.

    Loads the scored result file and returns a mapping
    {task_type: [score_sum, item_count]} where scores are truncated to int
    before accumulation, matching the original behavior.
    """
    data = load(data_path)
    result_dict = {}
    for _, record in data.iterrows():
        bucket = result_dict.setdefault(record['task_type'], [0, 0])
        bucket[0] += int(record['score'])
        bucket[1] += 1
    return result_dict