diff --git a/Shapegrid/ShapeGrid_dis.parquet b/Shapegrid/ShapeGrid_dis.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..5f197d3bbc7267dadc682ba277d80ee75f1a8a75
--- /dev/null
+++ b/Shapegrid/ShapeGrid_dis.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c16d47667c9c5d2a97d70370610e54733861ec9043f2bb8aa6107c927de2367d
+size 102012404
diff --git a/Shapegrid/ShapeGrid_loc.parquet b/Shapegrid/ShapeGrid_loc.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..0bfe6e7658422d41b76363701adda7e9c8bee898
--- /dev/null
+++ b/Shapegrid/ShapeGrid_loc.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8549444eaf072e051373241edca5e00a5d141c012c9a33fee6f353c3e203abc4
+size 66166188
diff --git a/VLMEvalKit-sudoku/.github/scripts/assert_score.py b/VLMEvalKit-sudoku/.github/scripts/assert_score.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d5fbfe5c529efd69a0fb589ab5f6b3d827923bb
--- /dev/null
+++ b/VLMEvalKit-sudoku/.github/scripts/assert_score.py
@@ -0,0 +1,61 @@
+import argparse
+import ast
+import json
+import os
+
+import pandas as pd
+
+
def validate_scores(dataset_list, assert_score, model_name):
    """Compare each dataset's freshly computed score against its expected baseline.

    Args:
        dataset_list: Dataset names to check.
        assert_score: Mapping of {dataset: {model: expected_score}}.
        model_name: Model whose result files live under outputs/<model_name>/.

    Raises:
        AssertionError: If any score deviates from its baseline by more than 0.01.
    """
    for dataset in dataset_list:
        base_score = assert_score[dataset][model_name]
        if dataset == "OCRBench_MINI":
            # OCRBench writes a JSON summary rather than a CSV accuracy table.
            score_file = os.path.join("outputs", f"{model_name}/{model_name}_{dataset}_score.json")
            with open(score_file, "r") as f:
                total_score = json.load(f)
            cur_score = total_score["Final Score Norm"]
            assert (
                abs(cur_score - float(base_score)) <= 0.01
            ), f"{dataset} on {model_name}: cur_score is {cur_score}, base_score is {base_score}"
        else:
            score_file = os.path.join("outputs", f"{model_name}/{model_name}_{dataset}_acc.csv")
            df = pd.read_csv(score_file)
            cur_score = df["Overall"].iloc[0]
            if dataset == "MMBench_V11_MINI":
                # Fix: .values returns an ndarray; take the scalar so the
                # comparison and the assertion message use a plain number.
                cur_score = df.loc[df["split"] == "dev", "Overall"].values[0]
            assert (
                abs(cur_score - float(base_score)) <= 0.01
            ), f"{dataset} on {model_name}: cur_score is {cur_score}, base_score is {base_score}"
        print(f"cur_score is {cur_score}, base_score is {base_score}")
+
+
def parse_arguments(argv=None):
    """Parse CLI options for the score-validation script.

    Args:
        argv: Optional explicit argument list (for testing); defaults to
            sys.argv[1:] via argparse.

    Returns:
        argparse.Namespace with `dataset`, `base_score`, and `model_name`.
    """
    parser = argparse.ArgumentParser(description="Validate model scores against csv/json data")

    parser.add_argument("--dataset", type=str, required=True, help="Space-separated list of datasets")

    parser.add_argument(
        "--base_score", type=str, required=True, help="Dictionary string in format {dataset:{model:score}}"
    )

    parser.add_argument("--model-name", type=str, required=True, help="Name of the model to validate")

    return parser.parse_args(argv)
+
+
def main():
    """Entry point: parse CLI args, decode the expected-score dict, and validate."""
    args = parse_arguments()

    try:
        dataset_list = args.dataset.split()
        # base_score arrives as a Python-literal string, e.g. "{'ds': {'model': 0.9}}".
        base_score = ast.literal_eval(args.base_score)
    except Exception as e:
        print(f"Parameter parsing error: {str(e)}")
        return

    validate_scores(dataset_list, base_score, args.model_name)


if __name__ == "__main__":
    main()
diff --git a/VLMEvalKit-sudoku/.github/workflows/lint.yml b/VLMEvalKit-sudoku/.github/workflows/lint.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1eb46dcbb3fc7191259c017069e3206f3638398d
--- /dev/null
+++ b/VLMEvalKit-sudoku/.github/workflows/lint.yml
@@ -0,0 +1,23 @@
+name: lint
+
+on: [push, pull_request]
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python 3.10
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.10.15
+ - name: Install pre-commit hook
+ run: |
+ pip install pre-commit
+ pre-commit install
+ - name: Linting
+ run: pre-commit run --all-files
diff --git a/VLMEvalKit-sudoku/.github/workflows/pr-run-test.yml b/VLMEvalKit-sudoku/.github/workflows/pr-run-test.yml
new file mode 100644
index 0000000000000000000000000000000000000000..04b2b249071d91c0ea19f63d02a1e1b100872dee
--- /dev/null
+++ b/VLMEvalKit-sudoku/.github/workflows/pr-run-test.yml
@@ -0,0 +1,70 @@
+name: pr_run_test
+
+on:
+ pull_request:
+ branches:
+ - "main"
+ paths-ignore:
+ - "docs/**"
+ - "**.md"
+ workflow_dispatch:
+ schedule:
+ - cron: '56 01 * * *'
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
+env:
+ BASE_SCORE: '{"MMBench_V11_MINI":{"Qwen2-VL-7B-Instruct":0.8727272727272727,"InternVL2_5-8B":0.89090909,"llava_onevision_qwen2_7b_si":0.8363636363636363},"MMStar_MINI":{"Qwen2-VL-7B-Instruct":0.6266666666666667,"InternVL2_5-8B":0.6333333333333333,"llava_onevision_qwen2_7b_si":0.49333333333333335},"AI2D_MINI":{"Qwen2-VL-7B-Instruct":0.7975708502024291,"InternVL2_5-8B":0.854251012145749,"llava_onevision_qwen2_7b_si":0.8178137651821862},"OCRBench_MINI":{"Qwen2-VL-7B-Instruct":16.6,"InternVL2_5-8B":16.7,"llava_onevision_qwen2_7b_si":13.0}}'
+ HF_HUB_CACHE: /fs-computility/llm/shared/llmeval/models/opencompass_hf_hub
+ HF_HUB_OFFLINE: 1
+ CONDA_PATH: /fs-computility/llm/qa-llm-cicd/miniconda3
+ CONDA_ENV: vlm_pr_test
+
+jobs:
+ vlm_test:
+ if: ${{!cancelled()}}
+ runs-on: [volc_cu12_mllm]
+ strategy:
+ fail-fast: false
+ matrix:
+ model: [Qwen/Qwen2-VL-7B-Instruct,OpenGVLab/InternVL2_5-8B,lmms-lab/llava-onevision-qwen2-7b-si]
+ dataset: ["MMBench_V11_MINI MMStar_MINI AI2D_MINI","OCRBench_MINI"]
+ steps:
+ - name: clone_repo
+ uses: actions/checkout@v3
+ - name: evaluation_model
+ uses: nick-fields/retry@v3
+ with:
+ max_attempts: 3
+ timeout_minutes: 30
+ command: |
+ . ${{env.CONDA_PATH}}/bin/activate
+ conda activate ${{env.CONDA_ENV}}
+ pip uninstall vlmeval -y
+ pip install -e .
+ pre_model=$(echo ${{matrix.model}} | awk -F'/' '{print $1}')
+ if [ "${{matrix.model}}" = "lmms-lab/llava-onevision-qwen2-7b-si" ];then
+ model_name="llava_onevision_qwen2_7b_si"
+ else
+ model_name=$(echo ${{matrix.model}} | awk -F'/' '{print $2}')
+ fi
+ pip list
+ nvidia-smi
+ LOG=$(python run.py --data ${{matrix.dataset}} --model $model_name 2>&1)
+ echo "$LOG"
+ if echo "$LOG" | grep -q "CUDA out of memory"; then
+ sleep 300
+            exit 1 # retry because of OOM
+ fi
+ - name: assert_result
+ run: |
+ . ${{env.CONDA_PATH}}/bin/activate
+ conda activate ${{env.CONDA_ENV}}
+ if [ "${{matrix.model}}" = "lmms-lab/llava-onevision-qwen2-7b-si" ];then
+ model_name="llava_onevision_qwen2_7b_si"
+ else
+ model_name=$(echo ${{matrix.model}} | awk -F'/' '{print $2}')
+ fi
+ python .github/scripts/assert_score.py --dataset "${{matrix.dataset}}" --base_score $BASE_SCORE --model-name $model_name
diff --git a/VLMEvalKit-sudoku/llava/serve/__init__.py b/VLMEvalKit-sudoku/llava/serve/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/VLMEvalKit-sudoku/llava/serve/examples/extreme_ironing.jpg b/VLMEvalKit-sudoku/llava/serve/examples/extreme_ironing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..cf1071a1fbfa904309335e3521cecbcec341b37f
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/serve/examples/extreme_ironing.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a54caa21bc513ed25c8ca7f5747555c05dfd4e33f6a3cf5c08b3d9138a4da1d9
+size 62587
diff --git a/VLMEvalKit-sudoku/llava/train/train_dpo.py b/VLMEvalKit-sudoku/llava/train/train_dpo.py
new file mode 100644
index 0000000000000000000000000000000000000000..037eec42fb75a5f04f4d46502906731f6c9d5011
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/train/train_dpo.py
@@ -0,0 +1,1782 @@
+# Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright:
+# Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright:
+# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import copy
+import deepspeed
+from dataclasses import dataclass, field
+import json
+import logging
+import pathlib
+from typing import Dict, Optional, Sequence, List
+import ast
+
+import yaml
+import time
+import random
+import yaml
+import math
+import re
+import torch
+
+import transformers
+import tokenizers
+
+from llava.constants import IGNORE_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, IMAGE_TOKEN_INDEX
+from torch.utils.data import Dataset
+from llava.train.llava_trainer import LLaVADPOTrainer
+from data_processing.utils import load_jsonl, load_json
+from llava import conversation as conversation_lib
+from llava.model import *
+from llava.model.language_model.llava_qwen import LlavaQwenConfig
+from llava.model.language_model.llava_llama import LlavaConfig
+from llava.model.language_model.llava_mistral import LlavaMistralConfig
+from llava.mm_utils import process_highres_image, process_anyres_image, process_highres_image_crop_split, tokenizer_image_token
+from llava.utils import rank0_print
+from transformers import AutoConfig
+import pickle
+
+from trl.trainer.utils import DPODataCollatorWithPadding
+from PIL import Image, ImageFile
+from decord import VideoReader, cpu
+
+ImageFile.LOAD_TRUNCATED_IMAGES = True
+from packaging import version
+from typing import Any
+
+local_rank = None
+import numpy as np
+
+IS_TOKENIZER_GREATER_THAN_0_14 = version.parse(tokenizers.__version__) >= version.parse("0.14")
+
+
@dataclass
class ModelArguments:
    """Arguments selecting the base model and which multimodal parts to tune."""

    model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
    model_class_name: Optional[str] = field(default=None, metadata={"help": "Used to init model class, format is XXXXForCausalLM. e.g. currently XXXX is chosen from LlavaLlama, LlavaMixtral, LlavaMistral, Llama"})

    mm_tunable_parts: Optional[str] = field(
        default=None, metadata={"help": 'Could be "mm_mlp_adapter", "mm_vision_resampler", "mm_vision_tower,mm_mlp_adapter,mm_language_model", "mm_vision_tower,mm_mlp_adapter,mm_language_model", "mm_mlp_adapter,mm_language_model"'}
    )
    # deciding which part of the multimodal model to tune, will overwrite other previous settings

    version: Optional[str] = field(default="v0")
    freeze_backbone: bool = field(default=False)
    tune_mm_mlp_adapter: bool = field(default=False)
    tune_mm_vision_resampler: bool = field(default=False)
    vision_tower: Optional[str] = field(default=None)
    vision_tower_pretrained: Optional[str] = field(default=None)  # default to the last layer

    unfreeze_mm_vision_tower: bool = field(default=False)
    unfreeze_language_model: bool = field(default=False)
    mm_vision_select_layer: Optional[int] = field(default=-1)  # default to the last layer
    pretrain_mm_mlp_adapter: Optional[str] = field(default=None)
    mm_projector_type: Optional[str] = field(default="linear")
    mm_use_im_start_end: bool = field(default=False)
    mm_use_im_patch_token: bool = field(default=True)
    mm_patch_merge_type: Optional[str] = field(default="flat")
    mm_vision_select_feature: Optional[str] = field(default="patch")
    mm_resampler_type: Optional[str] = field(default=None)
    # Mask-drop / spatial-pool / perceiver / qformer knobs — presumably consumed by
    # the resampler implementations elsewhere in the package; not referenced here.
    mm_mask_drop_mode: str = field(default="fixed")
    mm_mask_drop_skip_percentage: float = field(default=0.0)
    mm_mask_drop_ratio: float = field(default=0.25)
    mm_mask_drop_ratio_upper: Optional[float] = field(default=None)
    mm_mask_drop_ratio_lower: Optional[float] = field(default=None)
    mm_spatial_pool_stride: Optional[int] = field(default=None)
    mm_spatial_pool_mode: str = field(default="average")
    mm_spatial_pool_out_channels: Optional[int] = field(default=None)
    mm_perceiver_depth: Optional[int] = field(default=3)
    mm_perceiver_latents: Optional[int] = field(default=32)
    mm_perceiver_ff_mult: Optional[float] = field(default=4)
    mm_perceiver_pretrained: Optional[str] = field(default=None)
    mm_qformer_depth: Optional[int] = field(default=3)
    mm_qformer_latents: Optional[int] = field(default=32)
    mm_qformer_pretrained: Optional[str] = field(default=None)

    rope_scaling_factor: Optional[float] = field(default=None)
    rope_scaling_type: Optional[str] = field(default=None)

    s2: Optional[bool] = field(default=False)
    s2_scales: Optional[str] = field(default="336,672,1008")
+
+
@dataclass
class DataArguments:
    """Arguments describing the training data and image/video preprocessing."""

    data_path: str = field(default=None, metadata={"help": "Path to the training data, in llava's instruction.json format. Supporting multiple json files via /path/to/{a,b,c}.json"})
    lazy_preprocess: bool = False
    is_multimodal: bool = False
    image_folder: Optional[str] = field(default=None)
    video_folder: Optional[str] = field(default=None)
    video_fps: Optional[int] = field(default=1)
    image_aspect_ratio: str = "square"
    image_grid_pinpoints: Optional[str] = field(default=None)
    image_crop_resolution: int = 384
    image_split_resolution: int = 384
    input_prompt: Optional[str] = field(default=None)
    refine_prompt: Optional[bool] = field(default=False)
    frames_upbound: Optional[int] = field(default=0)
    num_sample: Optional[int] = field(default=None)
+
+
@dataclass
class TrainingArguments(transformers.TrainingArguments):
    """transformers.TrainingArguments extended with LLaVA freezing/grouping,
    quantization, LoRA, and DPO hyperparameters."""

    cache_dir: Optional[str] = field(default=None)
    optim: str = field(default="adamw_torch")
    remove_unused_columns: bool = field(default=False)
    freeze_mm_mlp_adapter: bool = field(default=False)
    freeze_mm_vision_resampler: bool = field(default=False)
    mpt_attn_impl: Optional[str] = field(default="triton")
    model_max_length: int = field(
        default=4096,
        metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."},
    )
    double_quant: bool = field(default=True, metadata={"help": "Compress the quantization statistics through double quantization."})
    quant_type: str = field(default="nf4", metadata={"help": "Quantization data type to use. Should be one of `fp4` or `nf4`."})
    bits: int = field(default=16, metadata={"help": "How many bits to use."})
    # LoRA settings.
    lora_enable: bool = False
    lora_r: int = 64
    lora_alpha: int = 16
    lora_dropout: float = 0.05
    lora_weight_path: str = ""
    lora_bias: str = "none"
    mm_projector_lr: Optional[float] = None
    mm_vision_tower_lr: Optional[float] = None
    group_by_varlen: bool = field(default=False)
    group_by_modality_length: bool = field(default=False)
    group_by_modality_length_auto: bool = field(default=False)
    auto_find_batch_size: bool = field(default=False)
    gradient_checkpointing: bool = field(default=True)
    verbose_logging: bool = field(default=False)
    attn_implementation: str = field(default="flash_attention_2", metadata={"help": "Use transformers attention implementation."})
    # DPO hyperparameters.
    dpo_alpha: float = field(default=1.0)
    beta: float = field(default=0.1)
    gamma: float = field(default=1.0)
    generate_during_eval: bool = field(default=False)
    precompute_ref_log_probs: bool = field(default=False)
+
+
def maybe_zero_3(param, ignore_status=False, name=None):
    """Materialize a (possibly ZeRO-3 partitioned) parameter as a CPU tensor clone."""
    from deepspeed import zero
    from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus

    if not hasattr(param, "ds_id"):
        # Plain parameter: just detach and copy to CPU.
        return param.detach().cpu().clone()

    if param.ds_status == ZeroParamStatus.NOT_AVAILABLE and not ignore_status:
        logging.warning(f"{name}: param.ds_status != ZeroParamStatus.NOT_AVAILABLE: {param.ds_status}")
    # Gather the partitioned parameter across ranks before copying it out.
    with zero.GatheredParameters([param]):
        return param.data.detach().cpu().clone()
+
+
+# Borrowed from peft.utils.get_peft_model_state_dict
def get_peft_state_maybe_zero_3(named_params, bias):
    """Collect LoRA weights (and optionally biases) from named parameters.

    Args:
        named_params: Iterable of (name, tensor) pairs, e.g. model.named_parameters().
        bias: "none" (LoRA weights only), "all" (LoRA weights plus every bias), or
            "lora_only" (LoRA weights plus biases of LoRA-adapted modules).

    Returns:
        Dict of name -> tensor, each gathered through ZeRO-3 if necessary.

    Raises:
        NotImplementedError: For an unrecognized `bias` mode.
    """
    if bias == "none":
        to_return = {k: t for k, t in named_params if "lora_" in k}
    elif bias == "all":
        to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
    elif bias == "lora_only":
        to_return = {}
        maybe_lora_bias = {}
        lora_bias_names = set()
        for k, t in named_params:
            if "lora_" in k:
                to_return[k] = t
                bias_name = k.split("lora_")[0] + "bias"
                lora_bias_names.add(bias_name)
            elif "bias" in k:
                maybe_lora_bias[k] = t
        # Fix: iterate items() (the original unpacked dict *keys* into two
        # variables) and test each bias name itself rather than the stale
        # `bias_name` left over from the previous loop.
        for bias_name, t in maybe_lora_bias.items():
            if bias_name in lora_bias_names:
                to_return[bias_name] = t
    else:
        raise NotImplementedError
    to_return = {k: maybe_zero_3(v, ignore_status=True) for k, v in to_return.items()}
    return to_return
+
+
def get_peft_state_non_lora_maybe_zero_3(named_params, require_grad_only=True):
    """Collect all non-LoRA parameters, optionally restricted to trainable ones."""
    non_lora = {name: param for name, param in named_params if "lora_" not in name}
    if require_grad_only:
        non_lora = {name: param for name, param in non_lora.items() if param.requires_grad}
    # Gather through ZeRO-3 where needed and move everything to CPU.
    return {name: maybe_zero_3(param, ignore_status=True).cpu() for name, param in non_lora.items()}
+
+
def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match):
    """Collect parameters whose names contain any of the given key substrings."""
    matched = {name: param for name, param in named_params if any(key in name for key in keys_to_match)}
    # Gather through ZeRO-3 where needed and move everything to CPU.
    return {name: maybe_zero_3(param, ignore_status=True).cpu() for name, param in matched.items()}
+
+
def find_all_linear_names(model):
    """Return the short names of all nn.Linear modules eligible for LoRA.

    Modules under the multimodal projector/tower/resampler are skipped, and
    lm_head is dropped (kept out of LoRA; needed for 16-bit).
    """
    multimodal_keywords = ("mm_projector", "vision_tower", "vision_resampler")
    linear_names = set()
    for name, module in model.named_modules():
        if any(keyword in name for keyword in multimodal_keywords):
            continue
        if not isinstance(module, torch.nn.Linear):
            continue
        parts = name.split(".")
        # Keep only the leaf component of a dotted module path.
        linear_names.add(parts[-1] if len(parts) > 1 else parts[0])

    linear_names.discard("lm_head")  # needed for 16-bit
    return list(linear_names)
+
+
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str):
    """Collects the state dict and dump to disk.

    When only the multimodal adapter (mm_projector / vision_resampler) is being
    tuned, saves just those weights; otherwise defers to deepspeed's save path
    or writes a full CPU-copied state dict through the trainer.
    """
    if hasattr(trainer.args, "tune_mm_mlp_adapter") and trainer.args.tune_mm_mlp_adapter:
        check_only_save_mm_adapter_tunnable = True
    # only has mm_mlp_adapter and mm_vision_resampler in the tuneable parts
    elif hasattr(trainer.args, "mm_tunable_parts") and (len(trainer.args.mm_tunable_parts.split(",")) == 1 and ("mm_mlp_adapter" in trainer.args.mm_tunable_parts or "mm_vision_resampler" in trainer.args.mm_tunable_parts)):
        check_only_save_mm_adapter_tunnable = True
    else:
        check_only_save_mm_adapter_tunnable = False

    trainer.accelerator.wait_for_everyone()
    torch.cuda.synchronize()
    rank0_print(f"Only save projectors: {check_only_save_mm_adapter_tunnable}")
    if check_only_save_mm_adapter_tunnable:
        # Only save Adapter
        keys_to_match = ["mm_projector", "vision_resampler"]
        if getattr(trainer.args, "use_im_start_end", False):
            keys_to_match.extend(["embed_tokens", "embed_in"])

        weight_to_save = get_mm_adapter_state_maybe_zero_3(trainer.model.named_parameters(), keys_to_match)
        trainer.model.config.save_pretrained(output_dir)

        current_folder = output_dir.split("/")[-1]
        parent_folder = os.path.dirname(output_dir)
        # Only rank 0 (or single-process runs, local_rank == -1) writes to disk.
        if trainer.args.local_rank == 0 or trainer.args.local_rank == -1:
            if current_folder.startswith("checkpoint-"):
                # Collect per-checkpoint projector weights under a shared folder.
                mm_projector_folder = os.path.join(parent_folder, "mm_projector")
                os.makedirs(mm_projector_folder, exist_ok=True)
                torch.save(weight_to_save, os.path.join(mm_projector_folder, f"{current_folder}.bin"))
            else:
                torch.save(weight_to_save, os.path.join(output_dir, f"mm_projector.bin"))
        return

    if trainer.deepspeed:
        # DeepSpeed consolidates the state dict itself via trainer.save_model.
        trainer.save_model(output_dir)
        return

    state_dict = trainer.model.state_dict()
    if trainer.args.should_save:
        # Copy tensors to CPU first so saving does not hold GPU memory.
        cpu_state_dict = {key: value.cpu() for key, value in state_dict.items()}
        del state_dict
        trainer._save(output_dir, state_dict=cpu_state_dict)  # noqa
+
+
def smart_tokenizer_and_embedding_resize(
    special_tokens_dict: Dict,
    tokenizer: transformers.PreTrainedTokenizer,
    model: transformers.PreTrainedModel,
):
    """Resize tokenizer and embedding.

    Note: This is the unoptimized version that may make your embedding size not be divisible by 64.

    New rows of the input/output embedding matrices are initialized to the mean
    of the pre-existing embeddings rather than left random.
    """
    num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
    model.resize_token_embeddings(len(tokenizer))

    if num_new_tokens > 0:
        input_embeddings = model.get_input_embeddings().weight.data
        output_embeddings = model.get_output_embeddings().weight.data

        # Mean over the original rows only (everything but the appended ones).
        input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
        output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)

        input_embeddings[-num_new_tokens:] = input_embeddings_avg
        output_embeddings[-num_new_tokens:] = output_embeddings_avg
+
+
def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Tokenize each string individually, recording unpadded token lengths."""
    encoded = []
    for text in strings:
        encoded.append(
            tokenizer(
                text,
                return_tensors="pt",
                padding="longest",
                max_length=tokenizer.model_max_length,
                truncation=True,
            )
        )
    ids = [item.input_ids[0] for item in encoded]
    # Length counts non-pad tokens only.
    lengths = [item.input_ids.ne(tokenizer.pad_token_id).sum().item() for item in encoded]
    return dict(
        input_ids=ids,
        labels=ids,
        input_ids_lens=lengths,
        labels_lens=lengths,
    )
+
+
def _mask_targets(target, tokenized_lens, speakers):
    """Mask the header span and every human turn of `target` with IGNORE_INDEX."""
    # The first tokenized span is the header and is always masked.
    cur_idx = tokenized_lens[0]
    target[:cur_idx] = IGNORE_INDEX
    for tokenized_len, speaker in zip(tokenized_lens[1:], speakers):
        if speaker == "human":
            # Offset of 2 presumably skips the speaker-signal tokens — TODO confirm.
            target[cur_idx + 2 : cur_idx + tokenized_len] = IGNORE_INDEX
        cur_idx += tokenized_len
+
+
def _add_speaker_and_signal(header, source, get_conversation=True):
    """Prefix each sentence with its speaker role plus begin/end signals."""
    BEGIN_SIGNAL = "### "
    END_SIGNAL = "\n"
    conversation = header
    for sentence in source:
        origin = sentence["from"].lower()
        if origin == "human":
            speaker = conversation_lib.default_conversation.roles[0]
        elif origin == "gpt":
            speaker = conversation_lib.default_conversation.roles[1]
        else:
            speaker = "unknown"
        # Rewrite in place so downstream tokenization sees the signals too.
        sentence["value"] = BEGIN_SIGNAL + speaker + ": " + sentence["value"] + END_SIGNAL
        if get_conversation:
            conversation += sentence["value"]
    conversation += BEGIN_SIGNAL
    return conversation
+
+
def preprocess_multimodal(sources: Sequence[str], data_args: DataArguments) -> Dict:
    """Normalize image-token placement in every conversation turn.

    Moves an embedded DEFAULT_IMAGE_TOKEN to the start of the turn on its own
    line; for "mmtag" conversation versions wraps it in <Image></Image> tags;
    optionally surrounds it with the im_start/im_end tokens.
    """
    is_multimodal = data_args.is_multimodal
    if not is_multimodal:
        return sources

    for source in sources:
        for sentence in source:
            if DEFAULT_IMAGE_TOKEN in sentence["value"] and not sentence["value"].startswith(DEFAULT_IMAGE_TOKEN):
                # Hoist the image token to the front of the turn.
                sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, "").strip()
                sentence["value"] = DEFAULT_IMAGE_TOKEN + "\n" + sentence["value"]
                sentence["value"] = sentence["value"].strip()
            if "mmtag" in conversation_lib.default_conversation.version:
                # Fix: the tag literals had been lost (the original replaced the
                # token with "" + token + "", a no-op); restore the <Image>
                # wrapper that mmtag-style templates expect.
                sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, "<Image>" + DEFAULT_IMAGE_TOKEN + "</Image>")
            replace_token = DEFAULT_IMAGE_TOKEN
            if data_args.mm_use_im_start_end:
                replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
            sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, replace_token)

    return sources
+
+
def preprocess_multimodal_movie(sources: Sequence[str], data_args: DataArguments, video_inputs: str) -> Dict:
    """Replace the image token in each sentence with the rendered video inputs.

    Returns:
        (sources, prompt): prompt is the last sentence text stripped of the image
        token, or None when no sentence contained one.

    NOTE(review): when data_args.is_multimodal is False this returns only
    `sources` (no tuple), mirroring the original control flow — callers that
    unpack two values must only reach here with is_multimodal set; confirm.
    """
    is_multimodal = data_args.is_multimodal
    if not is_multimodal:
        return sources

    prompt = None  # Fix: was unbound if no sentence contained the image token.
    for source in sources:
        for sentence in source:
            if DEFAULT_IMAGE_TOKEN in sentence["value"]:
                prompt = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, "").strip()
                replace_token = video_inputs
                if data_args.mm_use_im_start_end:
                    replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
                sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, replace_token)

    return sources, prompt
+
+
def preprocess_llama_2(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False) -> Dict:
    """Render sources into LLaMA-2 chat format, tokenize, and mask prompt spans.

    Args:
        sources: List of conversations; each a list of {"from", "value"} turns.
        tokenizer: Tokenizer used for encoding and length accounting.
        has_image: If True, prompts go through tokenizer_image_token so image
            placeholders become IMAGE_TOKEN_INDEX.

    Returns:
        Dict with "input_ids" and "labels"; label positions belonging to the
        instruction are set to IGNORE_INDEX so loss is computed on answers only.
    """
    conv = conversation_lib.default_conversation.copy()
    roles = {"human": conv.roles[0], "gpt": conv.roles[1]}

    # Apply prompt templates
    conversations = []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != conv.roles[0]:
            # Skip the first one if it is not from human
            source = source[1:]

        conv.messages = []
        for j, sentence in enumerate(source):
            role = roles[sentence["from"]]
            assert role == conv.roles[j % 2], f"{i}"
            conv.append_message(role, sentence["value"])
        conversations.append(conv.get_prompt())

    # Tokenize conversations

    if has_image:
        input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversations], dim=0)
    else:
        input_ids = tokenizer(
            conversations,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        ).input_ids

    targets = input_ids.clone()

    assert conv.sep_style == conversation_lib.SeparatorStyle.LLAMA_2

    # Mask targets: split the rendered text into rounds on sep2 and, per round,
    # ignore everything up to and including the "[/INST] " separator.
    sep = "[/INST] "
    for conversation, target in zip(conversations, targets):
        total_len = int(target.ne(tokenizer.pad_token_id).sum())

        rounds = conversation.split(conv.sep2)
        cur_len = 1  # the first position (presumably BOS) is always ignored
        target[:cur_len] = IGNORE_INDEX
        for i, rou in enumerate(rounds):
            if rou == "":
                break

            parts = rou.split(sep)
            if len(parts) != 2:
                break
            parts[0] += sep

            if has_image:
                round_len = len(tokenizer_image_token(rou, tokenizer))
                # -2 presumably compensates for special tokens added by the
                # tokenizer around the instruction span — TODO confirm.
                instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2
            else:
                round_len = len(tokenizer(rou).input_ids)
                instruction_len = len(tokenizer(parts[0]).input_ids) - 2

            target[cur_len : cur_len + instruction_len] = IGNORE_INDEX

            cur_len += round_len
        target[cur_len:] = IGNORE_INDEX

        if cur_len < tokenizer.model_max_length:
            if cur_len != total_len:
                # Token accounting drifted; mask the whole sample rather than
                # train on misaligned labels.
                target[:] = IGNORE_INDEX
                rank0_print(f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}." f" (ignored)")

    return dict(
        input_ids=input_ids,
        labels=targets,
    )
+
+
def make_conv(prompt, answer):
    """Build a minimal two-turn conversation: human prompt followed by gpt answer."""
    human_turn = {"from": "human", "value": prompt}
    gpt_turn = {"from": "gpt", "value": answer}
    return [human_turn, gpt_turn]
+
+
def preprocess_gemma(sources: List[List[Dict[str, str]]], tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False) -> Dict:
    """Render sources with the Gemma chat template, tokenize, and mask prompts.

    Mirrors preprocess_llama_2, but rounds are re-joined in user/assistant pairs
    on conv.sep before masking everything except assistant answers.
    """
    conv: conversation_lib.Conversation = conversation_lib.default_conversation.copy()
    roles: Dict[str, str] = {"human": conv.roles[0], "gpt": conv.roles[1]}

    # Apply prompt templates
    conversations: List[str] = []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != conv.roles[0]:
            # Skip the first one if it is not from human
            source: List[Dict[str, str]] = source[1:]

        conv.messages = []
        for j, sentence in enumerate(source):
            role: str = roles[sentence["from"]]
            assert role == conv.roles[j % 2], f"{i}"
            conv.append_message(role, sentence["value"])
        conversations.append(conv.get_prompt())

    # Tokenize conversations
    if has_image:
        input_ids: torch.Tensor = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversations], dim=0)
    else:
        input_ids: torch.Tensor = tokenizer(
            conversations,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        ).input_ids

    targets: torch.Tensor = input_ids.clone()
    assert conv.sep_style == conversation_lib.SeparatorStyle.GEMMA

    # Mask target
    sep: str = conv.sep + conv.roles[1]
    for conversation, target in zip(conversations, targets):
        total_len: int = int(target.ne(tokenizer.pad_token_id).sum())

        rounds: List[str] = conversation.split(conv.sep)
        re_rounds = []
        # Re-join adjacent splits so each element is one full user+assistant round.
        for conv_idx in range(0, len(rounds), 2):
            re_rounds.append(conv.sep.join(rounds[conv_idx : conv_idx + 2]))

        cur_len = 1  # Ignore
        target[:cur_len] = IGNORE_INDEX
        for i, rou in enumerate(re_rounds):
            if rou == "":
                break

            parts = rou.split(sep)
            if len(parts) != 2:
                break
            parts[0] += sep  # Re-append sep because split on this
            # Now "".join(parts)==rou

            if has_image:
                round_len = len(tokenizer_image_token(rou, tokenizer)) - 1  # Ignore
                instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 1  # Ignore
            else:
                round_len = len(tokenizer(rou).input_ids) - 1  # Ignore
                instruction_len = len(tokenizer(parts[0]).input_ids) - 1  # Ignore

            round_len += 2  # sep: \n takes 2 tokens
            target[cur_len : cur_len + instruction_len] = IGNORE_INDEX
            cur_len += round_len

        target[cur_len:] = IGNORE_INDEX

        if cur_len < tokenizer.model_max_length:
            if cur_len != total_len:
                # Token accounting drifted; mask the whole sample rather than
                # train on misaligned labels.
                target[:] = IGNORE_INDEX
                rank0_print(f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}." f" (ignored)")

    return dict(
        input_ids=input_ids,
        labels=targets,
    )
+
+
def preprocess_qwen(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False, max_len=2048, system_message: str = "You are a helpful assistant.") -> Dict:
    """Tokenize sources with the Qwen ChatML template and build masked labels.

    Each turn is wrapped in <|im_start|>...<|im_end|>; the system prompt and
    user turns are masked with IGNORE_INDEX, assistant answers keep their
    tokens. With has_image, a leading image token in a user turn is replaced by
    IMAGE_TOKEN_INDEX.
    """
    roles = {"human": "<|im_start|>user", "gpt": "<|im_start|>assistant"}

    im_start, im_end = tokenizer.additional_special_tokens_ids
    nl_tokens = tokenizer("\n").input_ids
    _system = tokenizer("system").input_ids + nl_tokens
    _user = tokenizer("user").input_ids + nl_tokens
    _assistant = tokenizer("assistant").input_ids + nl_tokens

    # Apply prompt templates
    input_ids, targets = [], []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != roles["human"]:
            # Skip a leading non-human turn.
            source = source[1:]

        input_id, target = [], []
        system = [im_start] + _system + tokenizer(system_message).input_ids + [im_end] + nl_tokens
        input_id += system
        target += [im_start] + [IGNORE_INDEX] * (len(system) - 3) + [im_end] + nl_tokens
        assert len(input_id) == len(target)
        for j, sentence in enumerate(source):
            role = roles[sentence["from"]]
            # Fix: the image-token literal had been lost ('"" in s' matches every
            # string and len("") is 0); use DEFAULT_IMAGE_TOKEN so only genuine
            # image turns take this path.
            if has_image and DEFAULT_IMAGE_TOKEN in sentence["value"]:
                assert sentence["value"].startswith(DEFAULT_IMAGE_TOKEN), print(sentence["value"])

                _input_id = tokenizer(role).input_ids + nl_tokens + [IMAGE_TOKEN_INDEX] + nl_tokens + tokenizer(sentence["value"][len(DEFAULT_IMAGE_TOKEN) :]).input_ids + [im_end] + nl_tokens
            else:
                _input_id = tokenizer(role).input_ids + nl_tokens + tokenizer(sentence["value"]).input_ids + [im_end] + nl_tokens
            input_id += _input_id
            if role == "<|im_start|>user":
                _target = [im_start] + [IGNORE_INDEX] * (len(_input_id) - 3) + [im_end] + nl_tokens
            elif role == "<|im_start|>assistant":
                # Keep the assistant's answer tokens; mask only its role header.
                _target = [im_start] + [IGNORE_INDEX] * len(tokenizer(role).input_ids) + _input_id[len(tokenizer(role).input_ids) + 1 : -2] + [im_end] + nl_tokens
            else:
                raise NotImplementedError
            target += _target
        assert len(input_id) == len(target)
        # input_id += [tokenizer.pad_token_id] * (max_len - len(input_id))
        # target += [IGNORE_INDEX] * (max_len - len(target))
        input_ids.append(input_id)
        targets.append(target)
    input_ids = torch.tensor(input_ids, dtype=torch.long)
    targets = torch.tensor(targets, dtype=torch.long)

    return dict(
        input_ids=input_ids,  # tensor(bs x seq_len)
        labels=targets,  # tensor(bs x seq_len)
        # attention_mask=input_ids.ne(tokenizer.pad_token_id), # tensor(bs x seq_len)
    )
+
+
def preprocess_llama3(
    sources,
    tokenizer: transformers.PreTrainedTokenizer,
    has_image: bool = False,
    max_len=2048,
    system_message: str = "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.",
) -> Dict:
    """Tokenize sources with the Llama-3 header template and build masked labels.

    The system prompt and user turns are masked with IGNORE_INDEX; assistant
    answers keep their tokens (plus the closing <|eot_id|>). With has_image, a
    leading image token in a user turn becomes IMAGE_TOKEN_INDEX.
    """
    roles = {"human": "<|start_header_id|>user<|end_header_id|>", "gpt": "<|start_header_id|>assistant<|end_header_id|>"}

    eot_id = tokenizer.convert_tokens_to_ids("<|eot_id|>")
    nl_tokens = tokenizer("\n").input_ids

    # Apply prompt templates
    input_ids, targets = [], []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != roles["human"]:
            # Skip a leading non-human turn.
            source = source[1:]

        input_id, target = [], []
        system = tokenizer("<|begin_of_text|>").input_ids + tokenizer("<|start_header_id|>system<|end_header_id|>").input_ids + nl_tokens * 2 + tokenizer(system_message).input_ids + [eot_id]
        input_id += system
        target += [IGNORE_INDEX] * len(system)
        for j, sentence in enumerate(source):
            role = roles[sentence["from"]]
            # Fix: the image-token literal had been lost ('"" in s' matches every
            # string and len("") is 0); use DEFAULT_IMAGE_TOKEN so only genuine
            # image turns take this path.
            if has_image and DEFAULT_IMAGE_TOKEN in sentence["value"]:
                assert sentence["value"].startswith(DEFAULT_IMAGE_TOKEN), print(sentence["value"])
                _input_id = tokenizer(role).input_ids + nl_tokens * 2 + [IMAGE_TOKEN_INDEX] + tokenizer(sentence["value"][len(DEFAULT_IMAGE_TOKEN) :]).input_ids + [eot_id]
            else:
                _input_id = tokenizer(role).input_ids + nl_tokens * 2 + tokenizer(sentence["value"]).input_ids + [eot_id]
            input_id += _input_id
            if role == "<|start_header_id|>user<|end_header_id|>":
                _target = [IGNORE_INDEX] * len(_input_id)
            elif role == "<|start_header_id|>assistant<|end_header_id|>":
                # Keep the assistant's answer tokens; mask only its role header.
                _target = [IGNORE_INDEX] * (len(tokenizer(role).input_ids) + 2) + _input_id[len(tokenizer(role).input_ids) + 2 : -1] + [eot_id]
            else:
                raise NotImplementedError
            target += _target
        assert len(input_id) == len(target), f"{len(input_id)} != {len(target)}"
        input_ids.append(input_id)
        targets.append(target)
    input_ids = torch.tensor(input_ids, dtype=torch.long)
    targets = torch.tensor(targets, dtype=torch.long)

    return dict(
        input_ids=input_ids,  # tensor(bs x seq_len)
        labels=targets,  # tensor(bs x seq_len)
    )
+
+
def preprocess_v1(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False) -> Dict:
    """Tokenize conversations rendered with the vicuna-v1 (SeparatorStyle.TWO) template.

    Renders each conversation through ``conversation_lib.default_conversation``,
    tokenizes it, then masks every token except the assistant responses with
    IGNORE_INDEX. Returns a dict with ``input_ids`` and ``labels``.
    """
    conv = conversation_lib.default_conversation.copy()
    roles = {"human": conv.roles[0], "gpt": conv.roles[1]}

    # Apply prompt templates
    conversations = []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != conv.roles[0]:
            # Skip the first one if it is not from human
            source = source[1:]

        conv.messages = []
        for j, sentence in enumerate(source):
            role = roles[sentence["from"]]
            # Turns must strictly alternate human/gpt.
            assert role == conv.roles[j % 2], f"{i}"
            conv.append_message(role, sentence["value"])
        conversations.append(conv.get_prompt())

    # Tokenize conversations

    if has_image:
        # tokenizer_image_token replaces <image> placeholders with IMAGE_TOKEN_INDEX.
        input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversations], dim=0)
    else:
        input_ids = tokenizer(
            conversations,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        ).input_ids

    targets = input_ids.clone()

    assert conv.sep_style == conversation_lib.SeparatorStyle.TWO

    # Mask targets: everything up to (and including) each "ASSISTANT: " header
    # is set to IGNORE_INDEX; only response tokens are supervised.
    sep = conv.sep + conv.roles[1] + ": "
    for conversation, target in zip(conversations, targets):
        total_len = int(target.ne(tokenizer.pad_token_id).sum())

        # sep2 terminates each assistant response, so each round is one (user, assistant) pair.
        rounds = conversation.split(conv.sep2)
        cur_len = 1  # presumably skips the BOS token — tokenizer dependent
        target[:cur_len] = IGNORE_INDEX
        for i, rou in enumerate(rounds):
            if rou == "":
                break

            parts = rou.split(sep)
            if len(parts) != 2:
                break
            parts[0] += sep

            if has_image:
                round_len = len(tokenizer_image_token(rou, tokenizer))
                # -2 offsets BOS/sep token counting — matches upstream LLaVA; do not change lightly.
                instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2
            else:
                round_len = len(tokenizer(rou).input_ids)
                instruction_len = len(tokenizer(parts[0]).input_ids) - 2

            # Tokenizers >= 0.14 with legacy=False tokenize mid-string rounds
            # with one fewer token; compensate.
            if i != 0 and not tokenizer.legacy and IS_TOKENIZER_GREATER_THAN_0_14:
                round_len -= 1
                instruction_len -= 1

            target[cur_len : cur_len + instruction_len] = IGNORE_INDEX

            cur_len += round_len
        target[cur_len:] = IGNORE_INDEX

        if cur_len < tokenizer.model_max_length:
            if cur_len != total_len:
                # Count mismatch means the masking offsets are wrong for this
                # tokenizer; drop the sample from the loss rather than train on
                # misaligned labels.
                target[:] = IGNORE_INDEX
                print(f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}." f" (ignored)")

    return dict(
        input_ids=input_ids,
        labels=targets,
    )
+
+
def preprocess_mpt(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False) -> Dict:
    """Tokenize conversations rendered with the MPT (SeparatorStyle.MPT) template.

    Same contract as ``preprocess_v1`` but rounds are re-grouped because the MPT
    template uses a single separator: the first re-round is system+user+gpt, each
    following re-round is one user+gpt pair. Returns ``input_ids`` and ``labels``.
    """
    conv = conversation_lib.default_conversation.copy()
    roles = {"human": conv.roles[0], "gpt": conv.roles[1]}

    # Apply prompt templates
    conversations = []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != conv.roles[0]:
            # Skip the first one if it is not from human
            source = source[1:]

        conv.messages = []
        for j, sentence in enumerate(source):
            role = roles[sentence["from"]]
            # Turns must strictly alternate human/gpt.
            assert role == conv.roles[j % 2], f"{i}"
            conv.append_message(role, sentence["value"])
        conversations.append(conv.get_prompt())

    # Tokenize conversations

    if has_image:
        input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversations], dim=0)
    else:
        input_ids = tokenizer(
            conversations,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        ).input_ids

    targets = input_ids.clone()
    assert conv.sep_style == conversation_lib.SeparatorStyle.MPT

    # Mask targets
    sep = conv.sep + conv.roles[1]
    for conversation, target in zip(conversations, targets):
        total_len = int(target.ne(tokenizer.pad_token_id).sum())

        rounds = conversation.split(conv.sep)
        re_rounds = [conv.sep.join(rounds[:3])]  # system + user + gpt
        for conv_idx in range(3, len(rounds), 2):
            re_rounds.append(conv.sep.join(rounds[conv_idx : conv_idx + 2]))  # user + gpt
        cur_len = 1  # presumably skips the BOS token — tokenizer dependent
        target[:cur_len] = IGNORE_INDEX
        for i, rou in enumerate(re_rounds):
            if rou == "":
                break

            parts = rou.split(sep)
            if len(parts) != 2:
                break
            parts[0] += sep

            if has_image:
                round_len = len(tokenizer_image_token(rou, tokenizer))
                instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 1
            else:
                round_len = len(tokenizer(rou).input_ids)
                instruction_len = len(tokenizer(parts[0]).input_ids) - 1

            # NOTE(review): preprocess_v1 adjusts when legacy is False, this one
            # when legacy is True — matches upstream LLaVA, but worth confirming.
            if i != 0 and getattr(tokenizer, "legacy", False) and IS_TOKENIZER_GREATER_THAN_0_14:
                round_len += 1
                instruction_len += 1

            target[cur_len : cur_len + instruction_len] = IGNORE_INDEX

            cur_len += round_len
        target[cur_len:] = IGNORE_INDEX

        if cur_len < tokenizer.model_max_length:
            if cur_len != total_len:
                # Misaligned masking: drop the whole sample from the loss.
                target[:] = IGNORE_INDEX
                print(f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}." f"(#turns={len(re_rounds)} ignored)")

    return dict(
        input_ids=input_ids,
        labels=targets,
    )
+
+
def preprocess_plain(
    sources: Sequence[str],
    tokenizer: transformers.PreTrainedTokenizer,
) -> Dict:
    """Plain (pretraining-style) preprocessing: image token + caption + separator.

    Each source must be exactly one (prompt, caption) pair whose prompt contains
    the image token; the prompt side is reduced to the bare image token and
    masked out of the labels. Note: mutates ``sources`` in place.
    """
    sep = conversation_lib.default_conversation.sep
    conversations = []
    for source in sources:
        assert len(source) == 2
        assert DEFAULT_IMAGE_TOKEN in source[0]["value"]
        # Discard any instruction text — keep only the image placeholder.
        source[0]["value"] = DEFAULT_IMAGE_TOKEN
        conversations.append(source[0]["value"] + source[1]["value"] + sep)

    # Tokenize, replacing the image placeholder with IMAGE_TOKEN_INDEX.
    input_ids = [tokenizer_image_token(text, tokenizer, return_tensors="pt") for text in conversations]
    targets = copy.deepcopy(input_ids)
    for target, source in zip(targets, sources):
        # Only the caption (and trailing separator) is supervised.
        prompt_len = len(tokenizer_image_token(source[0]["value"], tokenizer))
        target[:prompt_len] = IGNORE_INDEX

    return dict(input_ids=input_ids, labels=targets)
+
+
def preprocess(sources: Sequence[str], tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False) -> Dict:
    """
    Given a list of sources, each is a conversation list. This transform:
    1. Add signal '### ' at the beginning each sentence, with end signal '\n';
    2. Concatenate conversations together;
    3. Tokenize the concatenated conversation;
    4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.

    Dispatches to the template-specific routine selected by the active default
    conversation; the body below is only the legacy '### speaker' fallback.
    """
    conv = conversation_lib.default_conversation
    if conv.sep_style == conversation_lib.SeparatorStyle.PLAIN:
        return preprocess_plain(sources, tokenizer)
    if conv.sep_style == conversation_lib.SeparatorStyle.LLAMA_2:
        return preprocess_llama_2(sources, tokenizer, has_image=has_image)
    if conv.version.startswith("v1"):
        return preprocess_v1(sources, tokenizer, has_image=has_image)
    if conv.version == "mpt":
        return preprocess_mpt(sources, tokenizer, has_image=has_image)
    if conv.version == "qwen":
        return preprocess_qwen(sources, tokenizer, has_image=has_image)
    if conv.version == "gemma":
        return preprocess_gemma(sources, tokenizer, has_image=has_image)
    if conv.version == "llama_v3":
        return preprocess_llama3(sources, tokenizer, has_image=has_image)

    # Legacy fallback: prepend speaker signals and concatenate the turns.
    header = f"{conv.system}\n\n"
    conversations = [_add_speaker_and_signal(header, source) for source in sources]

    # Tokenize the assembled conversations.
    if has_image:
        input_ids = [tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversations]
    else:
        input_ids = _tokenize_fn(conversations, tokenizer)["input_ids"]

    targets = copy.deepcopy(input_ids)
    for target, source in zip(targets, sources):
        # Per-segment token lengths drive the masking of human turns.
        segments = [header] + [s["value"] for s in source]
        if has_image:
            tokenized_lens = [len(tokenizer_image_token(seg, tokenizer)) for seg in segments]
        else:
            tokenized_lens = _tokenize_fn(segments, tokenizer)["input_ids_lens"]
        speakers = [sentence["from"] for sentence in source]
        _mask_targets(target, tokenized_lens, speakers)

    return dict(input_ids=input_ids, labels=targets)
+
+
def load_data(data_path):
    """Load a dataset file, dispatching on its extension.

    Args:
        data_path: path to a ``.jsonl`` file (loaded line-by-line via
            ``load_jsonl``) or any other JSON file (loaded via ``load_json``).

    Returns:
        The list of samples produced by the underlying loader.
    """
    # BUG FIX: the original tested `"jsonl" in data_path`, which misroutes any
    # path that merely contains the substring (e.g. a "jsonl_data/" directory
    # holding plain .json files). Dispatch on the actual file extension.
    if data_path.endswith(".jsonl"):
        data_list = load_jsonl(data_path)
    else:
        data_list = load_json(data_path)
    return data_list
+
+
class DPODataset(Dataset):
    """Dataset for DPODataset fine-tuning.

    Serves preference samples (dicts with "prompt", "chosen", "rejected" and
    optionally an "image"/"video" reference). Media are decoded and
    preprocessed in ``_get_item``; text tokenization is deferred to the
    collator.
    """

    def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer, data_args: DataArguments):
        """Load samples from ``data_path``.

        ``data_path`` may be a brace pattern "prefix{a,b}.json", a YAML
        manifest listing datasets with per-file sampling strategies, or a
        single JSON/JSONL file.
        """
        super(DPODataset, self).__init__()
        # Handle multiple JSON files specified in the data_path
        self.list_data_dict = []

        if "{" in data_path and "}" in data_path:
            # Brace expansion: "prefix{a,b}.json" -> prefixa.json, prefixb.json
            base_path, file_pattern = re.match(r"^(.*)\{(.*)\}\.json$", data_path).groups()
            file_names = file_pattern.split(",")
            rank0_print(f"Loading {file_names} from {base_path}")
            data_args.dataset_paths = []
            for file_name in file_names:
                data_args.dataset_paths.append(f"{base_path}{file_name}.json")
                full_path = f"{base_path}{file_name}.json"
                rank0_print(f"Loading {full_path}")
                cur_data_dict = load_data(full_path)
                rank0_print(f"Loaded {len(cur_data_dict)} samples from {full_path}")
                self.list_data_dict.extend(cur_data_dict)
        elif data_path.endswith(".yaml"):
            with open(data_path, "r") as file:
                yaml_data = yaml.safe_load(file)
                datasets = yaml_data.get("datasets")
                # file should be in the format of:
                # datasets:
                #   - json_path: xxxx1.json
                #     sampling_strategy: first:1000
                #   - json_path: xxxx2.json
                #     sampling_strategy: end:3000
                #   - json_path: xxxx3.json
                #     sampling_strategy: random:999
                data_args.dataset_paths = [dataset.get("json_path") for dataset in datasets]
                for dataset in datasets:
                    json_path = dataset.get("json_path")
                    sampling_strategy = dataset.get("sampling_strategy", "all")
                    sampling_number = None

                    rank0_print(f"Loading {json_path} with {sampling_strategy} sampling strategy")
                    cur_data_dict = load_data(json_path)

                    if ":" in sampling_strategy:
                        sampling_strategy, sampling_number = sampling_strategy.split(":")
                        # "N%" means a percentage of the file, otherwise an absolute count.
                        if "%" in sampling_number:
                            sampling_number = math.ceil(int(sampling_number.split("%")[0]) * len(cur_data_dict) / 100)
                        else:
                            sampling_number = int(sampling_number)

                    # Apply the sampling strategy
                    if sampling_strategy == "first" and sampling_number is not None:
                        cur_data_dict = cur_data_dict[:sampling_number]
                    elif sampling_strategy == "end" and sampling_number is not None:
                        cur_data_dict = cur_data_dict[-sampling_number:]
                    elif sampling_strategy == "random" and sampling_number is not None:
                        # NOTE(review): unseeded shuffle — presumably each rank
                        # loads identical data only if seeding happens elsewhere; confirm.
                        random.shuffle(cur_data_dict)
                        cur_data_dict = cur_data_dict[:sampling_number]

                    rank0_print(f"Loaded {len(cur_data_dict)} samples from {json_path}")
                    self.list_data_dict.extend(cur_data_dict)
        else:
            # Single JSON/JSONL file.
            data_args.dataset_paths = [data_path]
            rank0_print(f"Loading {data_path}")
            cur_data_dict = load_data(data_path)
            rank0_print(f"Loaded {len(cur_data_dict)} samples from {data_path}")
            self.list_data_dict.extend(cur_data_dict)

        rank0_print("Formatting inputs...Skip in lazy mode")
        self.tokenizer = tokenizer
        self.data_args = data_args

    def __len__(self):
        return len(self.list_data_dict)

    @property
    def lengths(self):
        """Approximate per-sample token lengths (whitespace word counts), used
        by length-grouped samplers; +128 for samples that carry an image."""
        length_list = []
        for sample in self.list_data_dict:
            # Calculate the length of the prompt, answer, chosen, and rejected text
            cur_len = len(sample["prompt"].split()) + len(sample["answer"].split()) + len(sample["chosen"].split()) + len(sample["rejected"].split())
            # Add additional tokens if an image is present
            img_tokens = 128 if "image" in sample else 0
            length_list.append(cur_len + img_tokens)
        return length_list

    @property
    def modality_lengths(self):
        """Like ``lengths`` but signed: positive for multimodal samples,
        negative for text-only ones (modality-grouped sampler convention)."""
        length_list = []
        for sample in self.list_data_dict:
            # Calculate the length of the prompt, answer, chosen, and rejected text
            cur_len = len(sample["prompt"].split()) + len(sample["answer"].split()) + len(sample["chosen"].split()) + len(sample["rejected"].split())
            # If the sample includes a video, the length is positive; otherwise, it is negative
            cur_len = cur_len if ("video" in sample or "image" in sample) else -cur_len
            length_list.append(cur_len)
        return length_list

    def process_image(self, image_file):
        """Open ``image_file`` (relative to data_args.image_folder) and preprocess
        it per data_args.image_aspect_ratio.

        Returns:
            (processed_image, original (width, height), "image")
        """
        image_folder = self.data_args.image_folder
        processor = self.data_args.image_processor
        # print(f"\n\nInspecting the image path, folder = {image_folder}, image={image_file}\n\n")
        try:
            image = Image.open(os.path.join(image_folder, image_file)).convert("RGB")
        except Exception as exn:
            print(f"Failed to open image {image_file}. Exception:", exn)
            raise exn

        image_size = image.size
        if self.data_args.image_aspect_ratio == "highres":
            image = process_highres_image(image, self.data_args.image_processor, self.data_args.image_grid_pinpoints)
        # NOTE(review): the == "anyres" test is redundant — the substring test
        # that follows already covers it.
        elif self.data_args.image_aspect_ratio == "anyres" or "anyres" in self.data_args.image_aspect_ratio:
            image = process_anyres_image(image, self.data_args.image_processor, self.data_args.image_grid_pinpoints)
        elif self.data_args.image_aspect_ratio == "crop_split":
            image = process_highres_image_crop_split(image, self.data_args)
        elif self.data_args.image_aspect_ratio == "pad":

            def expand2square(pil_img, background_color):
                # Pad the shorter side so the image becomes square, centered.
                width, height = pil_img.size
                if width == height:
                    return pil_img
                elif width > height:
                    result = Image.new(pil_img.mode, (width, width), background_color)
                    result.paste(pil_img, (0, (width - height) // 2))
                    return result
                else:
                    result = Image.new(pil_img.mode, (height, height), background_color)
                    result.paste(pil_img, ((height - width) // 2, 0))
                    return result

            image = expand2square(image, tuple(int(x * 255) for x in processor.image_mean))
            image = processor.preprocess(image, return_tensors="pt")["pixel_values"][0]
        else:
            image = processor.preprocess(image, return_tensors="pt")["pixel_values"][0]
        return image, image_size, "image"

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        """Fetch sample ``i`` with retries; on persistent failure, try the next
        sample before giving up."""
        # TODO: define number of retries somewhere else
        num_base_retries = 3
        num_final_retries = 300

        # try the current sample first
        for attempt_idx in range(num_base_retries):
            try:
                sample = self._get_item(i)
                return sample
            except Exception as e:
                # sleep 1s in case it is a cloud disk issue
                print(f"[Try #{attempt_idx}] Failed to fetch sample {i}. Exception:", e)
                time.sleep(1)

        # try other samples, in case it is file corruption issue
        for attempt_idx in range(num_base_retries):
            try:
                next_index = min(i + 1, len(self.list_data_dict) - 1)
                # sample_idx = random.choice(range(len(self)))
                sample = self._get_item(next_index)
                return sample
            except Exception as e:
                # no need to sleep
                print(f"[Try other #{attempt_idx}] Failed to fetch sample {next_index}. Exception:", e)
                pass

        # still fail, most likely to be path issue or cloud disk issue, retry the same sample for longer
        # for attempt_idx in range(num_final_retries):
        #     try:
        #         sample = self._get_item(i)
        #         return sample
        #     except Exception as e:
        #         # sleep 1s in case it is a cloud disk issue
        #         print(f"[Final try #{attempt_idx}] Failed to fetch sample {i}. Exception:", e)
        #         time.sleep(1)

        # Finally raise exception on failing.
        assert False, "Failed to fetch sample."

    def _get_item(self, i) -> Dict[str, torch.Tensor]:
        """Build one training dict: copies the raw sample and attaches the
        processed media under "image" plus a "has_image" flag."""
        sources = self.list_data_dict[i]
        if isinstance(i, int):
            sources = [sources]
        assert len(sources) == 1, "Don't know why it is wrapped to a list"  # FIXME

        suffix = None
        if "image" in sources[0]:
            image_file = self.list_data_dict[i]["image"]
            if type(image_file) is list:
                image = [self.process_image(f) for f in image_file]
            else:
                image = [self.process_image(image_file)]
            # sources = preprocess_multimodal(copy.deepcopy([e["conversations"] for e in sources]), self.data_args)

        elif "video" in sources[0]:  # FIXME: This logic should be largely improved by Yuanhan. It's too messy now.
            video_file = self.list_data_dict[i]["video"]
            video_folder = self.data_args.video_folder
            video_file = os.path.join(video_folder, video_file)
            suffix = video_file.split(".")[-1]
            if not os.path.exists(video_file):
                print("File {} not exist!".format(video_file))

            if suffix == "pkl":
                # Pre-extracted features: tensor of frame features (column 0 dropped).
                video_info = pickle.load(open(video_file, "rb"))
                image = torch.from_numpy(video_info["feats"][:, 1:])
                input_prompt = video_info["inputs"].replace("...", "")
                # replace the default image token with multiple tokens
                input_prompt = input_prompt.replace(DEFAULT_IMAGE_TOKEN, DEFAULT_IMAGE_TOKEN * self.data_args.video_token)
                sources, query_prompt = preprocess_multimodal_movie(copy.deepcopy([e["conversations"] for e in sources]), self.data_args, input_prompt)
            else:  # using videoreader
                if "shareVideoGPTV" not in video_file and "liangke" not in video_file:
                    # Decode with decord at ~video_fps, optionally capped at frames_upbound.
                    vr = VideoReader(video_file, ctx=cpu(0))
                    total_frame_num = len(vr)
                    avg_fps = round(vr.get_avg_fps() / self.data_args.video_fps)
                    frame_idx = [i for i in range(0, total_frame_num, avg_fps)]
                    if self.data_args.frames_upbound > 0:
                        if len(frame_idx) > self.data_args.frames_upbound:
                            uniform_sampled_frames = np.linspace(0, total_frame_num - 1, self.data_args.frames_upbound, dtype=int)
                            frame_idx = uniform_sampled_frames.tolist()
                    video = vr.get_batch(frame_idx).asnumpy()
                    video = np.array(video)
                else:
                    # Frame-directory datasets: sample 10 frames uniformly from files.
                    if "liangke" in video_file:
                        video_file = self.list_data_dict[i]["video"]
                    frame_files = [os.path.join(video_file, f) for f in os.listdir(video_file) if os.path.isfile(os.path.join(video_file, f))]
                    frame_files.sort()  # Ensure the frames are sorted if they are named sequentially

                    # TODO: Hard CODE: Determine the indices for uniformly sampling 10 frames
                    num_frames_to_sample = 10

                    total_frames = len(frame_files)

                    sampled_indices = np.linspace(0, total_frames - 1, num_frames_to_sample, dtype=int)

                    # Read and store the sampled frames
                    video = []
                    for idx in sampled_indices:
                        frame_path = frame_files[idx]
                        try:
                            with Image.open(frame_path) as img:
                                frame = img.convert("RGB")
                                video.append(frame)
                        except IOError:
                            print(f"Failed to read frame at path: {frame_path}")

                processor = self.data_args.image_processor
                image = processor.preprocess(video, return_tensors="pt")["pixel_values"]
                image = [(image, video[0].size, "video")]
                # sources = preprocess_multimodal(copy.deepcopy([e["conversations"] for e in sources]), self.data_args)

        else:
            sources = copy.deepcopy([e["conversations"] for e in sources])

        has_image = ("image" in self.list_data_dict[i]) or ("video" in self.list_data_dict[i])
        # data_dict = preprocess(sources, self.tokenizer, has_image=has_image)
        data_dict = copy.deepcopy(self.list_data_dict[i])  # inplace modification following

        if "prompt" in data_dict:
            prompt = data_dict["prompt"]
            # NOTE(review): both empty-string literals below look like a stripped
            # "<image>" token (upstream strips DEFAULT_IMAGE_TOKEN then re-prepends
            # it); as written, replace("", "") is a no-op and only "\n" is
            # prepended — confirm against the upstream source.
            prompt = prompt.replace("", "").strip()
            prompt = "\n" + prompt
            data_dict["prompt"] = prompt
        else:
            prompt = None

        if suffix == "pkl":
            prompt = [query_prompt]

        # image exist in the data
        if "image" in self.list_data_dict[i]:
            data_dict["image"] = image
        elif "video" in self.list_data_dict[i]:
            data_dict["image"] = image
        elif self.data_args.is_multimodal:
            # image does not exist in the data, but the model is multimodal
            crop_size = self.data_args.image_processor.crop_size
            data_dict["image"] = [
                (torch.zeros(1, 3, crop_size["height"], crop_size["width"]), (crop_size["width"], crop_size["height"]), "text"),
            ]
        # prompt exist in the data
        data_dict["has_image"] = has_image
        return data_dict
+
+
@dataclass
class DPODataCollator(DPODataCollatorWithPadding):
    """Collate examples for DPO fine-tuning.

    Tokenizes (prompt, chosen, rejected) triples via the template-aware
    ``preprocess`` dispatcher, pads chosen/rejected sequences into batch
    tensors, and forwards image tensors/sizes/modalities untouched.
    """

    # tokenizer: transformers.PreTrainedTokenizer

    def collate(self, batch):
        """Pad the per-sample dicts into batch tensors.

        Keys ending in ``_input_ids`` are padded with the tokenizer pad id,
        ``_labels`` with ``label_pad_token_id``; attention masks are rebuilt
        from the padded input ids. All other keys are passed through as lists.
        """
        # first, pad everything to the same length
        # input_ids, labels = tuple([instance[key] for instance in instances]
        #                           for key in ("input_ids", "labels"))
        # input_ids = torch.nn.utils.rnn.pad_sequence(
        #     input_ids,
        #     batch_first=True,
        #     padding_value=self.tokenizer.pad_token_id)
        # labels = torch.nn.utils.rnn.pad_sequence(labels,
        #                                          batch_first=True,
        #                                          padding_value=IGNORE_INDEX)
        # input_ids = input_ids[:, :self.tokenizer.model_max_length]
        # labels = labels[:, :self.tokenizer.model_max_length]
        # batch = dict(
        #     input_ids=input_ids,
        #     labels=labels,
        #     attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
        # )
        padded_batch = {}
        for k in batch[0].keys():
            if k.endswith("_input_ids") or k.endswith("_attention_mask") or k.endswith("_labels"):
                # if "prompt" in k:
                #     to_pad = [torch.LongTensor(ex[k][::-1]) for ex in batch]
                # else:
                to_pad = [torch.LongTensor(ex[k]) for ex in batch]
                if k.endswith("_input_ids"):
                    padding_value = self.tokenizer.pad_token_id
                elif k.endswith("_labels"):
                    padding_value = self.label_pad_token_id
                else:
                    # NOTE: stale *_attention_mask keys are skipped here and
                    # rebuilt from the padded input ids below.
                    continue
                # elif k.endswith("_attention_mask"):
                #     padding_value = self.padding_value
                # else:
                #     raise ValueError(f"Unexpected key in batch '{k}'")

                padded_batch[k] = torch.nn.utils.rnn.pad_sequence(to_pad, batch_first=True, padding_value=padding_value)
                # for the prompt, flip back so padding is on left side
                # if "prompt" in k:
                #     padded_batch[k] = padded_batch[k].flip(dims=[1])
            else:
                padded_batch[k] = [ex[k] for ex in batch]
        for k in ["chosen_input_ids", "rejected_input_ids"]:
            attn_k = k.replace("input_ids", "attention_mask")
            padded_batch[attn_k] = padded_batch[k].ne(self.tokenizer.pad_token_id)
        return padded_batch

    def tokenize_batch_element(self, prompt: str, chosen: str, rejected: str, has_image: bool = True) -> Dict:
        """Tokenize a single batch element.

        At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation
        in case the prompt + chosen or prompt + rejected responses is/are too long. First
        we truncate the prompt; if we're still too long, we truncate the chosen/rejected.

        We also create the labels for the chosen/rejected responses, which are of length equal to
        the sum of the length of the prompt and the chosen/rejected response, with
        label_pad_token_id for the prompt tokens.

        Returns a flat dict with keys like "chosen_input_ids", "rejected_labels", ...
        """
        # import pdb; pdb.set_trace()
        batch = {}

        # Render prompt+response as a conversation and run the template-aware preprocessor.
        chosen_sources = make_conv(prompt, chosen)
        rejected_sources = make_conv(prompt, rejected)
        chosen_data_dict = preprocess([chosen_sources], self.tokenizer, has_image=has_image)
        # chosen_data_dict['attention_mask'] = chosen_data_dict["input_ids"].ne(self.tokenizer.pad_token_id)

        rejected_data_dict = preprocess([rejected_sources], self.tokenizer, has_image=has_image)
        # rejected_data_dict['attention_mask'] = rejected_data_dict["input_ids"].ne(self.tokenizer.pad_token_id)

        # preprocess() returns batched (length-1) outputs; unwrap to single sequences.
        chosen_data_dict = {k: v[0] for k, v in chosen_data_dict.items()}
        rejected_data_dict = {k: v[0] for k, v in rejected_data_dict.items()}

        for k, toks in {
            "chosen": chosen_data_dict,
            "rejected": rejected_data_dict,
        }.items():
            for type_key, tokens in toks.items():
                if type_key == "token_type_ids":
                    continue
                batch[f"{k}_{type_key}"] = tokens
        return batch

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Tokenize every (prompt, chosen, rejected) triple, pad into a batch,
        and attach image tensors / sizes / modalities when present."""
        tokenized_batch = []
        Xs, keys = [], []
        for feature in features:
            prompt = feature["prompt"]
            chosen = feature["chosen"]
            rejected = feature["rejected"]
            has_image = feature["has_image"]
            # Xs.append(feature[has_X])
            # keys.append(has_X)

            batch_element = self.tokenize_batch_element(prompt, chosen, rejected, has_image=has_image)
            tokenized_batch.append(batch_element)

        # return collated batch
        padded_batch = self.collate(tokenized_batch)
        # import pdb;pdb.set_trace()
        if "image" in features[0]:
            # instances[1]['image'][0][0].shape
            # torch.Size([5, 3, 224, 224])
            # Each sample's "image" is a list of (tensor, size, modality) triples; flatten.
            images = [instance["image"] for instance in features]

            padded_batch["image_sizes"] = [im[1] for im_list in images for im in im_list]
            padded_batch["modalities"] = [im[2] for im_list in images for im in im_list]
            images = [im[0] for im_list in images for im in im_list]
            # import pdb;pdb.set_trace()

            padded_batch["images"] = images
            # padded_batch["images"] =[padded_batch["modalities"], images]

        return padded_batch
+
+
def make_dpo_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict:
    """Make dataset and collator for supervised fine-tuning.

    Builds and returns the DPO training dataset from ``data_args.data_path``.
    """
    return DPODataset(tokenizer=tokenizer, data_path=data_args.data_path, data_args=data_args)
+
+
def get_model(model_args, training_args, bnb_model_from_pretrained_args):
    """Instantiate the policy model (and, under DeepSpeed ZeRO-3, a frozen
    reference model) selected by the model name / class arguments.

    Returns:
        (model, ref_model) — ref_model is None unless a llama/qwen model is
        loaded with a zero3 DeepSpeed config.
    """
    # NOTE(review): assert is stripped under `python -O`; an explicit raise
    # would be safer for input validation.
    assert training_args.attn_implementation
    # NOTE(review): lexicographic string compare of versions — wrong for e.g.
    # "2.10.0" vs "2.9.0"; works for current 2.x tags only.
    if training_args.attn_implementation == "sdpa" and torch.__version__ < "2.1.2":
        raise ValueError("The 'sdpa' attention implementation requires torch version 2.1.2 or higher.")

    ######################### Overwrite config #########################
    customized_kwargs = dict()
    customized_kwargs.update(bnb_model_from_pretrained_args)
    overwrite_config = {}
    cfg_pretrained = None
    # Pick the config class matching the base LLM family.
    if "qwen" in model_args.model_name_or_path.lower():
        cfg_pretrained = LlavaQwenConfig.from_pretrained(model_args.model_name_or_path)
    elif "mistral" in model_args.model_name_or_path.lower() or "zephyr" in model_args.model_name_or_path.lower():
        cfg_pretrained = LlavaMistralConfig.from_pretrained(model_args.model_name_or_path)
    elif (
        "wizardlm-2" in model_args.model_name_or_path.lower()
        or "vicuna" in model_args.model_name_or_path.lower()
        or "llama" in model_args.model_name_or_path.lower()
        or "yi" in model_args.model_name_or_path.lower()
        or "nous-hermes" in model_args.model_name_or_path.lower()
        # NOTE(review): `and` binds tighter than `or`, so this condition reads
        # "... or (nous-hermes AND wizard-2)" — possibly intended as another
        # `or`; confirm before changing.
        and "wizard-2" in model_args.model_name_or_path.lower()
    ):
        cfg_pretrained = LlavaConfig.from_pretrained(model_args.model_name_or_path)
    else:
        cfg_pretrained = AutoConfig.from_pretrained(model_args.model_name_or_path)

    # Optional RoPE scaling: extend the context window by rope_scaling_factor.
    if model_args.rope_scaling_factor is not None and model_args.rope_scaling_type is not None and cfg_pretrained is not None:
        overwrite_config["rope_scaling"] = {
            "factor": model_args.rope_scaling_factor,
            "type": model_args.rope_scaling_type,
        }
        if training_args.model_max_length is None:
            training_args.model_max_length = cfg_pretrained.max_position_embeddings * model_args.rope_scaling_factor
            overwrite_config["max_sequence_length"] = training_args.model_max_length
        # NOTE(review): `assert cond, print(...)` prints on failure but the
        # assertion message itself is None (print returns None).
        assert training_args.model_max_length == int(cfg_pretrained.max_position_embeddings * model_args.rope_scaling_factor), print(
            f"model_max_length: {training_args.model_max_length}, max_position_embeddings: {cfg_pretrained.max_position_embeddings}, rope_scaling_factor: {model_args.rope_scaling_factor}"
        )
        # overwrite_config["max_sequence_length"] = model_args.max_sequence_length
        # overwrite_config["tokenizer_model_max_length"] = model_args.tokenizer_model_max_length

    # Optional spatial-pooling resampler settings for video/vision features.
    if model_args.mm_spatial_pool_stride is not None and model_args.mm_spatial_pool_out_channels is not None and model_args.mm_spatial_pool_mode is not None and model_args.mm_resampler_type is not None and cfg_pretrained is not None:
        overwrite_config["mm_resampler_type"] = model_args.mm_resampler_type
        overwrite_config["mm_spatial_pool_stride"] = model_args.mm_spatial_pool_stride
        overwrite_config["mm_spatial_pool_out_channels"] = model_args.mm_spatial_pool_out_channels
        overwrite_config["mm_spatial_pool_mode"] = model_args.mm_spatial_pool_mode

    if overwrite_config:
        rank0_print(f"Overwriting config with {overwrite_config}")
        for k, v in overwrite_config.items():
            setattr(cfg_pretrained, k, v)

    customized_kwargs["config"] = cfg_pretrained

    ######################### Finish Overwrite ###########################

    ref_model = None
    if model_args.model_class_name is not None:
        # Explicit class override: resolve "<Name>ForCausalLM" from transformers.
        actual_model_class_name = f"{model_args.model_class_name}ForCausalLM"
        model_class = getattr(transformers, actual_model_class_name)
        rank0_print(f"Using model class {model_class} from {model_args.model_class_name}")
        model = model_class.from_pretrained(
            model_args.model_name_or_path,
            cache_dir=training_args.cache_dir,
            attn_implementation=training_args.attn_implementation,
            torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
            low_cpu_mem_usage=False,
            **customized_kwargs,
        )
    elif model_args.vision_tower is not None:
        # Multimodal path: pick the LLaVA wrapper matching the LLM family.
        if "mixtral" in model_args.model_name_or_path.lower():
            model = LlavaMixtralForCausalLM.from_pretrained(
                model_args.model_name_or_path,
                cache_dir=training_args.cache_dir,
                attn_implementation=training_args.attn_implementation,
                torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                low_cpu_mem_usage=False,
                **customized_kwargs,
            )
            from transformers.models.mixtral.modeling_mixtral import MixtralSparseMoeBlock

            # Keep MoE blocks as ZeRO-3 leaf modules so experts are not partitioned.
            deepspeed.utils.set_z3_leaf_modules(model, [MixtralSparseMoeBlock])
        elif "mistral" in model_args.model_name_or_path.lower() or "zephyr" in model_args.model_name_or_path.lower():
            model = LlavaMistralForCausalLM.from_pretrained(
                model_args.model_name_or_path,
                cache_dir=training_args.cache_dir,
                attn_implementation=training_args.attn_implementation,
                torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                low_cpu_mem_usage=False,
                **customized_kwargs,
            )
        elif (
            "wizardlm-2" in model_args.model_name_or_path.lower()
            or "vicuna" in model_args.model_name_or_path.lower()
            or "llama" in model_args.model_name_or_path.lower()
            or "yi" in model_args.model_name_or_path.lower()
            or "nous-hermes" in model_args.model_name_or_path.lower()
            # NOTE(review): same `or`/`and` precedence question as the config
            # selection above — keep the two chains in sync.
            and "wizard-2" in model_args.model_name_or_path.lower()
        ):
            model = LlavaLlamaForCausalLM.from_pretrained(
                model_args.model_name_or_path,
                cache_dir=training_args.cache_dir,
                attn_implementation=training_args.attn_implementation,
                torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                low_cpu_mem_usage=False,
                **customized_kwargs,
            )

            if "zero3" in training_args.deepspeed:
                # DPO needs a frozen reference policy; under ZeRO-3 it must be a
                # separate instantiation.
                rank0_print("#### Initialize reference model #####")
                ref_model = LlavaLlamaForCausalLM.from_pretrained(
                    model_args.model_name_or_path,
                    cache_dir=training_args.cache_dir,
                    attn_implementation=training_args.attn_implementation,
                    torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                    low_cpu_mem_usage=False,
                    **customized_kwargs,
                )

        elif "qwen" in model_args.model_name_or_path.lower() or "quyen" in model_args.model_name_or_path.lower():
            if "moe" in model_args.model_name_or_path.lower():
                model = LlavaQwenMoeForCausalLM.from_pretrained(
                    model_args.model_name_or_path,
                    cache_dir=training_args.cache_dir,
                    attn_implementation=training_args.attn_implementation,
                    torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                    low_cpu_mem_usage=False,
                    **customized_kwargs,
                )
                from transformers.models.qwen2_moe.modeling_qwen2_moe import Qwen2MoeSparseMoeBlock

                # Keep MoE blocks as ZeRO-3 leaf modules so experts are not partitioned.
                deepspeed.utils.set_z3_leaf_modules(model, [Qwen2MoeSparseMoeBlock])
            else:
                model = LlavaQwenForCausalLM.from_pretrained(
                    model_args.model_name_or_path,
                    cache_dir=training_args.cache_dir,
                    attn_implementation=training_args.attn_implementation,
                    torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                    low_cpu_mem_usage=False,
                    **customized_kwargs,
                )

                if "zero3" in training_args.deepspeed:
                    rank0_print("#### Initialize reference model #####")
                    ref_model = LlavaQwenForCausalLM.from_pretrained(
                        model_args.model_name_or_path,
                        cache_dir=training_args.cache_dir,
                        attn_implementation=training_args.attn_implementation,
                        torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                        low_cpu_mem_usage=False,
                        **customized_kwargs,
                    )

        elif "gemma" in model_args.model_name_or_path.lower():
            model = LlavaGemmaForCausalLM.from_pretrained(
                model_args.model_name_or_path,
                cache_dir=training_args.cache_dir,
                attn_implementation=training_args.attn_implementation,
                torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                low_cpu_mem_usage=False,
                **customized_kwargs,
            )
        else:
            raise ValueError(f"Unknown model class {model_args}")
    else:
        # Text-only fallback: plain LLaMA causal LM.
        model = transformers.LlamaForCausalLM.from_pretrained(
            model_args.model_name_or_path, cache_dir=training_args.cache_dir, attn_implementation=training_args.attn_implementation, torch_dtype=(torch.bfloat16 if training_args.bf16 else None), **customized_kwargs
        )
    return model, ref_model
+
+
def train(attn_implementation=None):
    """Entry point for LLaVA DPO training.

    Pipeline: parse CLI args into (model/data/training) dataclasses, build the
    policy model (and, under DeepSpeed ZeRO-3, a separate frozen reference
    model), configure quantization / LoRA / tokenizer / vision modules, decide
    which parameters are trainable, then run ``LLaVADPOTrainer`` and persist
    the final weights.

    Args:
        attn_implementation: accepted for signature compatibility but unused
            here; the attention backend is read from
            ``training_args.attn_implementation`` instead.
    """
    global local_rank

    parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if training_args.verbose_logging:
        rank0_print(f"Inspecting experiment hyperparameters:\n")
        rank0_print(f"model_args = {vars(model_args)}\n\n")
        rank0_print(f"data_args = {vars(data_args)}\n\n")
        rank0_print(f"training_args = {vars(training_args)}\n\n")
        # rank0_print(f"evaluation_args = {vars(evaluation_args)}\n\n")

    local_rank = training_args.local_rank
    # dtype used for quantized compute and for casting multimodal modules below
    compute_dtype = torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32)

    # --- 4/8-bit (bitsandbytes) loading kwargs, forwarded into from_pretrained ---
    bnb_model_from_pretrained_args = {}
    if training_args.bits in [4, 8]:
        from transformers import BitsAndBytesConfig

        bnb_model_from_pretrained_args.update(
            dict(
                device_map={"": training_args.device},
                load_in_4bit=training_args.bits == 4,
                load_in_8bit=training_args.bits == 8,
                quantization_config=BitsAndBytesConfig(
                    load_in_4bit=training_args.bits == 4,
                    load_in_8bit=training_args.bits == 8,
                    llm_int8_threshold=6.0,
                    llm_int8_has_fp16_weight=False,
                    bnb_4bit_compute_dtype=compute_dtype,
                    bnb_4bit_use_double_quant=training_args.double_quant,
                    bnb_4bit_quant_type=training_args.quant_type,  # {'fp4', 'nf4'}
                ),
            )
        )

    # ref_model is non-None only when get_model created a frozen reference copy
    # (DeepSpeed ZeRO-3 runs); otherwise DPO reference log-probs come from elsewhere.
    model, ref_model = get_model(model_args, training_args, bnb_model_from_pretrained_args)
    model.config.use_cache = False  # KV cache is incompatible with training / checkpointing

    if model_args.freeze_backbone:
        model.model.requires_grad_(False)

    # --- prepare for k-bit (QLoRA-style) training ---
    if training_args.bits in [4, 8]:
        from peft import prepare_model_for_kbit_training

        # NOTE(review): fp16 maps to float32 here (same as upstream LLaVA) -- confirm intentional.
        model.config.torch_dtype = torch.float32 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32)
        model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=training_args.gradient_checkpointing)

    # --- gradient checkpointing: embedding outputs must require grad so
    #     backprop reaches the (possibly frozen) base weights ---
    if training_args.gradient_checkpointing:
        if hasattr(model, "enable_input_require_grads"):
            model.enable_input_require_grads()
            if ref_model is not None:
                ref_model.enable_input_require_grads()
        else:
            # fallback for models without enable_input_require_grads: hook the
            # input embeddings and force their outputs to track gradients
            def make_inputs_require_grad(module, input, output):
                output.requires_grad_(True)

            model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

            if ref_model is not None:
                ref_model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

    # --- optional LoRA adapters over all linear layers ---
    if training_args.lora_enable:
        from peft import LoraConfig, get_peft_model

        lora_config = LoraConfig(
            r=training_args.lora_r,
            lora_alpha=training_args.lora_alpha,
            target_modules=find_all_linear_names(model),
            lora_dropout=training_args.lora_dropout,
            bias=training_args.lora_bias,
            task_type="CAUSAL_LM",
        )
        if training_args.bits == 16:
            if training_args.bf16:
                model.to(torch.bfloat16)
            if training_args.fp16:
                model.to(torch.float16)
        rank0_print("Adding LoRA adapters...")
        model = get_peft_model(model, lora_config)

    # --- tokenizer: padding side depends on the model family ---
    if "mpt" in model_args.model_name_or_path:
        tokenizer = transformers.AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir, model_max_length=training_args.model_max_length, padding_side="right")
    elif "mistral" in model_args.model_name_or_path.lower() or "mixtral" in model_args.model_name_or_path.lower() or "zephyr" in model_args.model_name_or_path.lower():
        tokenizer = transformers.AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir, model_max_length=training_args.model_max_length, padding_side="left")
    elif "qwen" in model_args.model_name_or_path.lower():
        tokenizer = transformers.AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir, model_max_length=training_args.model_max_length, padding_side="right")
    else:  # for all other models
        tokenizer = transformers.AutoTokenizer.from_pretrained(
            model_args.model_name_or_path,
            cache_dir=training_args.cache_dir,
            model_max_length=training_args.model_max_length,
            padding_side="right",
            use_fast=False,
        )

    # --- pad-token handling and conversation-template selection per prompt version ---
    rank0_print(f"Prompt version: {model_args.version}")
    if model_args.version == "v0":
        if tokenizer.pad_token is None:
            smart_tokenizer_and_embedding_resize(
                special_tokens_dict=dict(pad_token="[PAD]"),
                tokenizer=tokenizer,
                model=model,
            )
    elif model_args.version == "v0.5":
        tokenizer.pad_token = tokenizer.unk_token
    else:
        if tokenizer.unk_token is not None:
            tokenizer.pad_token = tokenizer.unk_token
        if model_args.version in conversation_lib.conv_templates:
            conversation_lib.default_conversation = conversation_lib.conv_templates[model_args.version]
        else:
            # fall back to the vicuna_v1 template for unknown versions
            conversation_lib.default_conversation = conversation_lib.conv_templates["vicuna_v1"]

    # --- vision tower + multimodal configuration (multimodal runs only) ---
    if model_args.vision_tower is not None:
        model.get_model().initialize_vision_modules(model_args=model_args, fsdp=training_args.fsdp)

        vision_tower = model.get_vision_tower()
        vision_tower.to(dtype=torch.bfloat16 if training_args.bf16 else torch.float16, device=training_args.device)

        data_args.image_processor = vision_tower.image_processor
        data_args.is_multimodal = True

        model.config.image_aspect_ratio = data_args.image_aspect_ratio
        # image_grid_pinpoints accepts three formats: a "(AxB)...(CxD)" range,
        # a list of "(AxB)" grid specs, or a python-literal list of pixel pairs.
        if data_args.image_grid_pinpoints is not None:
            # for input like "(1x1)...(3x3)", convert to [(1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (3, 2), (1, 3), (2, 3), (3, 3)]
            if "x" in data_args.image_grid_pinpoints and "..." in data_args.image_grid_pinpoints:
                vis_encoder_size = data_args.image_processor.size[0]
                matches = re.findall(r"\((\d+)x(\d+)\)", data_args.image_grid_pinpoints)
                range_start = tuple(map(int, matches[0]))
                range_end = tuple(map(int, matches[-1]))
                grid_pinpoints = [(i, j) for i in range(range_start[0], range_end[0] + 1) for j in range(range_start[1], range_end[1] + 1)]
                # scale grid cells to pixel sizes using the encoder resolution
                grid_pinpoints = [[dim * vis_encoder_size for dim in pair] for pair in grid_pinpoints]
                data_args.image_grid_pinpoints = grid_pinpoints
            elif "x" in data_args.image_grid_pinpoints:
                vis_encoder_size = data_args.image_processor.size[0]
                assert vis_encoder_size in [224, 336, 384, 448, 512], "vis_encoder_size should be in [224, 336, 384, 448, 512]"
                grid_pinpoints = data_args.image_grid_pinpoints.replace(" ", "").replace("x", ",")[1:-1].split("),(")
                data_args.image_grid_pinpoints = [[int(x) * vis_encoder_size for x in item.split(",")] for item in grid_pinpoints]
            else:
                data_args.image_grid_pinpoints = ast.literal_eval(data_args.image_grid_pinpoints)  # for backward compatibility
        model.config.image_grid_pinpoints = data_args.image_grid_pinpoints
        model.config.image_crop_resolution = data_args.image_crop_resolution
        model.config.image_split_resolution = data_args.image_split_resolution
        model.config.tokenizer_padding_side = tokenizer.padding_side
        model.config.tokenizer_model_max_length = tokenizer.model_max_length

        ### Deciding train which part of the model
        if model_args.mm_tunable_parts is None:  # traditional way of deciding which part to train
            model.config.tune_mm_mlp_adapter = training_args.tune_mm_mlp_adapter = model_args.tune_mm_mlp_adapter
            model.config.tune_mm_vision_resampler = training_args.tune_mm_vision_resampler = model_args.tune_mm_vision_resampler
            if model_args.tune_mm_mlp_adapter or model_args.tune_mm_vision_resampler:
                # freeze everything, then selectively re-enable the chosen adapters
                model.requires_grad_(False)
            if model_args.tune_mm_mlp_adapter:
                for p in model.get_model().mm_projector.parameters():
                    p.requires_grad = True
            if model_args.tune_mm_vision_resampler:
                for p in model.get_model().vision_resampler.parameters():
                    p.requires_grad = True

            model.config.freeze_mm_mlp_adapter = training_args.freeze_mm_mlp_adapter
            if training_args.freeze_mm_mlp_adapter:
                for p in model.get_model().mm_projector.parameters():
                    p.requires_grad = False

            model.config.freeze_mm_vision_resampler = training_args.freeze_mm_vision_resampler
            if training_args.freeze_mm_vision_resampler:
                for p in model.get_model().vision_resampler.parameters():
                    p.requires_grad = False

            model.config.unfreeze_mm_vision_tower = model_args.unfreeze_mm_vision_tower
            if model_args.unfreeze_mm_vision_tower:
                vision_tower.requires_grad_(True)
            else:
                vision_tower.requires_grad_(False)

        else:
            # newer style: a comma-separated whitelist of tunable components
            rank0_print(f"Using mm_tunable_parts: {model_args.mm_tunable_parts}")
            model.config.mm_tunable_parts = training_args.mm_tunable_parts = model_args.mm_tunable_parts
            # Set the entire model to not require gradients by default
            model.requires_grad_(False)
            vision_tower.requires_grad_(False)
            model.get_model().mm_projector.requires_grad_(False)
            model.get_model().vision_resampler.requires_grad_(False)
            # Parse the mm_tunable_parts to decide which parts to unfreeze
            tunable_parts = model_args.mm_tunable_parts.split(",")
            if "mm_mlp_adapter" in tunable_parts:
                for p in model.get_model().mm_projector.parameters():
                    p.requires_grad = True
            if "mm_vision_resampler" in tunable_parts:
                for p in model.get_model().vision_resampler.parameters():
                    p.requires_grad = True
            if "mm_vision_tower" in tunable_parts:
                for name, param in model.named_parameters():
                    if "vision_tower" in name:
                        param.requires_grad_(True)
            if "mm_language_model" in tunable_parts:
                # everything that is not a vision component counts as LM
                for name, param in model.named_parameters():
                    if "vision_tower" not in name and "mm_projector" not in name and "vision_resampler" not in name:
                        param.requires_grad_(True)

        # ds_numel is the true element count for DeepSpeed-partitioned params
        total_params = sum(p.ds_numel if hasattr(p, "ds_numel") else p.numel() for p in model.parameters())
        trainable_params = sum(p.ds_numel if hasattr(p, "ds_numel") else p.numel() for p in model.parameters() if p.requires_grad)
        # NOTE(review): values are millions of parameters, but the label says "MB" and has a stray ")"
        rank0_print(f"Total parameters: ~{total_params/1e6:.2f} MB)")
        rank0_print(f"Trainable parameters: ~{trainable_params/1e6:.2f} MB)")
        if training_args.bits in [4, 8]:
            model.get_model().mm_projector.to(dtype=compute_dtype, device=training_args.device)

        model.config.mm_use_im_start_end = data_args.mm_use_im_start_end = model_args.mm_use_im_start_end
        model.config.mm_projector_lr = training_args.mm_projector_lr
        model.config.mm_vision_tower_lr = training_args.mm_vision_tower_lr
        training_args.use_im_start_end = model_args.mm_use_im_start_end
        model.config.mm_use_im_patch_token = model_args.mm_use_im_patch_token
        model.initialize_vision_tokenizer(model_args, tokenizer=tokenizer)

    # --- mirror multimodal setup onto the frozen DPO reference model ---
    if ref_model is not None:
        ref_model.get_model().initialize_vision_modules(model_args=model_args, fsdp=training_args.fsdp)
        ref_vision_tower = ref_model.get_vision_tower()
        ref_vision_tower.to(dtype=torch.bfloat16 if training_args.bf16 else torch.float16, device=training_args.device)
        ref_model.config.image_aspect_ratio = data_args.image_aspect_ratio
        ref_model.config.image_grid_pinpoints = data_args.image_grid_pinpoints
        ref_model.config.image_crop_resolution = data_args.image_crop_resolution
        ref_model.config.image_split_resolution = data_args.image_split_resolution
        ref_model.config.tokenizer_padding_side = tokenizer.padding_side
        ref_model.config.tokenizer_model_max_length = tokenizer.model_max_length
        ref_model.config.mm_use_im_start_end = data_args.mm_use_im_start_end
        ref_model.config.mm_use_im_patch_token = model_args.mm_use_im_patch_token
        ref_model.initialize_vision_tokenizer(model_args, tokenizer=tokenizer)
        # the reference model is fully frozen and kept in eval mode
        parameter_names = [n for n, _ in ref_model.named_parameters()]
        for param_name in parameter_names:
            param = ref_model.get_parameter(param_name)
            param.requires_grad = False
        ref_model.eval()

    # --- dtype hygiene for k-bit training: LoRA layers, norms, heads ---
    if training_args.bits in [4, 8]:
        from peft.tuners.lora import LoraLayer

        for name, module in model.named_modules():
            if isinstance(module, LoraLayer):
                if training_args.bf16:
                    module = module.to(torch.bfloat16)
            if "norm" in name:
                # keep normalization layers in fp32 for numerical stability
                module = module.to(torch.float32)
            if "lm_head" in name or "embed_tokens" in name:
                if hasattr(module, "weight"):
                    if training_args.bf16 and module.weight.dtype == torch.float32:
                        module = module.to(torch.bfloat16)

    # --- data pipeline and trainer ---
    train_dataset = make_dpo_data_module(tokenizer=tokenizer, data_args=data_args)
    data_collator = DPODataCollator(
        tokenizer,
        label_pad_token_id=IGNORE_INDEX,
        pad_token_id=tokenizer.pad_token_id,
    )

    trainer = LLaVADPOTrainer(
        model,
        ref_model,
        args=training_args,
        dpo_alpha=training_args.dpo_alpha,
        beta=training_args.beta,
        gamma=training_args.gamma,
        train_dataset=train_dataset,
        eval_dataset=None,
        data_collator=data_collator,
        tokenizer=tokenizer,
        max_length=training_args.model_max_length,
        generate_during_eval=False,  # training_args.generate_during_eval,
        precompute_ref_log_probs=training_args.precompute_ref_log_probs,
    )

    # resume automatically if any checkpoint-* directory exists in output_dir
    if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
        trainer.train(resume_from_checkpoint=True)
    else:
        trainer.train()
    trainer.save_state()

    model.config.use_cache = True  # re-enable KV cache for inference on the saved model

    # --- save: LoRA runs split adapter weights from non-LoRA trainables ---
    if training_args.lora_enable:
        state_dict = get_peft_state_maybe_zero_3(model.named_parameters(), training_args.lora_bias)
        non_lora_state_dict = get_peft_state_non_lora_maybe_zero_3(model.named_parameters())
        if training_args.local_rank == 0 or training_args.local_rank == -1:
            if hasattr(model, "config"):
                model.config.save_pretrained(training_args.output_dir)
            if hasattr(model, "generation_config"):
                model.generation_config.save_pretrained(training_args.output_dir)
            model.save_pretrained(training_args.output_dir, state_dict=state_dict)
            torch.save(non_lora_state_dict, os.path.join(training_args.output_dir, "non_lora_trainables.bin"))
    else:
        safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)

    rank0_print(f"Model saved to {training_args.output_dir}")


if __name__ == "__main__":
    train()
diff --git a/VLMEvalKit-sudoku/requirements/docs.txt b/VLMEvalKit-sudoku/requirements/docs.txt
new file mode 100644
index 0000000000000000000000000000000000000000..02587e64881bbf53f7e29ee780a75a728cc8ac2f
--- /dev/null
+++ b/VLMEvalKit-sudoku/requirements/docs.txt
@@ -0,0 +1,11 @@
+docutils==0.18.1
+modelindex
+myst-parser
+-e git+https://github.com/open-compass/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
+sphinx==6.1.3
+sphinx-copybutton
+sphinx-design
+sphinx-notfound-page
+sphinx-tabs
+sphinxcontrib-jquery
+tabulate
diff --git a/VLMEvalKit-sudoku/vlmeval/api/__pycache__/claude.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/claude.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..06f7dea13efd19009a8673ee7b39ea478a33a1d6
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/claude.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/api/__pycache__/gemini.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/gemini.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b387cecc2574bba9a0395a080425a9e8cc5193f
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/gemini.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/api/__pycache__/glm_vision.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/glm_vision.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b54051fc647a8c6622562f2199a2896856634703
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/glm_vision.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/api/__pycache__/hf_chat_model.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/hf_chat_model.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..592b8937b0682f77a5a4a37a030bf8a04d657f21
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/hf_chat_model.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/api/__pycache__/hunyuan.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/hunyuan.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..68c68eecb5cacec20aa401ca5de30f430c0ac9d5
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/hunyuan.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/api/__pycache__/lmdeploy.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/lmdeploy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b5bf93f02ab68d2d523240c12cffdf348ce3e9c7
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/lmdeploy.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/api/__pycache__/qwen_vl_api.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/qwen_vl_api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..afcb12ee5d5def780fd4fc63e75f6b63cad40995
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/qwen_vl_api.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/megabench.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/megabench.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2c6d9f230654eea26f424bb13250bf14d94533d9
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/megabench.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/sfebench.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/sfebench.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..744a3e0f69c14074e0a8c2b9adf3522b41eba0b6
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/sfebench.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/cmmmu.py b/VLMEvalKit-sudoku/vlmeval/dataset/cmmmu.py
new file mode 100644
index 0000000000000000000000000000000000000000..d96a241e61804ddec4495b5ae45cec0b795424c1
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/cmmmu.py
@@ -0,0 +1,354 @@
+from .image_base import ImageBaseDataset
+import random
+from collections import Counter
+import os
+import re
+import tempfile
+from ..smp import *
+from ..smp.file import get_intermediate_file_path
+
+
def get_multi_choice_prediction(response, all_choices, index2ans):
    """Extract the predicted option letter(s) from a free-form model response.

    Matching passes, each tried only if the previous one found nothing:
    parenthesised letters like "(A)", then bare letters like "A", then the
    full option texts from ``index2ans``. Each occurrence counts as one vote;
    the most frequent candidate(s) are returned concatenated in ``all_choices``
    order (so ties yield multi-letter answers like "AC"). If nothing matches,
    a random choice is returned as a last resort.

    Args:
        response: raw model output text.
        all_choices: option letters in canonical order, e.g. ['A','B','C','D'].
        index2ans: mapping from option letter to the option's answer text.

    Returns:
        A string of one or more option letters.
    """
    # Strip common trailing/leading punctuation, then pad with spaces so that
    # letter matches at the string edges behave like interior ones.
    for char in [',', '.', '!', '?', ';', ':', "'"]:
        response = response.strip(char)
    response = " " + response + " "  # add space to avoid partial match

    candidates = []

    # Pass 1: parenthesised letters, e.g. "(A)"; one vote per occurrence.
    for choice in all_choices:
        candidates.extend([choice for _ in range(response.count(f'({choice})'))])

    # Pass 2: bare letters, e.g. "A".
    if len(candidates) == 0:
        for choice in all_choices:
            candidates.extend([choice for _ in range(response.count(f'{choice}'))])

    # Pass 3: full option texts.
    if len(candidates) == 0 and len(response.split()) >= 1:
        for index, ans in index2ans.items():
            candidates.extend([index for _ in range(response.count(ans))])
    # (A former fourth pass checking `ans in response` under the identical
    # condition was removed: `response.count(ans) == 0` for every ans already
    # implies `ans not in response`, so it could never add a candidate.)

    if len(candidates) == 0:
        # still no answer found: fall back to a random option
        return random.choice(all_choices)

    # Vote: keep the most frequent candidate(s), emitted in all_choices order.
    candidate_counts = Counter(candidates)
    max_count = max(candidate_counts.values())
    most_frequent_candidates = [c for c in all_choices if candidate_counts.get(c, 0) == max_count]
    return ''.join(most_frequent_candidates)
+
+
def extract_numbers(string):
    """Collect every numeric token found in *string*.

    Three pattern families are applied in a fixed order, and their matches
    are concatenated (duplicates kept): comma-grouped integers like "1,234",
    scientific notation like "2e3", and plain integers/decimals. Note that
    the passes are independent, so a plain-number scan may also pick up
    digits inside a comma-grouped match.

    Returns:
        A list of matched number strings in pattern-then-position order.
    """
    patterns = (
        r'-?\d{1,3}(?:,\d{3})+',                               # comma-grouped integers
        r'-?\d+(?:\.\d+)?[eE][+-]?\d+',                        # scientific notation
        r'-?(?:\d+\.\d+|\.\d+|\d+)(?![eE][+-]?\d+)(?!,\d)',    # plain numbers
    )
    found = []
    for pattern in patterns:
        found.extend(re.findall(pattern, string))
    return found
+
+
def check_is_number(string):
    """Return True if *string* parses as a float once grouping commas are removed."""
    try:
        float(string.replace(',', ''))
    except ValueError:
        # not numeric even after dropping thousands separators
        return False
    return True
+
+
def count_letters(string):
    """Count the ASCII letters (a-z, A-Z) in *string*.

    Non-ASCII alphabetic characters (e.g. CJK or accented letters) are not
    counted, matching the original behavior. The original expression relied
    on `and`/`or` precedence and a redundant `isalpha()` call; the explicit
    range tests below are equivalent and unambiguous.
    """
    return sum(('a' <= c <= 'z') or ('A' <= c <= 'Z') for c in string)
+
+
def normalize_str(string, answer):
    """Normalise one candidate answer for comparison against *answer*.

    Returns a one-element list: a float rounded to two decimals when the
    candidate parses as a number (thousands separators tolerated), or the
    stripped string when it is plausible text. Candidates that are far
    longer than the reference *answer* are rejected with an empty list,
    and ``None`` passes through unchanged as ``[None]``.
    """
    if string is None:
        return [string]

    candidate = string.strip()

    if check_is_number(candidate):
        # numeric: drop grouping commas, parse, and keep two decimals
        return [round(float(candidate.replace(',', '')), 2)]

    # textual: discard candidates clearly longer than the reference answer
    too_long = len(candidate) > len(answer) + 20
    too_wordy = count_letters(candidate) > count_letters(answer) + 2
    if too_long or too_wordy:
        return []
    return [candidate]
+
+
def get_fill_blank_prediction(response, answer):
    """Extract candidate answers for a fill-in-the-blank question.

    The response is split into sentence fragments; for each fragment the
    shortest tail following a Chinese answer-cue word (e.g. "答案", "因此")
    is kept. Numbers are then extracted from those fragments, everything is
    normalised against *answer*, and duplicates are dropped.

    Returns:
        A de-duplicated list of predicted strings and/or numbers (order is
        not significant downstream).
    """

    def pick_key_fragments(text):
        """Return the sentence tails most likely to contain the final answer."""
        text = text.strip("。").strip()
        fragments = re.split(r'。|\n', text)
        cue_words = ['是', '为', '所以', '等于', '方案', '选择',
                     '正确答案', '因此', '最后', '答案', '结果']
        picked = []
        for pos, fragment in enumerate(fragments):
            if pos == len(fragments) - 1:
                # the final fragment may itself be a bare equation
                cue_words.extend(['='])
            best = None
            for cue in cue_words:
                if cue not in fragment:
                    continue
                tail = fragment.split(cue)[-1].strip()
                if not best:
                    # also replaces an empty previous tail, as the original did
                    best = tail
                elif len(tail) < len(best):
                    best = tail
            if best:
                # skip trivial punctuation-only tails
                if best.strip() not in [":", ",", ".", "!", "?", ";", ":", "'"]:
                    picked.append(best)
        # nothing matched any cue word: fall back to the whole stripped text
        return picked if picked else [text]

    fragments = pick_key_fragments(response)

    raw_predictions = list(fragments)  # keep the original string fragments
    for fragment in fragments:
        raw_predictions.extend(extract_numbers(fragment))

    normalized = []
    for candidate in raw_predictions:
        normalized.extend(normalize_str(candidate, answer))

    # remove duplicates
    return list(set(normalized))
+
+
def get_TF_prediction(response):
    """Extract candidate judgments for a true/false question.

    Splits the response into sentence fragments and, for each, keeps the
    shortest tail following a Chinese judgment-cue word (e.g. "答案",
    "判断"). If no cue word appears anywhere, the whole stripped response
    is returned as the single candidate.

    Returns:
        A de-duplicated list of candidate judgment strings.
    """

    def pick_key_fragments(text):
        """Return the sentence tails most likely to contain the judgment."""
        text = text.strip("。").strip()
        fragments = re.split(r'。|\n', text)
        cue_words = ['是', '为', '所以', '判断',
                     '陈述', '说法', '表达', '答案', '结果']
        picked = []
        for fragment in fragments:
            best = None
            for cue in cue_words:
                if cue not in fragment:
                    continue
                tail = fragment.split(cue)[-1].strip()
                if not best:
                    # also replaces an empty previous tail, as the original did
                    best = tail
                elif len(tail) < len(best):
                    best = tail
            if best:
                # skip trivial punctuation-only tails
                if best.strip() not in [":", ",", ".", "!", "?", ";", ":", "'"]:
                    picked.append(best)
        # nothing matched any cue word: fall back to the whole stripped text
        return picked if picked else [text]

    # remove duplicates before returning
    return list(set(pick_key_fragments(response)))
+
+
class CMMMU(ImageBaseDataset):
    """CMMMU (Chinese MMMU) benchmark dataset.

    Handles three question types -- multiple choice ("选择"), true/false
    ("判断") and fill-in-the-blank -- and scores predictions with the
    rule-based extractors defined above in this module (no LLM judge).
    """

    TYPE = 'VQA'

    DATASET_URL = {
        'CMMMU_VAL': 'https://opencompass.openxlab.space/utils/VLMEval/CMMMU_VAL.tsv'
    }

    DATASET_MD5 = {
        'CMMMU_VAL': 'b4727e2fce2415bf646379e60c11a726'
    }

    def dump_image(self, line):
        """Decode the base64 image(s) of one record to disk and return their paths.

        Files are cached under ``self.img_root``; an image is only written if
        no readable file already exists at the target path. Always returns a
        list, even when the record holds a single image.
        """
        os.makedirs(self.img_root, exist_ok=True)

        tgt_path_z = []
        if isinstance(line['image'], list):
            # multi-image record: one file per image, suffixed --1, --2, ...
            for i in range(len(line['image'])):
                tgt_path = osp.join(self.img_root, f"{line['index']}--{i + 1}.jpg")
                if not read_ok(tgt_path):
                    decode_base64_to_image_file(line['image'][i], tgt_path)
                tgt_path_z.append(tgt_path)
        else:
            tgt_path = osp.join(self.img_root, f"{line['index']}.jpg")
            if not read_ok(tgt_path):
                decode_base64_to_image_file(line['image'], tgt_path)
            tgt_path_z.append(tgt_path)
        return tgt_path_z

    # NOTE(review): declared @classmethod but the first parameter is named
    # `self` -- it receives the class object; kept as-is for compatibility.
    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Score an evaluation file and return per-category accuracies.

        Results are cached: if the derived ``*_acc.csv`` already exists it is
        loaded directly instead of being recomputed. Accuracy is reported
        overall ("总准确率") and per subject category.
        """

        result_file = get_intermediate_file_path(eval_file, '_acc', 'csv')

        if not osp.exists(result_file):
            data = load(eval_file)
            assert 'answer' in data and 'prediction' in data
            data['prediction'] = [str(x) for x in data['prediction']]
            data['answer'] = [str(x) for x in data['answer']]

            correct_count = 0
            # per-category [total, correct] counters
            correct_category = {
                '技术与工程': [0, 0],
                '科学': [0, 0],
                '健康与医学': [0, 0],
                '商业': [0, 0],
                '艺术与设计': [0, 0],
                '人文社会科学': [0, 0],
            }

            for i in tqdm(data.iterrows()):
                line = i[1]  # iterrows yields (index, row); keep the row
                correct_category[line['category']][0] += 1

                # Options
                if line['type'] == '选择':
                    index2ans = {
                        'A': line['option1'],
                        'B': line['option2'],
                        'C': line['option3'],
                        'D': line['option4']
                    }
                    fact_option = get_multi_choice_prediction(line['prediction'], ['A', 'B', 'C', 'D'], index2ans)
                    if fact_option == line['answer']:
                        correct_count += 1
                        correct_category[line['category']][1] += 1

                # Binary
                elif line['type'] == '判断':
                    positive_keywords = ['正确', '对', '准确', '肯定', '对的']
                    negative_keywords = ['不对', '错误', '不正确', '不准确', '不合适', '否定', '错的', '错']
                    ambiguous_keywords = ['对错', '是否正确', '否正确', '或者', '是否', '正确性', '对不']

                    def judge_similarity(pred_list, positive_keywords, negative_keywords):
                        """Vote positive vs. negative keywords; random tie-break."""
                        positive_count = 0
                        negative_count = 0

                        for pred in pred_list:
                            if any(pos_word in pred for pos_word in positive_keywords):
                                positive_count += 1
                            elif any(neg_word in pred for neg_word in negative_keywords):
                                negative_count += 1

                        if positive_count > negative_count:
                            return "对"
                        elif negative_count > positive_count:
                            return "错"
                        else:
                            return random.choice(['对', '错'])

                    answer = get_TF_prediction(line['prediction'])
                    # drop candidates that merely restate the question (ambiguous)
                    answer = [word for word in answer if not any(ambiguous in word for ambiguous in ambiguous_keywords)]
                    fact_answer = judge_similarity(answer, positive_keywords, negative_keywords)
                    if fact_answer == line['answer']:
                        correct_count += 1
                        correct_category[line['category']][1] += 1

                # Fill the Blank
                else:
                    norm_answers = normalize_str(line['answer'], line['answer'])
                    predicted_answer = get_fill_blank_prediction(line['prediction'], line['answer'])

                    # NOTE(review): no break after a hit, so one record can
                    # increment the counters more than once when several
                    # predictions match -- confirm against the official
                    # CMMMU scorer before changing.
                    for pred in predicted_answer:
                        # already normalized
                        if isinstance(pred, str):  # if it's a string, then find if ans in the pred_i
                            for norm_ans in norm_answers:
                                # only see if the string answer in the string pred
                                # print(norm_ans, pred)
                                if isinstance(norm_ans, str) and norm_ans in pred:
                                    correct_count += 1
                                    correct_category[line['category']][1] += 1
                        else:  # it's a number
                            if pred in norm_answers:
                                correct_count += 1
                                correct_category[line['category']][1] += 1

            accuracyz = {}
            accuracyz['总准确率'] = correct_count / len(data)
            for i in correct_category.keys():
                accuracyz[i] = correct_category[i][1] / correct_category[i][0]

            accuracyz = d2df(accuracyz)
            # NOTE(review): DataFrame.round returns a new object; this call's
            # result is discarded, so it is a no-op as written.
            accuracyz.round(10)
            dump(accuracyz, result_file)

        result = pd.read_csv(result_file)
        return result

    def build_prompt(self, line):
        """Build the multimodal message list for one record, per question type."""
        if line['type'] == '选择':
            # multiple choice: enumerate the four options explicitly
            tgt_path = self.dump_image(line)
            question = line['question']
            options_prompt = 'Options:\n'

            for i in [['A', '1'], ['B', '2'], ['C', '3'], ['D', '4']]:
                options_prompt += i[0] + '. ' + line['option' + i[1]] + '\n'

            prompt = (f'问题: {question}\n' + options_prompt
                      + '请回答上述多项选择题,并选出正确选项。这些题目可能包括单选和多选题型。如果所提供的信息不足以确定一个明确的答案,那么请根据可用的数据和你的判断来选择最可能正确的选项。')

            msgs = []
            if isinstance(tgt_path, list):
                msgs.extend([dict(type='image', value=p) for p in tgt_path])
            else:
                msgs = [dict(type='image', value=tgt_path)]
            msgs.append(dict(type='text', value=prompt))

            return msgs

        elif line['type'] == '判断':
            # true/false: reuse the base prompt and append judging instructions
            msgs = super().build_prompt(line)
            assert msgs[-1]['type'] == 'text'
            msgs[-1]['value'] += '\n请回答上述判断题,并根据题目描述和所给的信息来判断问题中陈述的对错。如果信息不完整或不足以作出绝对判断,请运用你的逻辑推理和现有信息来做出最可能的判断。'
            return msgs

        else:
            # fill-in-the-blank: reuse the base prompt and append instructions
            msgs = super().build_prompt(line)
            assert msgs[-1]['type'] == 'text'
            msgs[-1]['value'] += '\n请回答上述填空题,并根据题目的要求和所提供的信息来给出最恰当的答案。如果信息不足以确切回答,那么请依据现有的数据和你的推理能力来填写最合理的答案。'
            return msgs
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/creation.py b/VLMEvalKit-sudoku/vlmeval/dataset/creation.py
new file mode 100644
index 0000000000000000000000000000000000000000..38a5d3d51487ea4d66810dc1d097f949761392ad
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/creation.py
@@ -0,0 +1,741 @@
+# flake8: noqa
+from .image_base import ImageBaseDataset
+import numpy as np
+import pandas as pd
+from ..smp import *
+from ..smp.file import get_intermediate_file_path
+from .utils import build_judge, DEBUG_MESSAGE
+from ..utils import track_progress_rich
+import re
+
# Judge prompt templates, keyed first by dataset family and then by judge
# role: 'subjective' (pairwise preference vs. a GPT-4o reference) and
# 'objective_without_gt' / 'objective_with_gt' (criteria-alignment scoring).
# NOTE(review): the templates misspell "Response" as "Repsonse" in a few
# places; this text is sent to the judge model at runtime, so it is kept
# byte-identical to preserve score comparability across runs.
prompt_dict = {}
prompt_dict['LiveMMBench_Creation'] = {
    # Subjective Judge [GPT-4o reference]
    'subjective':"""
Please act as an impartial judge and evaluate the quality of two responses provided by AI assistants to the user prompt.

Your task is to carefully assess two responses based on provided instructions and evaluation criteria. After evaluating both responses, determine which response features better quality and better meets the criteria. If both responses are similar or nearly identical in quality, you should indicate a tie. Avoid position bias toward the first or second response.

Suggested Steps for Evaluation:
1. Review both responses independently and then carefully compare their strengths and weaknesses. A good response should feature good language quality, follow the user instruction and meet as many criteria as possible.
2. After completing the first evaluation, swap the positions of response A and B and repeat Step 1 and get the 2nd evaluation outcome. This helps to mitigate the potential position bias.
3. After completing both evaluations (in the original and reversed order), combine your analysis and provide a final conclusion based on the overall assessment. If both responses are relatively similar, or the differences are minimal and hard to distinguish, your conclusion should indicate a tie ([[A=B]]).

Your **conclusion** should be one of the following options (A, B are of the original order):
1. [[A>>B]]: Response A is clearly better than Response B.
2. [[A>B]]: Response A is slightly better than Response B.
3. [[A=B]]: Response A is nearly identical to Response B.
4. [[B>A]]: Response B is slightly better than Response A.
5. [[B>>A]]: Response B is clearly better than Response A.

User Instruction:\n[INSTRUCTIONS]\n{instructions}\n[END INSTRUCTIONS]\n\n
Repsonse A:\n[RESPONSE A]\n{reference_answer_by_gpt4o}\n[END RESPONSE A]\n\n
Response B:\n[RESPONSE B]\n{prediction}\n[END RESPONSE B]\n\n
Evaluation Criteria:\n[CRITERIA]\n{criteria}\n[END CRITERIA]\n\n

Your output should include:
1. Conclusion: Your final conclusion based on the overall assessment.
2. Reasoning: Your reasoning process and analysis of the two responses.

Your output should follow the following format (CONCLUSION should be one of the five options: A>>B, A>B, A=B, B>A, B>>A):

Final Conclusion: [[CONCLUSION]]
Reasoning Process: [REASONING]\n
""",

    # Criteria Alignment w/o GT
    'objective_without_gt':"""
Please act as an impartial judge and evaluate the **Criteria Alignment** of the two responses provided by AI assistants to the user prompt. The responses were generated based on the provided instructions and visual input from images.

Suggested Steps for Evaluation:
1. Evaluate **Criteria Alignment** of both responses based on the criteria.
    • If a criterion consist of **X aspects**, each aspect is worth **10 / X points**.
    • For each aspect, there may be multiple sub-criteria. If there are **Y sub-criteria for the aspect**, each sub-criterion worths **10 / (X * Y) points**.
2. Assign a total score out of 10 for each response.

User Instruction:\n[INSTRUCTIONS]\n{instructions}\n[END INSTRUCTIONS]\n\n
Repsonse A:\n[RESPONSE A]\n{reference_answer_by_gpt4o}\n[END RESPONSE A]\n\n
Response B:\n[RESPONSE B]\n{prediction}\n[END RESPONSE B]\n\n
Criteria:\n[CRITERIA]\n{criteria}\n[END CRITERIA]\n\n

Your output should evaluate alignment scores of each response and end with a conclusion in the following format (The full score is 10. X, Y are alignment scores for Response A and B):

Response A Alignment Score: X/10
Response B Alignment Score: Y/10\n
""",

    # Criteria Alignment w. GT
    'objective_with_gt':"""
Please act as an impartial judge and evaluate the **Criteria Alignment** of the two responses provided by AI assistants to the user prompt. The responses were generated based on the provided instructions and visual input from images. There is also a ground truth corresponding to the instructions provided for reference.
Take this context into account when making your judgment.

Steps for Evaluation:
1. Evaluate **Criteria Alignment** of both responses based on the criteria and the ground truth.
    • If a criterion consist of **X aspects**, each aspect is worth **10 / X points**.
    • For each aspect, there may be multiple sub-criteria. If there are **Y sub-criteria for the aspect**, each sub-criterion worths **10 / (X * Y) points**.
2. Assign a total score out of 10 for each response.

User Instruction:\n[INSTRUCTIONS]\n{instructions}\n[END INSTRUCTIONS]\n\n
Ground Truth:\n[GROUND TRUTH]\n{groundtruth}\n[END GROUND TRUTH]\n\n
Repsonse A:\n[RESPONSE A]\n{reference_answer_by_gpt4o}\n[END RESPONSE A]\n\n
Response B:\n[RESPONSE B]\n{prediction}\n[END RESPONSE B]\n\n
Criteria:\n[CRITERIA]\n{criteria}\n[END CRITERIA]\n\n

Your output should evaluate alignment scores of each response and end with a conclusion in the following format (The full score is 10. X, Y are alignment scores for Response A and B):

Response A Alignment Score: X/10
Response B Alignment Score: Y/10\n
""",
}
+
# Creation-MMBench judge templates: a subjective pairwise-preference judge
# (image-aware, GPT-4o response as Assistant A) plus visual-factuality
# alignment prompts, with and without a ground-truth reference.
prompt_dict['Creation_MMBench'] = {
    # Subjective Judge [GPT-4o reference, with image]
    'subjective':"""
Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt below, considering both the provided criteria and the image.

Your task is to carefully assess each response based on how well it meets the evaluation criteria, incorporating the visual context from the image. The criteria should be the primary basis for your judgment, with the image serving to complement and inform your analysis.

Steps for Evaluation:
    1. Review Both Responses Independently:
    Carefully analyze Assistant A’s and Assistant B’s responses with the criteria and the image. Do not assume any response is better just because it is listed first. Each response should be independently assessed based on the criteria and aided by images to help understand the context.

    2. Compare the Strengths and Weaknesses:
    After evaluating each response independently, compare the two. Consider both the quality of the content and how closely it aligns with the criteria and image. Identify the strengths and weaknesses of each response, and highlight the key differences.

    3. Ensure Fairness:
    To avoid positional bias, swap the positions of Assistant A and Assistant B after the first evaluation (i.e., make Assistant A become Assistant B and vice versa) and repeat the analysis and comparison. This ensures that each response is evaluated impartially under the same criteria.

    4. Provide a Conclusion Based on Both Evaluations:
    After completing both evaluations (original and swapped positions), combine your analysis to provide a final verdict. If the responses are similar, with only minimal differences, your judgment should reflect that and indicate a tie.

Possible Verdict Options:

• If Assistant A is clearly better in both evaluations: [[A>>B]]
• If Assistant A is slightly better in both evaluations: [[A>B]]
• If both responses are nearly identical, showing minimal differences and no clear advantage: [[A=B]]
• If Assistant B is slightly better in both evaluations: [[B>A]]
• If Assistant B is clearly better in both evaluations: [[B>>A]]

Instructions to the AI Assistants:

[INSTRUCTIONS]
{instructions}
[END INSTRUCTIONS]

Assistant A Response:

[ASSISTANT A]
{reference_answer_by_gpt4o}
[END ASSISTANT A]

Evaluation Criteria:

[CRITERIA]
{criteria}
[END CRITERIA]

Assistant B Response:

[ASSISTANT B]
{prediction}
[END ASSISTANT B]

Output Format:

Your output should include:
    1. Evaluation of Assistant A’s Response: Provide a detailed qualitative evaluation, focusing on how well Assistant A’s response aligns with the criteria and the image.
    2. Evaluation of Assistant B’s Response: Provide a detailed qualitative evaluation, focusing on how well Assistant B’s response aligns with the criteria and the image.
    3. Final Verdict: After considering both evaluations, select one of the following verdicts and justify it based on your analysis:

Your output format should end like this:
Assistant A Evaluation: [qualitative comment]
Assistant B Evaluation: [qualitative comment]
Final Verdict is: [[VERDICT]]
""",

##### For Visual Factuality
    'objective_without_gt':"""
Please act as an impartial judge and evaluate the **Visual Factuality** of the responses provided by two AI assistants to the user prompt displayed below.

The responses were generated based on the provided instructions and visual input from images. Take this context into account when making your judgment.

Steps for Evaluation:
1. Evaluate visual factuality for both responses based on the visual factuality criteria.
    • If the visual factuality criteria consist of **X aspects**, each aspect is worth **10/X points**.
    • For each aspect, there may be multiple small criteria. If there are **Y small criteria in one aspect**, each small criterion is worth **10/X/Y points**.
2. Assign a total score out of 10 for each response.

Instructions to the AI assistants:
[INSTRUCTIONS]
{instructions}
[END INSTRUCTIONS]

Assistant A response:
[ASSISTANT A]
{reference_answer_by_gpt4o}
[END ASSISTANT A]

Visual Factuality Criteria:
[VISUAL FACTUALITY CRITERIA]
{criteria}
[END CRITERIA]

Assistant B response:
[ASSISTANT B]
{prediction}
[END ASSISTANT B]

Your output should evaluate visual factuality scores for each assistant and end like this:

Response A Visual Factuality Score: X/10
Response B Visual Factuality Score: Y/10
""",

    'objective_with_gt':"""
Please act as an impartial judge and evaluate the **Visual Factuality** of the responses provided by two AI assistants to the user prompt displayed below.

The responses were generated based on the provided instructions and visual input from images.
There is a provided ground truth for the instructions, but the ground truth was not given to the AI assistants when generating their responses.
Take this context into account when making your judgment.

Steps for Evaluation:
1. Evaluate visual factuality for both responses based on the provided ground truth and visual factuality criteria.
    • If the visual factuality criteria consist of **X aspects**, each aspect is worth **10/X points**.
    • For each aspect, there may be multiple small criteria. If there are **Y small criteria in one aspect**, each small criterion is worth **10/X/Y points**.
2. Assign a total score out of 10 for each response.

Instructions to the AI assistants:
[INSTRUCTIONS]
{instructions}
[END INSTRUCTIONS]

Assistant A response:
[ASSISTANT A]
{reference_answer_by_gpt4o}
[END ASSISTANT A]

Visual Factuality Criteria:
[VISUAL FACTUALITY CRITERIA]
{criteria}
[END CRITERIA]

Assistant B response:
[ASSISTANT B]
{prediction}
[END ASSISTANT B]

Ground truth:
[GROUND TRUTH]
{groundtruth}
[END GROUND TRUTH]

Your output should evaluate visual factuality scores for each assistant and end like this:

Response A Visual Factuality Score: X/10
Response B Visual Factuality Score: Y/10
""",
}
+
# Grouping of Creation-MMBench leaf tasks into the four top-level reporting
# categories; used to aggregate per-task scores into category scores.
creation_mmbench_category_dict = {
    'CATEGORY_Literary_Writing': [
        'story_continue',
        'landscape_to_poem',
        'historical_story_creation',
        'story_novel_creation',
        'prose_writing_scenery',
        'art_inspired_prose',
        'daily_conversation_creation',
        'children_book_illustration_dialogue_creation'
    ],
    'CATEGORY_Common_Functionality_Writing':[
        'ins_simple_daily_copywriter',
        'travel_journal',
        'short_video_scripts_for_social_media',
        'social_media_travel_content',
        'daily_achievement_show_off',
        'scientific_research_simple_promotion',
        'twitter_comment_on_daily_news',
        'personal_event_summaries',
        'daily_affairs_inquiries',
        'business_collaborative_email_writing',
        'daily_emotional_email_writing',
        'letter_of_complaint',
        'daily_invitation_email_writing',
        'holiday_card_writing',
        'letter_of_application',
        'product_usage_experience_review',
        'store_experience_review',
        'public_welfare_activity_participation_initiative'
    ],
    'CATEGORY_Professional_Functionality_Writing': [
        'museum_guide_word_creation',
        'recipe_infer_and_guide',
        'landscape_introduction',
        'drafting_announcements_for_public_spaces',
        'floor_plan_renovation_design',
        'teaching_plan',
        'nutritional_formulation_of_recipe',
        'clothing_match_design',
        'software_engineering_diagram_explanation',
        'event_planning_and_venue_arrangement',
        'ui_design_analysis_and_optimization',
        'attraction_promotional_words',
        'product_marketing_strategy',
        'script_writing_for_product_advertisement_promotional_video',
        'residence_reasoning',
        'scientific_diagram_understanding',
        'pulitzer_prize_judge',
        'architecture_appreciation',
        'company_team_amuse_broadcast'
    ],
    'CATEGORY_Creative_Multimodal_Understanding': [
        'travel_itinerary_planning_and_recommendations',
        'photography_appreciation',
        'meme_explanation',
        'advertisement_explanation',
        'document_understanding',
        'snapshot_analysis'
    ]

}
+
def is_criteria_valid(criteria):
    """Return True iff every criterion text in *criteria* is usable.

    A criterion value is rejected when it is empty, a lone backslash, or
    contains no ASCII letter at all (e.g. purely numeric or punctuation
    placeholders left over from data preparation).
    """
    # `re` is already imported at module level; the previous function-local
    # `import re` was redundant.
    return all(
        value not in ('', '\\') and re.search('[a-zA-Z]', value) is not None
        for value in criteria.values()
    )
+
# Maps the judge pipeline's internal short metric keys to the descriptive
# names used in reported results; applied recursively by rename_keys.
key_mapping = {
    "sub_parse_ok": "preference_parse_ok",
    "sub_dist": "preference_dist",
    "win_rate": "win_rate",
    "sub_reward": "reward",
    "obj_parse_ok": "visual_factuality_parse_ok",
    "obj_score": "visual_factuality_score",
    "obj_ref_score": "visual_factuality_ref_score"
}
+
def rename_keys(data, key_mapping):
    """Recursively rename dict keys through *key_mapping*.

    Keys absent from the mapping are kept unchanged; lists are processed
    element by element; any other value is returned as-is.
    """
    if isinstance(data, dict):
        return {
            key_mapping.get(old_key, old_key): rename_keys(value, key_mapping)
            for old_key, value in data.items()
        }
    if isinstance(data, list):
        return [rename_keys(element, key_mapping) for element in data]
    return data
+
+
def build_prompt(line, dataset_name):
    """Build the judge prompt(s) for one evaluation record.

    Returns a dict that always contains a 'subjective' prompt and, when the
    record's criteria define an objective part, an 'objective' prompt —
    using the with-ground-truth template when a non-empty ground truth is
    present on the record.
    """
    # NOTE(review): eval() on dataset-provided text — assumed to be trusted
    # benchmark data; do not feed untrusted input through this path.
    try:
        criteria = eval(line['criteria'])
    except:
        criteria = line['criteria']

    # Normalize criteria into {'subjective': ..., 'objective': ...} form.
    if isinstance(criteria, dict):
        normalized = {}
        for key, value in criteria.items():
            bucket = 'subjective' if 'subjective' in key.lower() else 'objective'
            normalized[bucket] = value
        criteria = normalized
    else:
        assert isinstance(criteria, str)
        criteria = {'subjective': criteria}
    assert 'subjective' in criteria, 'No subjective criteria found in the criteria dict'

    if listinstr(['Creation_MMBench'], dataset_name):
        dataset_name = 'Creation_MMBench'
    templates = prompt_dict[dataset_name]

    prompts = {
        'subjective': templates['subjective'].format(
            instructions=line['question'],
            criteria=criteria['subjective'],
            reference_answer_by_gpt4o=line['reference_answer_by_gpt4o'],
            prediction=line['prediction'],
        )
    }
    if 'objective' in criteria:
        has_gt = 'ground_truth' in line and (not pd.isna(line['ground_truth'])) and line['ground_truth'] != ''
        if has_gt:
            prompts['objective'] = templates['objective_with_gt'].format(
                instructions=line['question'],
                criteria=criteria['objective'],
                groundtruth=line['ground_truth'],
                reference_answer_by_gpt4o=line['reference_answer_by_gpt4o'],
                prediction=line['prediction'])
        else:
            prompts['objective'] = templates['objective_without_gt'].format(
                instructions=line['question'],
                criteria=criteria['objective'],
                reference_answer_by_gpt4o=line['reference_answer_by_gpt4o'],
                prediction=line['prediction'])
    return prompts
+
+
def Generate_Creation_MMBench_judge(model, image_list, prompt):
    """Run the judge model on each prompt role and collect its outputs.

    The 'subjective' prompt is sent together with the sample images (when
    any are given); all other prompts are sent as plain text. Returns a
    dict mapping each prompt role to the judge's raw response.
    """
    assert isinstance(prompt, dict)
    response = {}
    for key, text in prompt.items():
        if key == 'subjective' and image_list:
            msgs = []
            for img in image_list:
                if not read_ok(img):
                    raise ValueError(f"Image not found: {img}")
                msgs.append({'type': 'image', 'value': img})
            msgs.append({'type': 'text', 'value': text})
            response[key] = model.generate(msgs)
        else:
            response[key] = model.generate(text)
    return response
+
+
+def extract_subjective(inp, dataset_name):
+ mapping_dict = {
+ 'LiveMMBench_Creation': 'FINAL CONCLUSION:',
+ 'Creation_MMBench': 'FINAL VERDICT IS:'
+ }
+ cands = {
+ 'A>>B', 'A>B', 'A=B', 'B>A', 'B>>A',
+ 'B<>A' in text:
+ return 2
+ elif 'AA' in text:
+ return 1
+ elif 'A=B' in text or 'B=A' in text:
+ return 0
+ elif 'A>B' in text or 'B>B' in text or 'B< 0 and not self.is_api:
+ concatenated_images = concat_images(tgt_path, max_concat=self.concat_num, column_num=self.column_num)
+
+ old_tgt_path = tgt_path
+ assert isinstance(old_tgt_path, list)
+ if self.column_num != -1:
+ tgt_path = [
+ '_'.join(old_tgt_path[0].split('_')[:-1]) + '_concat{}_{}.jpg'.format(self.concat_num, i)
+ for i in range(len(concatenated_images))
+ ]
+ else:
+ tgt_path = ['_'.join(old_tgt_path[0].split('_')[:-1]) + '_concat_all.jpg']
+
+ for path, concatenated_image in zip(tgt_path, concatenated_images):
+ if not read_ok(path):
+ decode_base64_to_image_file(encode_image_to_base64(concatenated_image), path)
+ num_images, image_size = len(old_tgt_path), concatenated_image.size
+ print('concat {} images to a new one with size {}. save at {}'.format(num_images, image_size, path))
+ return tgt_path
+
+ @classmethod
+ def evaluate(self, eval_file, **judge_kwargs):
+ logger = get_logger('Evaluation')
+ model = judge_kwargs['model']
+
+ storage = get_intermediate_file_path(eval_file, f'_{model}')
+ tmp_file = get_intermediate_file_path(eval_file, f'_{model}', 'pkl')
+
+ if osp.exists(storage):
+ logger.warning(f'GPT scoring file {storage} already exists, will reuse it in DUDE_eval. ')
+ else:
+ data = load(eval_file)
+ model = build_judge(max_tokens=128, **judge_kwargs)
+ lt = len(data)
+ lines = [data.iloc[i] for i in range(lt)]
+ tups = [(model, line) for line in lines]
+ indices = [line['index'] for line in lines]
+
+ ans = {}
+ if osp.exists(tmp_file):
+ ans = load(tmp_file)
+ tups = [x for x, i in zip(tups, indices) if i not in ans]
+ indices = [i for i in indices if i not in ans]
+
+ if len(indices):
+ new_results = list()
+ for model, line in tqdm(tups):
+ res = MMLongBench_auxeval(model, line)
+ new_results.append(res)
+
+ log_map, res_map, pred_map = {}, {}, {}
+ all_inds = [line['index'] for line in lines]
+ for k, v in zip(all_inds, new_results):
+ log_map[k] = v['log']
+ res_map[k] = v['res']
+ pred_map[k] = v['pred']
+ data['res'] = [res_map[idx] for idx in data['index']]
+ data['log'] = [log_map[idx] for idx in data['index']]
+ data['pred'] = [pred_map[idx] for idx in data['index']]
+ dump(data, storage)
+
+ score = DUDE_acc(storage)
+ score_pth = get_intermediate_file_path(storage, '_score', 'csv')
+
+ dump(score, score_pth)
+ logger.info(f'DUDE successfully finished evaluating {eval_file}, results saved in {score_pth}')
+ logger.info('Score: ')
+ logger.info(score)
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/image_mcq.py b/VLMEvalKit-sudoku/vlmeval/dataset/image_mcq.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f3e696204c86d73009bd6b06f32610e3da26861
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/image_mcq.py
@@ -0,0 +1,2960 @@
+from datetime import date
+import re
+import warnings
+
+from .image_base import ImageBaseDataset
+from .utils import build_judge, DEBUG_MESSAGE
+from ..smp import *
+import pandas as pd
+from tqdm import tqdm
+
# Multilingual MMMB / MMBench-dev TSV mirrors hosted with the Parrot
# dataset, plus their MD5 checksums. These are merged into
# ImageMCQDataset.DATASET_URL / DATASET_MD5 further below.
MMMB_URLS = {
    'MMMB_ar': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_ar.tsv',
    'MMMB_cn': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_cn.tsv',
    'MMMB_en': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_en.tsv',
    'MMMB_pt': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_pt.tsv',
    'MMMB_ru': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_ru.tsv',
    'MMMB_tr': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_tr.tsv',
}

MTL_MMBench_URLS = {
    'MMBench_dev_ar': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_ar.tsv',
    'MMBench_dev_cn': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_cn.tsv',
    'MMBench_dev_en': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_en.tsv',
    'MMBench_dev_pt': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_pt.tsv',
    'MMBench_dev_tr': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_tr.tsv',
    'MMBench_dev_ru': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_ru.tsv',
}

MMMB_MD5 = {
    'MMMB_ar': 'f3a18b6385f1d9701840aa42de27aead', 'MMMB_cn': '13ed82fa89730037292fcaa27f08f430',
    'MMMB_en': '1cd781a71ec5a2983c090b84105d6a01', 'MMMB_pt': '548ea2b3bb2da991790386f0015d30d1',
    'MMMB_ru': 'ce1cc8a0533425ab0d86b326ebfc2984', 'MMMB_tr': '0733739d43090327975294292bc5cd67'
}

MTL_MMBench_MD5 = {
    'MMBench_dev_ar': '4271b4a0d0200e1a86380a878e0d64a4', 'MMBench_dev_cn': '2ed5135326fed02c8e51ea50dda8222f',
    'MMBench_dev_en': 'd9ab776fc018b3d45785e9a5c23431c2', 'MMBench_dev_pt': '4ddfbcd27ef12444b908c03831cd0295',
    'MMBench_dev_tr': '4fab39d501389d3d6cc90264bb708f11', 'MMBench_dev_ru': '5ba1171ff2e68f80637bf78349e402a5'
}
+
+
class ImageMCQDataset(ImageBaseDataset):
    """Generic multiple-choice image-QA dataset.

    Serves TSV-backed MCQ benchmarks; answers are scored either by exact
    option matching or with an LLM judge (see evaluate_heuristic below).
    """

    TYPE = 'MCQ'

    # Download URL for each supported benchmark TSV, keyed by dataset name.
    DATASET_URL = {
        # MMBench v1.0
        'MMBench_DEV_EN': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_DEV_EN.tsv',
        'MMBench_TEST_EN': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_TEST_EN.tsv',
        'MMBench_DEV_CN': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_DEV_CN.tsv',
        'MMBench_TEST_CN': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_TEST_CN.tsv',
        'MMBench_DEV_KO': 'https://huggingface.co/datasets/NCSOFT/K-MMBench/resolve/main/MMBench_DEV_KO.tsv',
        'MMBench': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench.tsv', # Internal
        'MMBench_CN': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_CN.tsv', # Internal
        # MMBench v1.1
        'MMBench_DEV_EN_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_DEV_EN_V11.tsv',
        'MMBench_TEST_EN_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_TEST_EN_V11.tsv',
        'MMBench_DEV_CN_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_DEV_CN_V11.tsv',
        'MMBench_TEST_CN_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_TEST_CN_V11.tsv',
        'MMBench_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_V11.tsv', # Internal
        'MMBench_CN_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_CN_V11.tsv', # Internal
        # SEEDBench Series
        'SEEDBench_IMG': 'https://opencompass.openxlab.space/utils/benchmarks/SEEDBench/SEEDBench_IMG.tsv',
        'SEEDBench_IMG_KO': 'https://huggingface.co/datasets/NCSOFT/K-SEED/resolve/main/SEEDBench_IMG_KO.tsv',
        'SEEDBench2': 'https://huggingface.co/datasets/VLMEval/SEEDBench2/resolve/main/SEEDBench2.tsv',
        'SEEDBench2_Plus': 'https://opencompass.openxlab.space/utils/benchmarks/SEEDBench/SEEDBench2_Plus.tsv',
        # ScienceQA Series
        'ScienceQA_VAL': 'https://opencompass.openxlab.space/utils/benchmarks/ScienceQA/ScienceQA_VAL.tsv',
        'ScienceQA_TEST': 'https://opencompass.openxlab.space/utils/benchmarks/ScienceQA/ScienceQA_TEST.tsv',
        # MMT-Bench
        'MMT-Bench_ALL_MI': 'https://opencompass.openxlab.space/utils/benchmarks/MMT-Bench/MMT-Bench_ALL_MI.tsv',
        'MMT-Bench_ALL': 'https://opencompass.openxlab.space/utils/benchmarks/MMT-Bench/MMT-Bench_ALL.tsv',
        'MMT-Bench_VAL_MI': 'https://opencompass.openxlab.space/utils/benchmarks/MMT-Bench/MMT-Bench_VAL_MI.tsv',
        'MMT-Bench_VAL': 'https://opencompass.openxlab.space/utils/benchmarks/MMT-Bench/MMT-Bench_VAL.tsv',
        # AesBench
        'AesBench_VAL': 'https://huggingface.co/datasets/VLMEval/AesBench/resolve/main/AesBench_VAL.tsv',
        'AesBench_TEST': 'https://huggingface.co/datasets/VLMEval/AesBench/resolve/main/AesBench_TEST.tsv',
        # Q-Bench1
        'Q-Bench1_VAL': 'https://huggingface.co/datasets/zhangzicheng/qbench_tsv/resolve/main/Q-Bench1_VAL.tsv',
        'Q-Bench1_TEST': 'https://huggingface.co/datasets/zhangzicheng/qbench_tsv/resolve/main/Q-Bench1_TEST.tsv',
        # A-Bench
        'A-Bench_VAL': 'https://huggingface.co/datasets/zhangzicheng/abench_tsv/resolve/main/A-bench_VAL.tsv',
        'A-Bench_TEST': 'https://huggingface.co/datasets/zhangzicheng/abench_tsv/resolve/main/A-bench_TEST.tsv',
        # R-Bench
        'R-Bench-Dis': 'https://huggingface.co/datasets/lcysyzxdxc/R-Bench/resolve/main/R-bench-dis.tsv',
        'R-Bench-Ref': 'https://huggingface.co/datasets/lcysyzxdxc/R-Bench/resolve/main/R-bench-ref.tsv',
        # Other Benchmarks
        'CCBench': 'https://opencompass.openxlab.space/utils/VLMEval/CCBench.tsv',
        'AI2D_TEST': 'https://opencompass.openxlab.space/utils/VLMEval/AI2D_TEST.tsv',
        'AI2D_TEST_NO_MASK': 'https://opencompass.openxlab.space/utils/VLMEval/AI2D_TEST_NO_MASK.tsv',
        'MMStar': 'https://opencompass.openxlab.space/utils/VLMEval/MMStar.tsv',
        'MMStar_KO': 'https://huggingface.co/datasets/NCSOFT/K-MMStar/resolve/main/MMStar_KO.tsv',
        'RealWorldQA': 'https://opencompass.openxlab.space/utils/VLMEval/RealWorldQA.tsv',
        'MLLMGuard_DS': 'https://opencompass.openxlab.space/utils/VLMEval/MLLMGuard_DS.tsv',
        'BLINK': 'https://opencompass.openxlab.space/utils/VLMEval/BLINK.tsv',
        'BLINK_circular': 'https://opencompass.openxlab.space/utils/VLMEval/BLINK_circular.tsv',
        'TaskMeAnything_v1_imageqa_random': (
            'https://huggingface.co/datasets/weikaih/TaskMeAnything-v1-imageqa-random/'
            'resolve/main/TaskMeAnything-v1-imageqa-random.tsv'
        ),
        'A-OKVQA': 'https://huggingface.co/datasets/Allen8/A-OKVQA/resolve/main/a-okvqa.tsv',
        'WorldMedQA-V': 'https://opencompass.openxlab.space/utils/VLMEval/WorldMedQA-V.tsv',
        'VisOnlyQA-VLMEvalKit': (
            'https://huggingface.co/datasets/ryokamoi/VisOnlyQA_Eval_Real/'
            'resolve/main/visonlyqa_vlmevalkit.tsv'
        ),
        'MMCR': 'http://opencompass.openxlab.space/utils/VLMEval/MMCR.tsv',
        'MMSci_DEV_MCQ': 'https://opencompass.openxlab.space/utils/VLMEval/MMSci_DEV_MCQ.tsv',
        "MMVP": "http://opencompass.openxlab.space/utils/VLMEval/MMVP.tsv",
        "K-DTCBench": "https://huggingface.co/datasets/NCSOFT/K-DTCBench/resolve/main/K-DTCBench.tsv",
        # For Internal Use Only
        'MMBench_V11_MINI': 'https://opencompass.openxlab.space/utils/TEST/MMBench_V11_MINI.tsv',
        'MMStar_MINI': 'https://opencompass.openxlab.space/utils/TEST/MMStar_MINI.tsv',
        'AI2D_MINI': 'https://opencompass.openxlab.space/utils/TEST/AI2D_MINI.tsv',
        "VStarBench": "https://huggingface.co/datasets/xjtupanda/VStar_Bench/resolve/main/VStarBench.tsv",
        'PathMMU_VAL': 'https://huggingface.co/datasets/Pfei111/PathMMU/resolve/main/PathMMU_VAL.tsv',
        'PathMMU_TEST': 'https://huggingface.co/datasets/Pfei111/PathMMU/resolve/main/PathMMU_TEST.tsv',
        'CMMU_MCQ': 'https://huggingface.co/datasets/Pfei111/CMMU_VAL_MCQ/resolve/main/CMMU_VAL_MCQ.tsv',
        'MicroVQA': 'https://opencompass.openxlab.space/utils/VLMEval/MicroVQA.tsv',
        'MMSIBench_circular': 'https://opencompass.openxlab.space/utils/VLMEval/MMSIBench_circular.tsv',
        'SpatialEval': 'https://opencompass.openxlab.space/utils/VLMEval/SpatialEval.tsv',
        "StaticEmbodiedBench": "https://huggingface.co/datasets/xiaojiahao/StaticEmbodiedBench/resolve/main/StaticEmbodiedBench.tsv", # noqa
        "StaticEmbodiedBench_circular": "https://huggingface.co/datasets/xiaojiahao/StaticEmbodiedBench/resolve/main/StaticEmbodiedBench_circular.tsv" # noqa
    }

    # MD5 checksum for each downloaded TSV, used to verify file integrity;
    # keys mirror DATASET_URL (datasets without a checksum are absent).
    DATASET_MD5 = {
        # MMBench v1.0
        'MMBench_DEV_EN': 'b6caf1133a01c6bb705cf753bb527ed8',
        'MMBench_TEST_EN': '6939fadb0ce626fefc0bdc9c64efc528',
        'MMBench_DEV_CN': '08b8fc3324a5ed74155350f57be69fbd',
        'MMBench_TEST_CN': '7e1239baf0ee4c8b513e19705a0f317e',
        'MMBench_DEV_KO': '72e1cde9124b5015be6d0dd5c9b5500d',
        'MMBench': '4115aea3383f3dd0083be6a633e0f820', # Internal Only
        'MMBench_CN': '2e053ffc90ea598b1feae13c36dc13ee', # Internal Only
        # MMBench v1.1
        'MMBench_DEV_EN_V11': '30c05be8f2f347a50be25aa067248184',
        'MMBench_TEST_EN_V11': '26f0f15381a21720255091d3e0316ce6',
        'MMBench_DEV_CN_V11': '593f9b5f6bea453d870a798b34ae4f37',
        'MMBench_TEST_CN_V11': '74bbe4556dac745613c7cbe5ad787050',
        'MMBench_V11': 'b9276414f57af1308dcc4d0cd9b42e7c', # Internal Only
        'MMBench_CN_V11': '95f6980dd1b4de38e3cbffe0305a3f25', # Internal Only
        # SEEDBench
        'SEEDBench_IMG': '68017231464752261a2526d6ca3a10c0',
        'SEEDBench_IMG_KO': 'b354a9ac3493f3ccf294e69b216bfab3',
        'SEEDBench2': '4ec15cf864c4f16274112284f531813e',
        'SEEDBench2_Plus': 'e32d3216dc4f452b0fe497a52015d1fd',
        # ScienceQA
        'ScienceQA_VAL': '96320d05e142e585e7204e72affd29f3',
        'ScienceQA_TEST': 'e42e9e00f9c59a80d8a5db35bc32b71f',
        # MMT-Bench
        'MMT-Bench_ALL_MI': '5272157097e19cdd7cb41e412ab3b7c7',
        'MMT-Bench_ALL': 'b273a2f4c596fe4f2605de0494cd632f',
        'MMT-Bench_VAL_MI': 'c7d7b998eb5cd9aa36c7d4f721472462',
        'MMT-Bench_VAL': '8dd4b730f53dbf9c3aed90ca31c928e0',
        # AesBench
        'AesBench_VAL': '3edb0c319e9187aa0b97fe7a11700a8c',
        'AesBench_TEST': '58b1f7ba2cc32e1d68896d6ee716bbf8',
        # Q-Bench1
        'Q-Bench1_VAL': '837bdb6cd2da571713543462815187b7',
        'Q-Bench1_TEST': '15e759bfd58c9d5f30b23a317d347153',
        # A-Bench
        'A-Bench_VAL': '218563ec50d34bb336c814143a5bb9c1',
        'A-Bench_TEST': '567013fb033a20cf23f51d8e865bd16c',
        # R-Bench
        'R-Bench-Dis': 'd6e961dbfc43350688af2560226830b4',
        'R-Bench-Ref': '270c1cb555acb523f3fdb178ed57021d',
        # Other Benchmarks
        'CCBench': 'f5dde47f24dc5a6fb6e595b409b466ac',
        'AI2D_TEST': '0f593e0d1c7df9a3d69bf1f947e71975',
        'AI2D_TEST_NO_MASK': 'fd8f463634d4fe9fbd23b876e8eea5be',
        'MMStar': 'e1ecd2140806c1b1bbf54b43372efb9e',
        'MMStar_KO': 'cc6049c7314bb54b9ac5e247a2bfb357',
        'RealWorldQA': '4de008f55dc4fd008ca9e15321dc44b7',
        'MLLMGuard_DS': '975fc0dd7119386e198c37d71e274b3f',
        'BLINK': '3b6649b6a662184ea046908e5506260e',
        'BLINK_circular': '75aee2332420c7654dc51b1442fafc7b',
        'TaskMeAnything_v1_imageqa_random': '023fef69e2ca21827afb77c5ec3bc889',
        'WorldMedQA-V': '441e63875e30c87f5750528b57b41285',
        "VisOnlyQA-VLMEvalKit": 'cf460a31d2acb8d3a7cecd0e69298bfa',
        'MMCR': '9052635f2c3835bdb87755ef73564f5e',
        'MMSci_DEV_MCQ': '865144aa866e29b251bdc7d63a735b6b',
        "MMVP": "8cb732b141a0cba5b42159df2839e557",
        "K-DTCBench": "fe72a85b010513d3840b5f3be2de6ed3",
        "VStarBench": "b18854d7075574be06b631cd5f7d2d6a",
        'MicroVQA': 'd7506438701a2076ec277f8bb3586c1a',
        'MMSIBench_circular': '7be2b9e8a280863272e89fab5ba40807',
        'SpatialEval': '4c8eb33142b26be2916fb9164287b72b',
        "StaticEmbodiedBench": "5c50611650ca966970180a80d49429f0",
        "StaticEmbodiedBench_circular": "034cf398a3c7d848d966e1081e4baf68"
    }

    # Fold in the multilingual MMMB / MMBench-dev mirrors declared at module
    # level above.
    DATASET_URL.update(MMMB_URLS)
    DATASET_URL.update(MTL_MMBench_URLS)
    DATASET_MD5.update(MMMB_MD5)
    DATASET_MD5.update(MTL_MMBench_MD5)
+
+ def build_prompt(self, line):
+
+ if isinstance(line, int):
+ line = self.data.iloc[line]
+
+ if self.meta_only:
+ tgt_path = toliststr(line['image_path'])
+ else:
+ tgt_path = self.dump_image(line)
+
+ question = line['question']
+ options = {
+ cand: line[cand]
+ for cand in string.ascii_uppercase
+ if cand in line and not pd.isna(line[cand])
+ }
+ options_prompt = 'Options:\n'
+ for key, item in options.items():
+ options_prompt += f'{key}. {item}\n'
+ hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
+ prompt = ''
+ if hint is not None:
+ prompt += f'Hint: {hint}\n'
+ prompt += f'Question: {question}\n'
+ if len(options):
+ prompt += options_prompt
+ prompt += 'Please select the correct answer from the options above. \n'
+
+ msgs = []
+ if isinstance(tgt_path, list):
+ msgs.extend([dict(type='image', value=p) for p in tgt_path])
+ else:
+ msgs = [dict(type='image', value=tgt_path)]
+ msgs.append(dict(type='text', value=prompt))
+
+ return msgs
+
+ def evaluate(self, eval_file, **judge_kwargs):
+ if judge_kwargs.get('use_verifier', False):
+ return self.evaluate_verifier(eval_file, **judge_kwargs)
+ else:
+ return self.evaluate_heuristic(eval_file, **judge_kwargs)
+
    def evaluate_heuristic(self, eval_file, **judge_kwargs):
        """Score MCQ predictions by exact matching or an OpenAI judge model.

        Args:
            eval_file: path to the prediction file produced by inference.
            **judge_kwargs: judge configuration; recognized keys include
                'model' ('exact_matching' | 'chatgpt-0125' | 'gpt-4-0125')
                and 'nproc' (worker count for the eval helpers).

        Returns:
            An accuracy DataFrame (shape depends on the dataset's report function).
        """
        from .utils.multiple_choice import (
            report_acc, report_acc_MMT, report_acc_MMSci, mcq_circular_eval, mcq_vanilla_eval
        )
        # assert dataset is not None
        # Map TEST split aliases onto their canonical dataset names.
        dataset_map = {
            'MMBench_TEST_EN': 'MMBench', 'MMBench_TEST_EN_V11': 'MMBench_V11',
            'MMBench_TEST_CN': 'MMBench_CN', 'MMBench_TEST_CN_V11': 'MMBench_CN_V11'
        }
        dataset = self.dataset_name
        if dataset in dataset_map:
            dataset = dataset_map[dataset]
        nproc = judge_kwargs.pop('nproc', 4)

        # Circular datasets repeat each question with rotated options and need
        # the dedicated circular evaluator; indices are normalized to int first.
        circular = False
        if listinstr(['mmbench', 'ccbench', 'circular', 'mmcr'], dataset.lower()):
            data = load(eval_file)
            data['index'] = [int(x) for x in data['index']]
            dump(data, eval_file)
            circular = True

        model = judge_kwargs.get('model', 'exact_matching')
        assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125']
        name_str_map = {'chatgpt-0125': 'openai', 'gpt-4-0125': 'gpt4'}
        name_str = name_str_map[model] if model in name_str_map else model

        # Fall back to exact matching whenever a judge cannot be built.
        if model == 'exact_matching':
            model = None
        elif gpt_key_set():
            model = build_judge(**judge_kwargs)
            if not model.working():
                warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
                warnings.warn(DEBUG_MESSAGE)
                model = None
        else:
            warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
            model = None

        result_file = get_intermediate_file_path(eval_file, f'_{name_str}_result', 'pkl')

        data = load(eval_file)
        data = data.sort_values(by='index')
        data['prediction'] = [str(x) for x in data['prediction']]
        # If not choice label, then use lower case
        for k in data.keys():
            data[k.lower() if k not in list(string.ascii_uppercase) else k] = data.pop(k)

        # Sanity check: every evaluated index must exist in the dataset meta.
        meta = self.data
        meta_q_map = {x: y for x, y in zip(meta['index'], meta['question'])}
        data_map = {x: y for x, y in zip(data['index'], data['question'])}
        for k in data_map:
            assert k in meta_q_map, (
                f'eval_file should be the same as or a subset of dataset {self.dataset_name}'
            )

        if circular:
            data = mcq_circular_eval(model, data, meta, nproc, result_file, self.dataset_name)
        else:
            data = mcq_vanilla_eval(model, data, meta, nproc, result_file, self.dataset_name)

        # load split
        eval_record = get_intermediate_file_path(eval_file, f'_{name_str}_result')
        dump(data, eval_record)
        data = load(eval_record)

        # May have different report acc functions for different datasets
        if 'MMT' in dataset:
            acc = report_acc_MMT(data)
        elif 'MMSci' in dataset:
            acc = report_acc_MMSci(data)
        else:
            acc = report_acc(data)

        score_file = get_intermediate_file_path(eval_file, '_acc', 'csv')
        dump(acc, score_file)

        # The piece of code is for internal use, to check vanilla acc (circ0 & all) for circular datasets
        if circular and os.environ.get('PRINT_VANILLA', None) == '1':
            acc_map = {}
            acc_map['circular'] = acc
            # Vanilla Circ0 Acc: score only the un-rotated (circ-0) copies.
            data = load(eval_file)
            data['index'] = [int(x) for x in data['index']]
            if 'g_index' in data:
                data['g_index'] = [int(x) for x in data['g_index']]
                circ0 = data[data['g_index'] == data['index']]
            else:
                # Without g_index, circ-0 rows are those below the rotation offset.
                offset = 1e6
                circ0 = data[data['index'] <= offset]
            result_file = get_intermediate_file_path(eval_file, f'_{name_str}_vanilla_result', 'pkl')
            data0 = mcq_vanilla_eval(model, circ0, meta, nproc, result_file, self.dataset_name)
            dump(data0, get_intermediate_file_path(eval_file, f'_{name_str}_vanilla_circ0_result'))
            data = load(get_intermediate_file_path(eval_file, f'_{name_str}_vanilla_circ0_result'))
            acc_map['vanilla_0'] = report_acc(data)
            # Vanilla ALL Acc: score every rotated copy with the vanilla evaluator.
            data = load(eval_file)
            dataall = mcq_vanilla_eval(model, data, meta, nproc, result_file, self.dataset_name)
            dump(dataall, get_intermediate_file_path(eval_file, f'_{name_str}_vanilla_all_result'))
            data = load(get_intermediate_file_path(eval_file, f'_{name_str}_vanilla_all_result'))
            acc_map['vanilla_all'] = report_acc(data)
            # Merge & Print the Evaluation Results: tag each table's rows by mode.
            for k, v in acc_map.items():
                if 'split' not in v:
                    v['split'] = [None] * len(v)
                if len(v) == 1 and pd.isna(v['split'][0]):
                    v['split'] = [k]
                else:
                    assert not pd.isna(v['split'][0])
                    v['split'] = [k + '_' + sp for sp in v['split']]
            score_all = [acc_map['vanilla_0'], acc_map['vanilla_all'], acc_map['circular']]
            score_all = pd.concat(score_all)
            print(score_all)
            score_file = get_intermediate_file_path(eval_file, '_acc_all', 'csv')
            dump(score_all, score_file)

        if dataset == 'AesBench_VAL':
            warnings.warn('Note that AesBench VAL is just a toy version of AesBench TEST. For full results, \
                          please evaluate on AesBench TEST. The AesBench TEST dataset is more than 20 times \
                          larger than the VAL dataset and the leaderboard results are based on AesBench TEST.')
        if dataset == 'VisOnlyQA-VLMEvalKit':
            warnings.warn('Note that the results on VisOnlyQA-VLMEvalKit are different from the results on \
                          the original VisOnlyQA. VisOnlyQA-VLMEvalKit does not include the \
                          chemistry__shape_multi split and uses a different evaluation prompt. Please \
                          explicitly specify the version of the dataset when you report results.')

        return acc
+
    def evaluate_verifier(self, eval_file, **judge_kwargs):
        """Score predictions with an LLM verifier instead of heuristic matching.

        Raises ValueError for circular datasets (not supported here). Writes a
        detailed per-row result file and an accuracy CSV; returns the accuracy
        DataFrame.
        """
        # assert dataset is not None
        dataset_map = {
            'MMBench_TEST_EN': 'MMBench', 'MMBench_TEST_EN_V11': 'MMBench_V11',
            'MMBench_TEST_CN': 'MMBench_CN', 'MMBench_TEST_CN_V11': 'MMBench_CN_V11'
        }
        dataset = self.dataset_name
        if dataset in dataset_map:
            dataset = dataset_map[dataset]

        circular = False
        if listinstr(['mmbench', 'ccbench', 'circular', 'mmcr'], dataset.lower()):
            circular = True

        if circular:
            raise ValueError("circular is not supported for verifier evaluation")

        data = load(eval_file)
        data = data.sort_values(by='index')
        data['prediction'] = [str(x) for x in data['prediction']]
        # If not choice label, then use lower case
        for k in data.keys():
            data[k.lower() if k not in list(string.ascii_uppercase) else k] = data.pop(k)

        # Add verifier evaluation for specific datasets
        from .utils.verifier import Verifier
        verifier = Verifier(use_vllm=judge_kwargs.get('use_vllm', False))
        verifier_scores = []
        verifier_matches = []
        for idx, row in tqdm(data.iterrows(), total=len(data), desc="Verifier Evaluation Progress"):
            question_text = row['question']
            # NOTE(review): only options A-E are folded into the question here,
            # while build_prompt supports A-Z — confirm no record has >5 options.
            if 'A' in row and not pd.isna(row['A']):
                options = []
                for option_key in ['A', 'B', 'C', 'D', 'E']:
                    if option_key in row and not pd.isna(row[option_key]):
                        options.append(f"{option_key}. {row[option_key]}")
                if options:
                    question_text += "\nOptions:\n" + "\n".join(options)

            # Prefer the full option text as ground truth when the answer letter
            # maps to a populated column; otherwise use the bare letter.
            correct_option = str(row['answer']).strip().upper()
            if correct_option in row and not pd.isna(row[correct_option]):
                answer_text = f"{correct_option}. {row[correct_option]}"
            else:
                answer_text = correct_option

            score = verifier.evaluate(question_text, row['prediction'], answer_text)
            verifier_scores.append(score)
            verifier_matches.append(1.0 if score else 0.0)

        data['verifier_score'] = verifier_scores
        data['verifier_match'] = verifier_matches

        detailed_result_file = get_intermediate_file_path(eval_file, '_detailed_results')
        dump(data, detailed_result_file)

        def report_acc_verifier(result_file):
            # Aggregate verifier hits into Overall / per-split / per-category accuracy.
            from collections import defaultdict

            data = load(result_file)
            tot = defaultdict(lambda: 0)
            hit = defaultdict(lambda: 0)
            lt = len(data)

            for i in range(lt):
                item = data.iloc[i]
                split_name = item.get('split', 'Overall')
                if pd.isna(split_name):
                    split_name = 'Overall'

                tot['Overall'] += 1
                tot[split_name] += 1

                if 'category' in item and not pd.isna(item['category']):
                    category = item['category']
                    tot[category] += 1

                # Only a strict boolean True counts as a hit (not truthy scores).
                if item['verifier_score'] is True:
                    hit['Overall'] += 1
                    hit[split_name] += 1

                    if 'category' in item and not pd.isna(item['category']):
                        hit[category] += 1

            res = defaultdict(list)
            for k in tot.keys():
                if k == 'Overall':
                    res['Category'].append('Overall')
                else:
                    res['Category'].append(k)
                res['Total'].append(tot[k])
                res['Hit'].append(hit[k])
                res['Accuracy'].append(hit[k] / tot[k] * 100 if tot[k] > 0 else 0.0)

            res_df = pd.DataFrame(res)
            return res_df
        acc = report_acc_verifier(detailed_result_file)
        score_file = get_intermediate_file_path(eval_file, '_acc', 'csv')
        dump(acc, score_file)
        return acc
+
+
class MedXpertQA_MM_test(ImageMCQDataset):
    """MedXpertQA multi-modal test split; reuses the stock ImageMCQDataset pipeline."""

    DATASET_URL = {
        'MedXpertQA_MM_test': 'https://opencompass.openxlab.space/utils/VLMEval/MedXpertQA_MM_test.tsv',
    }

    DATASET_MD5 = {
        'MedXpertQA_MM_test': '73c12d28ebdfca97c5fd3c3be3fe357b',
    }
+
+
class MMMUDataset(ImageMCQDataset):
    """MMMU: interleaved image-text MCQ; questions reference images via `<image n>` tags."""

    DATASET_URL = {
        'MMMU_DEV_VAL': 'https://opencompass.openxlab.space/utils/VLMEval/MMMU_DEV_VAL.tsv',
        'MMMU_TEST': 'https://opencompass.openxlab.space/utils/VLMEval/MMMU_TEST.tsv',
    }

    DATASET_MD5 = {
        'MMMU_DEV_VAL': '585e8ad75e73f75dcad265dfd0417d64',
        'MMMU_TEST': 'c19875d11a2d348d07e5eb4bdf33166d',
    }

    @staticmethod
    def split_MMMU(msgs):
        """Interleave image messages into the text at ``<image n>`` placeholders.

        Expects exactly one text message plus the image messages it references;
        returns a message list where each ``<image n>`` tag is replaced by the
        n-th (1-based) image, with the surrounding text kept as separate segments.
        """
        text, images = None, []
        for s in msgs:
            if s['type'] == 'image':
                images.append(s['value'])
            elif s['type'] == 'text':
                assert text is None  # exactly one text segment expected
                text = s['value']
        # Fix: the '<image ' separator and the loop header were destroyed by
        # tag-stripping corruption; restored splitting on the numbered markers.
        text_segs = text.split('<image ')
        segs = [dict(type='text', value=text_segs[0])]
        for i, seg in enumerate(text_segs):
            if i == 0:
                continue
            # Each remaining segment starts with 'n>' (n = 1-based image index).
            assert seg[0].isdigit() and seg[1] == '>'
            image_idx = int(seg[0]) - 1
            segs.append(dict(type='image', value=images[image_idx]))
            segs.append(dict(type='text', value=seg[2:]))
        return segs

    def build_prompt(self, line):
        """Build the base MCQ prompt, then splice images in at their <image n> anchors."""
        msgs = super().build_prompt(line)
        msgs = self.split_MMMU(msgs)
        return msgs
+
+
class MMMUProDataset(MMMUDataset):
    """MMMU-Pro: 10-choice (10c) and vision-only (V) variants, each with an optional COT prompt."""

    TYPE = 'MCQ_MMMU_Pro'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Vision variants embed the question inside the image, so every row
        # gets a stub question string.
        if 'MMMU_Pro_V' in self.dataset_name:
            self.data['question'] = ['placeholder'] * len(self.data)

    # COT and non-COT variants share the same underlying TSV (identical URL/md5).
    DATASET_URL = {
        'MMMU_Pro_10c': 'https://opencompass.openxlab.space/utils/VLMEval/MMMU_Pro_10c.tsv',
        'MMMU_Pro_10c_COT': 'https://opencompass.openxlab.space/utils/VLMEval/MMMU_Pro_10c.tsv',
        'MMMU_Pro_V': 'https://opencompass.openxlab.space/utils/VLMEval/MMMU_Pro_V.tsv',
        'MMMU_Pro_V_COT': 'https://opencompass.openxlab.space/utils/VLMEval/MMMU_Pro_V.tsv',
    }

    DATASET_MD5 = {
        'MMMU_Pro_10c': '22cee868fe6b680d14b99bfff6db8172',
        'MMMU_Pro_10c_COT': '22cee868fe6b680d14b99bfff6db8172',
        'MMMU_Pro_V': 'd01441a87b3dbe721b5a04652ae38009',
        'MMMU_Pro_V_COT': 'd01441a87b3dbe721b5a04652ae38009',
    }

    def build_prompt(self, line):
        """Build the prompt; V variants send a single image plus an instruction only."""
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line['image_path'])
        else:
            tgt_path = self.dump_image(line)

        if 'MMMU_Pro_V' in self.dataset_name:
            # Vision variant: the question and options are rendered in the image.
            question = 'Answer the following multiple-choice question in the image. '
            if 'COT' in self.dataset_name:
                question += (
                    "The last line of your response should be of the following format: 'Answer: $LETTER' "
                    "(without quotes) where LETTER is one of the options. Think step by step before answering. "
                )
            else:
                question += "Answer directly with the option letter from the given choices. "
            if isinstance(tgt_path, list):
                assert len(tgt_path) == 1  # V variant ships exactly one image
                tgt_path = tgt_path[0]
            return [dict(type='image', value=tgt_path), dict(type='text', value=question)]
        else:
            question = line['question']
            options = {
                cand: line[cand]
                for cand in string.ascii_uppercase
                if cand in line and not pd.isna(line[cand])
            }
            options_prompt = 'Options:\n'
            for key, item in options.items():
                options_prompt += f'{key}. {item}\n'
            prompt = ''
            prompt += f'Question: {question}\n'
            if len(options):
                prompt += options_prompt
            if 'COT' in self.dataset_name:
                prompt += (
                    "Answer the following multiple-choice question. The last line of your response should be of "
                    "the following format: 'Answer: $LETTER' (without quotes) where LETTER is one of the options. "
                    "Think step by step before answering. "
                )
            else:
                prompt += "Answer directly with the option letter from the given choices. "

            msgs = []
            if isinstance(tgt_path, list):
                msgs.extend([dict(type='image', value=p) for p in tgt_path])
            else:
                msgs = [dict(type='image', value=tgt_path)]
            msgs.append(dict(type='text', value=prompt))
            # Splice images in at their <image n> anchors, as for plain MMMU.
            msgs = self.split_MMMU(msgs)
            return msgs

    def cot_postproc(self, response):
        """Reduce a chain-of-thought response to the option letter on its 'Answer:' line.

        Note: the 'A' in the literal 'Answer:' prefix is itself counted below,
        so the single-letter fast path only fires when 'A' is the sole
        uppercase letter present; otherwise the text after 'Answer:' is kept.
        Returns the raw response when no unique 'Answer:' line exists.
        """
        lines = response.strip().split('\n')
        lines = [x.strip() for x in lines]
        cands = [x for x in lines if x.startswith('Answer:')]
        if len(cands) == 1:
            counter = defaultdict(lambda: 0)
            for ch in cands[0]:
                if ch in string.ascii_uppercase:
                    counter[ch] += 1
            if len(counter) == 1:
                return list(counter.keys())[0]
            else:
                return cands[0][7:]  # strip the 7-char 'Answer:' prefix
        return response

    def evaluate(self, eval_file, **judge_kwargs):
        """For COT variants, strip reasoning to the final letter before standard MCQ eval."""
        if 'COT' in self.dataset_name:
            data = load(eval_file)
            data['prediction'] = [self.cot_postproc(x) for x in data['prediction']]
            tgt = get_intermediate_file_path(eval_file, '_cotpost')
            dump(data, tgt)
            res = super().evaluate(tgt, **judge_kwargs)
            # Copy the accuracy produced for the post-processed file over the
            # canonical score-file location for this eval_file.
            acc_org = get_intermediate_file_path(eval_file, '_acc', 'csv')
            acc_now = get_intermediate_file_path(eval_file, '_cotpost_acc', 'csv')
            shutil.copy(acc_now, acc_org)
            return res
        else:
            return super().evaluate(eval_file, **judge_kwargs)
+
+
class MUIRDataset(ImageMCQDataset):
    """MUIRBench: multi-image MCQ; prompts interleave images at `<image>` markers."""

    # NOTE(review): host 'openxxlab.com' differs from the 'openxlab.space'
    # mirror used by every other URL in this file — confirm it is intentional.
    DATASET_URL = {
        'MUIRBench': 'http://opencompass.openxxlab.com/utils/VLMEval/MUIRBench.tsv'
    }

    DATASET_MD5 = {
        'MUIRBench': '2e5e6fd7699761b08a7cb3ab8c0c2ec8'
    }

    @staticmethod
    def split_MUIR(msgs):
        """Interleave image messages into the text at ``<image>`` placeholders.

        Images are consumed in order: the i-th marker is replaced by the i-th
        image message; empty text fragments are dropped.
        """
        text, images = None, []

        # Separate images and text from msgs
        for s in msgs:
            if s['type'] == 'image':
                images.append(s['value'])
            elif s['type'] == 'text':
                assert text is None  # Ensure only one text entry is expected
                text = s['value']

        # Split text by <image> tags.
        # Fix: the separator was lost to tag-stripping corruption, leaving
        # text.split('') which raises ValueError (empty separator).
        text_segs = text.split('<image>')

        # Initialize the segments list
        segs = []

        # Iterate through the text segments and images
        for i, seg in enumerate(text_segs):
            # Append the image if this is not the first segment and there are still images left
            if i > 0 and i - 1 < len(images):
                segs.append(dict(type='image', value=images[i - 1]))
            # Append the text segment (if it's non-empty)
            if len(seg) > 0:
                segs.append(dict(type='text', value=seg))

        return segs

    def build_prompt(self, line):
        """Build question + options text, then splice images in at their markers."""
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line['image_path'])
        else:
            tgt_path = self.dump_image(line)

        question = line['question']
        options = {
            cand: line[cand]
            for cand in string.ascii_uppercase
            if cand in line and not pd.isna(line[cand])
        }
        options_prompt = '\n'.join([f'{key}. {item}' for key, item in options.items()])

        prompt = ''

        prompt += f'{question}\n'
        if len(options):
            prompt += options_prompt
            prompt += "\nAnswer with the option's letter from the given choices directly."

        msgs = []
        if isinstance(tgt_path, list):
            msgs.extend([dict(type='image', value=p) for p in tgt_path])
        else:
            msgs = [dict(type='image', value=tgt_path)]
        msgs.append(dict(type='text', value=prompt))

        msgs = self.split_MUIR(msgs)
        return msgs
+
+
class GMAIMMBenchDataset(ImageMCQDataset):
    """GMAI-MMBench medical MCQ benchmark: VAL is one file, TEST ships as 11 parts."""

    DATASET_URL = {
        'GMAI-MMBench_VAL': 'https://huggingface.co/datasets/VLMEval/GMAI-MMBench/resolve/main/GMAI-MMBench_VAL.tsv',
        'GMAI_mm_bench_TEST_part_1': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_1.tsv',  # noqa: E501
        'GMAI_mm_bench_TEST_part_2': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_2.tsv',  # noqa: E501
        'GMAI_mm_bench_TEST_part_3': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_3.tsv',  # noqa: E501
        'GMAI_mm_bench_TEST_part_4': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_4.tsv',  # noqa: E501
        'GMAI_mm_bench_TEST_part_5': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_5.tsv',  # noqa: E501
        'GMAI_mm_bench_TEST_part_6': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_6.tsv',  # noqa: E501
        'GMAI_mm_bench_TEST_part_7': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_7.tsv',  # noqa: E501
        'GMAI_mm_bench_TEST_part_8': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_8.tsv',  # noqa: E501
        'GMAI_mm_bench_TEST_part_9': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_9.tsv',  # noqa: E501
        'GMAI_mm_bench_TEST_part_10': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_10.tsv',  # noqa: E501
        'GMAI_mm_bench_TEST_part_11': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_11.tsv',  # noqa: E501
    }

    DATASET_MD5 = {
        'GMAI-MMBench_VAL': '254bd581627866f1c499d3d6b4422324',
        'GMAI_mm_bench_TEST_part_1': '900d735231230a63f4ed45665c078ef4',
        'GMAI_mm_bench_TEST_part_2': '1b27ab621386945d7e4a765ad2d22b0e',
        'GMAI_mm_bench_TEST_part_3': '44bdc2b6267dd505d529b8cad06f0fb2',
        'GMAI_mm_bench_TEST_part_4': '5a04a04fcac9f1466709f242fdb80acb',
        'GMAI_mm_bench_TEST_part_5': 'c70baf8909eda9af0ddeab275c721336',
        'GMAI_mm_bench_TEST_part_6': '825abc39596b644dead9350d0cfa3b96',
        'GMAI_mm_bench_TEST_part_7': 'defb8aed2fb77365a76b6b9abd6a2701',
        'GMAI_mm_bench_TEST_part_8': 'ff490d60b85f2bb0abb67a435b298c65',
        'GMAI_mm_bench_TEST_part_9': 'ff67c86f40da93b09139ac1d1ba5dc6b',
        'GMAI_mm_bench_TEST_part_10': '3dae94627b9ac0fe00180d4780fbf6dc',
        'GMAI_mm_bench_TEST_part_11': 'd08dc813f0eb6bbab63cae2a9d113c4b',
    }

    @classmethod
    def supported_datasets(cls):
        return ['GMAI-MMBench_VAL', 'GMAI-MMBench_TEST']

    def load_data(self, dataset):
        """Load VAL directly, or download and merge the 11 TEST parts into one DataFrame."""
        if dataset == 'GMAI-MMBench_VAL':
            data_path = osp.join(LMUDataRoot(), f'{dataset}.tsv')
            # Localize base64 images to files when the TSV is large (>1 GB).
            if file_size(data_path, 'GB') > 1:
                local_path = data_path.replace('.tsv', '_local.tsv')
                if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL'):
                    from ..tools import LOCALIZE
                    LOCALIZE(data_path, local_path)
                data_path = local_path
            return load(data_path)
        elif dataset == 'GMAI-MMBench_TEST':
            dfs = []
            for part_num in range(1, 12):
                part_name = f'GMAI_mm_bench_TEST_part_{part_num}'
                url = self.DATASET_URL[part_name]
                file_md5 = self.DATASET_MD5.get(part_name)
                tsv_path = osp.join(LMUDataRoot(), f'{part_name}.tsv')
                # Re-download when missing or when the md5 does not match.
                if not osp.exists(tsv_path) or (file_md5 and md5(tsv_path) != file_md5):
                    download_file(url, filename=tsv_path)
                local_path = tsv_path.replace('.tsv', '_local.tsv')
                if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL'):
                    from ..tools import LOCALIZE
                    LOCALIZE(tsv_path, local_path)
                tsv_path = local_path
                # Load this part
                df = load(tsv_path)
                dfs.append(df)
            # Concatenate all parts into one frame
            data = pd.concat(dfs, ignore_index=True)
            return data
        else:
            raise ValueError(f"未知的数据集:{dataset}")

    def report_acc_by_groups(self, df, group_column):
        """Mean hit rate per value of `group_column`, broken down by split when present."""
        res = defaultdict(list)

        # Check for the 'split' column
        if 'split' in df:
            splits = list(set(df['split']))
            res['split'] = splits
        else:
            df['split'] = ['none'] * len(df)
            res['split'] = ['none']

        res['Overall'] = [np.mean(df[df['split'] == sp]['hit']) for sp in res['split']]

        if group_column not in df:
            raise ValueError(f"Column '{group_column}' not found in dataframe.")  # noqa: E713

        # NaN group values are reported under the literal name 'None'.
        abilities = list(set(df[group_column]))
        abilities = ['None' if isinstance(ab, float) and pd.isna(ab) else ab for ab in abilities]
        abilities.sort()

        for ab in abilities:
            ab_name = ab
            sub_df = df[df[group_column] == ab]
            res[ab_name] = [np.mean(sub_df[sub_df['split'] == sp]['hit']) for sp in res['split']]

        return pd.DataFrame(res)

    def evaluate(self, eval_file, **judge_kwargs):
        """Vanilla MCQ evaluation plus per-group accuracy files for three groupings.

        Returns the overall accuracy DataFrame; grouped accuracies are written
        to `<eval_file>_<group>_acc.csv` as a side effect.
        """
        from .utils.multiple_choice import report_acc, mcq_vanilla_eval
        nproc = judge_kwargs.pop('nproc', 4)

        suffix = eval_file.split('.')[-1]
        model = judge_kwargs.get('model', 'exact_matching')
        assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125']
        name_str_map = {'chatgpt-0125': 'openai', 'gpt-4-0125': 'gpt4'}
        name_str = name_str_map[model] if model in name_str_map else model

        # Fall back to exact matching whenever a judge cannot be built.
        if model == 'exact_matching':
            model = None
        elif gpt_key_set():
            model = build_judge(**judge_kwargs)
            if not model.working():
                warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
                warnings.warn(DEBUG_MESSAGE)
                model = None
        else:
            warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
            model = None

        result_file = eval_file.replace(f'.{suffix}', f'_{name_str}_result.pkl')

        data = load(eval_file)
        data = data.sort_values(by='index')
        data['prediction'] = [str(x) for x in data['prediction']]
        # If not choice label, then use lower case
        for k in data.keys():
            data[k.lower() if k not in list(string.ascii_uppercase) else k] = data.pop(k)

        # Sanity check: every evaluated index must exist in the dataset meta.
        meta = self.data
        meta_q_map = {x: y for x, y in zip(meta['index'], meta['question'])}
        data_map = {x: y for x, y in zip(data['index'], data['question'])}
        for k in data_map:
            assert k in meta_q_map, (
                f'eval_file should be the same as or a subset of dataset {self.dataset_name}'
            )

        data = mcq_vanilla_eval(model, data, meta, nproc, result_file, self.dataset_name)

        # load split
        dump(data, eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}'))
        data = load(eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}'))

        acc = report_acc(data)

        for group_col in ['clinical vqa task', 'department', 'perceptual granularity']:
            acc_grouped = self.report_acc_by_groups(data, group_col)
            score_file_grouped = eval_file.replace(f'.{suffix}', f'_{group_col}_acc.csv')
            dump(acc_grouped, score_file_grouped)

        return acc
+
+
class MMERealWorld(ImageMCQDataset):
    """MME-RealWorld family (EN / CN / Lite): five-option MCQ over high-resolution images."""

    TYPE = 'MMERealWorld'

    DATASET_MD5 = {
        'MME-RealWorld': '271c33ec814c39533c467ec6fb8a6f36',
        'MME-RealWorld-Lite': '4c17057d7d3b6c4a0d4397c3dae0881c',
        'MME-RealWorld-CN': 'daaa763d52a760a38606d5dedb3fe444',
    }
    # Instruction suffix appended after the question + choices, per dataset language.
    SYS = {
        'MME-RealWorld': (
            'Select the best answer to the above multiple-choice question based on the image. '
            'Respond with only the letter (A, B, C, D, or E) of the correct option. \n'
            'The best answer is:'
        ),
        'MME-RealWorld-Lite': (
            'Select the best answer to the above multiple-choice question based on the image. '
            'Respond with only the letter (A, B, C, D, or E) of the correct option. \n'
            'The best answer is:'
        ),
        'MME-RealWorld-CN': (
            '根据图像选择上述多项选择题的最佳答案。只需回答正确选项的字母(A, B, C, D 或 E)。\n'
            '最佳答案为:'
        ),
    }

    @classmethod
    def supported_datasets(cls):
        return ['MME-RealWorld', 'MME-RealWorld-CN', 'MME-RealWorld-Lite',]

    def load_data(
        self, dataset="MME-RealWorld", repo_id="yifanzhang114/MME-RealWorld-Base64"
    ):
        """Load the dataset TSV, downloading/regenerating it from the HF snapshot if needed."""

        def check_integrity(pth):
            # A cached TSV is valid only if present AND its md5 matches the record.
            data_file = osp.join(pth, f"{dataset}.tsv")

            if not os.path.exists(data_file):
                return False

            if md5(data_file) != self.DATASET_MD5[dataset]:
                return False
            return True

        def generate_tsv(pth):
            # Flatten the snapshot's per-file JSON records into a single TSV.
            tsv_file = os.path.join(pth, f"{dataset}.tsv")

            if os.path.exists(tsv_file):
                print(f"{tsv_file} already exists.")
                return

            json_dir = os.path.join(pth, dataset)
            json_files = [f for f in os.listdir(json_dir) if f.endswith(".json")]

            data_list = []
            for json_file in json_files:
                with open(os.path.join(json_dir, json_file), "r") as f:
                    data = json.load(f)
                for item in tqdm(data):
                    choice_prompt = (
                        "The choices are listed below:\n"
                        if dataset in ["MME-RealWorld", "MME-RealWorld-Lite"]
                        else "选项如下所示:\n"
                    )
                    # [4:] strips the leading option tag from each choice string
                    # — assumes a fixed 4-char prefix like '(A) '; TODO confirm.
                    data_list.append(
                        {
                            "index": item["index"],
                            "image": item["image"],
                            "question": item["question"],
                            "multi-choice options": choice_prompt
                            + "\n".join(item["multi-choice options"]),
                            "A": item["multi-choice options"][0][4:],
                            "B": item["multi-choice options"][1][4:],
                            "C": item["multi-choice options"][2][4:],
                            "D": item["multi-choice options"][3][4:],
                            "E": item["multi-choice options"][4][4:],
                            "answer": item["answer"],
                            "category": item["category"],
                            "l2-category": item["l2-category"],
                        }
                    )
            df = pd.DataFrame(data_list)
            df.to_csv(tsv_file, sep="\t", index=False)
            print(f"TSV file saved to {tsv_file}")

        # Lite ships as a ready-made TSV; fetch it directly instead of a snapshot.
        if dataset == "MME-RealWorld-Lite":
            url = 'https://huggingface.co/datasets/yifanzhang114/MME-RealWorld-Base64/resolve/main/mme_realworld_lite.tsv'  # noqa: E501
            file_md5 = (
                self.DATASET_MD5[dataset] if dataset in self.DATASET_MD5 else None
            )
            datas = self.prepare_tsv(url, file_md5)
            choice_prompt = "The choices are listed below:\n"
            for index, item in datas.iterrows():
                # Fix: parse the stored option list with ast.literal_eval instead
                # of eval — identical result for list literals, no code execution.
                options = ast.literal_eval(item["multi-choice options"])
                datas.loc[index, "multi-choice options"] = choice_prompt + "\n".join(
                    options
                )
                datas.loc[index, "A"] = options[0][4:]
                datas.loc[index, "B"] = options[1][4:]
                datas.loc[index, "C"] = options[2][4:]
                datas.loc[index, "D"] = options[3][4:]
                datas.loc[index, "E"] = options[4][4:]
            return datas

        # Check if dataset is cached and has integrity
        update_flag = False
        cache_path = get_cache_path(repo_id)
        if cache_path is not None and check_integrity(cache_path):
            dataset_path = cache_path
            print(f"Using cached dataset from {cache_path}")
        else:
            from huggingface_hub import snapshot_download

            # Download or find the dataset path
            dataset_path = snapshot_download(repo_id=repo_id, repo_type="dataset")
            generate_tsv(dataset_path)
            update_flag = True

        # Localize base64 images to files when the TSV is large (>1 GB).
        data_path = os.path.join(dataset_path, f"{dataset}.tsv")
        if file_size(data_path, "GB") > 1:
            local_path = data_path.replace(".tsv", "_local.tsv")
            if (
                not osp.exists(local_path)
                or os.environ.get("FORCE_LOCAL", None)
                or update_flag
            ):
                from vlmeval.tools import LOCALIZE

                LOCALIZE(data_path, local_path)
            data_path = local_path
        return load(data_path)

    def post_build(self, dataset):
        # Force the answer-extraction TYPE regardless of the base-class default.
        self.TYPE = 'MMERealWorld'

    # Given one data record, return the built prompt (a multi-modal message), can override
    def build_prompt(self, line):
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line['image_path'])
        else:
            tgt_path = self.dump_image(line)

        question = line['question']

        # Question + pre-rendered choice block + language-specific instruction.
        choice_prompt = line['multi-choice options'] + '\n'
        question += ' ' + choice_prompt + self.SYS[self.dataset_name]

        msgs = []
        if isinstance(tgt_path, list):
            msgs.extend([dict(type='image', value=p) for p in tgt_path])
        else:
            msgs = [dict(type='image', value=tgt_path)]
        msgs.append(dict(type='text', value=question))
        return msgs

    @classmethod
    def evaluate(cls, eval_file, **judge_kwargs):
        """Score predictions and return the dimension-rating dictionary.

        Besides letter accuracy, computes a box IoU between boxes mentioned in
        the reasoning part of each prediction and the record's `target_instances`.
        """
        from .utils.multiple_choice import extract_characters_regex, get_dimension_rating
        assert get_file_extension(eval_file) in ['xlsx', 'json', 'tsv'], 'data file should be an supported format (xlsx/json/tsv) file'  # noqa: E501
        FAIL_MSG = 'Failed to obtain answer via API.'
        tmp_file = get_intermediate_file_path(eval_file, '_tmp', 'pkl')
        tgt_file = get_intermediate_file_path(eval_file, '_rating', 'json')
        score_file = get_intermediate_file_path(eval_file, '_score')

        if not osp.exists(score_file):

            res = {} if not osp.exists(tmp_file) else load(tmp_file)
            res = {k: v for k, v in res.items() if FAIL_MSG not in v}

            data = load(eval_file)
            cnt_rejected = 0
            # NOTE(review): rows with NaN predictions are only counted here;
            # re.search on a NaN float below would raise — confirm predictions
            # are always strings in practice.
            data_un = data[~pd.isna(data['prediction'])]

            for idx in data['index']:
                ans = data.loc[data['index'] == idx, 'answer'].values[0]
                pred = data.loc[data['index'] == idx, 'prediction'].values[0]

                # Fix: the tag names in these patterns were lost to markup
                # stripping, leaving r"(.*?)" which always matches ''.
                # NOTE(review): '<think>'/'<answer>' reconstructed from the
                # cot/answer variable names — confirm against the model's
                # actual output format.
                match_cot = re.search(r"<think>(.*?)</think>", pred, re.DOTALL)
                cot = match_cot.group(1).strip() if match_cot else pred

                target_instances = ast.literal_eval(data.loc[data['index'] == idx, 'target_instances'].values[0])
                iou = cls.evaluate_box_iou(cot, target_instances)

                data.loc[data['index'] == idx, 'iou'] = iou

                match_pred = re.search(r"<answer>(.*?)</answer>", pred, re.DOTALL)
                pred = match_pred.group(1).strip().upper() if match_pred else pred

                extract_pred = extract_characters_regex(pred)
                if extract_pred == '':
                    cnt_rejected += 1
                    data.loc[data['index'] == idx, 'score'] = 0
                else:
                    data.loc[data['index'] == idx, 'score'] = int(extract_pred == ans)

            print(
                f'Among {len(data)} questions, failed to obtain prediction for {len(data) - len(data_un)} questions, '
                f'failed to obtain the score for another {cnt_rejected} questions. '
                f'Those questions will be counted as 0 score in ALL rating.'
            )

            dump(data, score_file)

        rating = get_dimension_rating(score_file)
        dump(rating, tgt_file)
        return rating

    @staticmethod
    def evaluate_box_iou(predict_str: str, target_instances: list) -> float:
        """Average best-match IoU between predicted and target [x1,y1,x2,y2] boxes.

        Predicted boxes are read from ``<box>[x1,y1,x2,y2]</box>`` spans in
        `predict_str`; each target box is matched to its best-IoU prediction.
        Declared @staticmethod: the classmethod above calls it via the class.
        """
        # Fix: the tag names were stripped from this pattern too; r"(.*?)" only
        # ever matched empty strings. NOTE(review): '<box>' reconstructed —
        # confirm the tag the model actually emits.
        pattern = r"<box>(.*?)</box>"
        matches = re.findall(pattern, predict_str, re.DOTALL)

        all_boxes = []

        for match in matches:
            box = match.strip()

            coord_pattern = r'\[(\d+),(\d+),(\d+),(\d+)\]'
            coord_match = re.match(coord_pattern, box)

            if coord_match:
                x1, y1, x2, y2 = map(int, coord_match.groups())

                # Keep only well-formed corner boxes.
                if x1 < x2 and y1 < y2:
                    all_boxes.append([x1, y1, x2, y2])

        if len(all_boxes) == 0:
            return 0

        target_boxes = target_instances
        if len(target_boxes) == 0:
            # No targets but at least one predicted box: full credit.
            # (all_boxes is non-empty here, so the original
            # `len(all_boxes) > 0` expression was always True.)
            return 1.0

        def calculate_average_iou(pred_boxes, target_boxes):
            """Mean, over targets, of each target's best IoU with any prediction.

            Both box lists use corner format [x1, y1, x2, y2].
            """
            def compute_iou(box1, box2):
                # Standard corner-format intersection-over-union.
                x1_min, y1_min, x1_max, y1_max = box1
                x2_min, y2_min, x2_max, y2_max = box2

                inter_x_min = max(x1_min, x2_min)
                inter_y_min = max(y1_min, y2_min)
                inter_x_max = min(x1_max, x2_max)
                inter_y_max = min(y1_max, y2_max)

                inter_width = max(0, inter_x_max - inter_x_min)
                inter_height = max(0, inter_y_max - inter_y_min)
                inter_area = inter_width * inter_height

                area1 = (x1_max - x1_min) * (y1_max - y1_min)
                area2 = (x2_max - x2_min) * (y2_max - y2_min)

                union_area = area1 + area2 - inter_area

                return inter_area / union_area if union_area > 0 else 0.0

            pred_coords = pred_boxes
            target_coords = target_boxes

            total_iou = 0.0
            num_targets = len(target_boxes)

            if num_targets == 0:
                return 0.0

            # For each target box, keep its best IoU over all predictions.
            for t_coord in target_coords:
                best_iou = 0.0
                for p_coord in pred_coords:
                    iou = compute_iou(t_coord, p_coord)
                    if iou > best_iou:
                        best_iou = iou
                total_iou += best_iou

            return total_iou / num_targets

        return calculate_average_iou(all_boxes, target_boxes)
+
+
class CVBench(ImageMCQDataset):
    """CV-Bench, composed of two sub datasets:
    CV-Bench-2D: 2D computer vision tasks
    CV-Bench-3D: 3D computer vision tasks

    Reference:
    - https://cambrian-mllm.github.io/
    - https://huggingface.co/datasets/nyu-visionx/CV-Bench

    Evaluation strategy:
    See [Cambrian-1](https://arxiv.org/pdf/2406.16860) Appendix C
    """
    DATASET_URL = {
        "CV-Bench-2D": "http://opencompass.openxlab.space/utils/VLMEval/CV-Bench-2D.tsv",
        "CV-Bench-3D": "http://opencompass.openxlab.space/utils/VLMEval/CV-Bench-3D.tsv",
    }

    DATASET_MD5 = {
        "CV-Bench-2D": "a7cff4cc2857cc237ee2b89e62bccb2d",
        "CV-Bench-3D": "bb94c0d568d652d15b60e001ac40a170",
    }

    def build_prompt(self, line):
        """Build the multimodal message list for one sample.

        The TSV ships a ready-made `prompt` column, so no option formatting
        is performed here.
        """
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line["image_path"])
        else:
            tgt_path = self.dump_image(line)

        msgs = []
        if isinstance(tgt_path, list):
            msgs.extend([dict(type="image", value=p) for p in tgt_path])
        else:
            msgs = [dict(type="image", value=tgt_path)]
        # use the prompt provided by the dataset
        msgs.append(dict(type="text", value=line["prompt"]))
        return msgs

    def evaluate(self, eval_file, **judge_kwargs):
        """Score predictions; falls back to exact matching when no judge model
        is requested or the OpenAI key is unavailable. Returns the accuracy
        DataFrame and caches it next to `eval_file`.
        """
        from .utils.multiple_choice import mcq_vanilla_eval, report_acc

        nproc = judge_kwargs.pop("nproc", 4)

        # BUGFIX: the default used to be the typo "extract_matching", which
        # never equalled "exact_matching" below, so a judge model was built
        # even when the caller asked for none.
        model_name = judge_kwargs.get("model", "exact_matching")

        if model_name == "exact_matching":
            model = None
        elif gpt_key_set():
            model = build_judge(**judge_kwargs)
            if not model.working():
                warnings.warn(DEBUG_MESSAGE)
                model = None
        else:
            warnings.warn(
                "OPENAI_API_KEY is not set properly, will use exact matching for evaluation"
            )
            model = None

        result_file = get_intermediate_file_path(eval_file, f"_{model_name}_result", "pkl")

        data = load(eval_file)
        data = data.sort_values(by="index")
        data["prediction"] = [str(x) for x in data["prediction"]]
        # If not choice label, then use lower case
        for k in data.keys():
            data[k.lower() if k not in list(string.ascii_uppercase) else k] = data.pop(
                k
            )

        meta = self.data
        meta_q_map = {x: y for x, y in zip(meta["index"], meta["question"])}
        data_map = {x: y for x, y in zip(data["index"], data["question"])}
        for k in data_map:
            assert (
                k in meta_q_map
            ), f"eval_file should be the same as or a subset of dataset {self.dataset_name}"

        score_file = get_intermediate_file_path(eval_file, "_acc", "csv")

        # Reuse a previously computed score file if present
        if osp.exists(score_file):
            acc = load(score_file)
            return acc
        data = mcq_vanilla_eval(
            model, data, meta, nproc, result_file, self.dataset_name
        )
        dump(data, get_intermediate_file_path(eval_file, f"_{model_name}_result"))
        data = load(get_intermediate_file_path(eval_file, f"_{model_name}_result"))

        if all(data["split"] == "2D"):  # 2D
            acc = self.report_accuracy(data)
        else:  # 3D, use default evaluation strategy
            acc = report_acc(data)

        dump(acc, score_file)

        return acc

    def report_accuracy(self, data):
        """CV-Bench-2D evaluation strategy: compute per-source accuracy first,
        then average across sources for the overall score."""
        res = defaultdict(list)

        splits = list(set(data["split"]))
        res["split"] = splits

        sources = set(data["source"])
        for source in sources:
            sub_df = data[data["source"] == source]
            res[source] = [
                np.mean(sub_df[sub_df["split"] == sp]["hit"]) for sp in res["split"]
            ]
        res = pd.DataFrame(res)
        res["Overall"] = 0
        for source in sources:
            res["Overall"] += res[source]
        res["Overall"] = res["Overall"] / len(sources)
        return res
+
+
class HRBenchDataset(ImageMCQDataset):
    """HR-Bench (4K / 8K) high-resolution MCQ benchmark."""

    DATASET_URL = {
        'HRBench4K': 'https://huggingface.co/datasets/DreamMr/HR-Bench/resolve/main/hr_bench_4k.tsv',
        'HRBench8K': 'https://huggingface.co/datasets/DreamMr/HR-Bench/resolve/main/hr_bench_8k.tsv',
    }

    DATASET_MD5 = {
        'HRBench4K': 'f6b041b03d49543494b8a56d2e35be65',
        'HRBench8K': '274c9c7f89329b804a4723178a00219c',
    }

    def evaluate(self, eval_file, **judge_kwargs):
        """Score predictions with HR-Bench's accuracy report.

        Falls back to exact matching when no judge model is configured or the
        OpenAI key is unavailable; results are cached next to `eval_file`.
        """
        assert os.path.exists(eval_file), '{} does not exist!'.format(eval_file)
        from .utils.multiple_choice import mcq_vanilla_eval
        from .utils.hrbench import report_acc_hrbench
        nproc = judge_kwargs.pop('nproc', 4)

        # BUGFIX: the default used to be the typo 'extract_matching', which is
        # not in the allowed list below and made the assert fail whenever the
        # caller did not pass a judge model explicitly.
        model = judge_kwargs.get('model', 'exact_matching')
        assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125']
        name_str_map = {'chatgpt-0125': 'openai', 'gpt-4-0125': 'gpt4'}
        name_str = name_str_map[model] if model in name_str_map else model

        if model == 'exact_matching':
            model = None
        elif gpt_key_set():
            model = build_judge(**judge_kwargs)
            if not model.working():
                warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
                warnings.warn(DEBUG_MESSAGE)
                model = None
        else:
            warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
            model = None

        result_file = get_intermediate_file_path(eval_file, f'_{name_str}_result', 'pkl')

        data = load(eval_file)
        data = data.sort_values(by='index')
        data['prediction'] = [str(x) for x in data['prediction']]
        # If not choice label, then use lower case
        for k in data.keys():
            data[k.lower() if k not in list(string.ascii_uppercase) else k] = data.pop(k)

        meta = self.data
        meta_q_map = {x: y for x, y in zip(meta['index'], meta['question'])}
        data_map = {x: y for x, y in zip(data['index'], data['question'])}
        for k in data_map:
            assert k in meta_q_map, (
                f'eval_file should be the same as or a subset of dataset {self.dataset_name}'
            )

        score_file = get_intermediate_file_path(eval_file, '_acc', 'csv')

        # Reuse a previously computed score file if present
        if osp.exists(score_file):
            acc = load(score_file)
            return acc
        data = mcq_vanilla_eval(model, data, meta, nproc, result_file, self.dataset_name)
        dump(data, get_intermediate_file_path(eval_file, f'_{name_str}_result'))
        data = load(get_intermediate_file_path(eval_file, f'_{name_str}_result'))

        acc = report_acc_hrbench(data)

        dump(acc, score_file)

        return acc
+
+
class CustomMCQDataset(ImageMCQDataset):
    """MCQ dataset loaded from a user-provided TSV under the LMU data root."""

    def load_data(self, dataset):
        """Load `<LMUDataRoot>/<dataset>.tsv`; when the file exceeds 1 GB,
        localize its images first (or re-localize when FORCE_LOCAL is set)."""
        tsv_file = osp.join(LMUDataRoot(), f'{dataset}.tsv')

        if file_size(tsv_file, 'GB') > 1:
            localized = tsv_file.replace('.tsv', '_local.tsv')
            if not osp.exists(localized) or os.environ.get('FORCE_LOCAL', None):
                from ..tools import LOCALIZE
                LOCALIZE(tsv_file, localized)
            tsv_file = localized
        return load(tsv_file)
+
+
class NaturalBenchDataset(ImageMCQDataset):
    """NaturalBench: every sample group pairs two questions with two images."""

    DATASET_URL = {
        'NaturalBenchDataset': (
            'https://huggingface.co/datasets/BaiqiL/'
            'NaturalBench/resolve/main/NaturalBenchDataset.tsv'
        ),
    }
    DATASET_MD5 = {
        'NaturalBenchDataset': 'e5f724932972eaeb8a9099e6979606ec',
    }

    def build_prompt(self, line):
        """Compose image(s) + question, appending the answer-format suffix
        matching the sample type (yes/no vs. multiple choice)."""
        SUFFIX_FOR_VQA = {
            "yes_no": "Please answer Yes or No.",
            "multiple_choice": "Please output the letter corresponding to the correct option."
        }
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line['image_path'])
        else:
            tgt_path = self.dump_image(line)

        prompt = f"{line['question']} {SUFFIX_FOR_VQA[line['type']]}"
        if isinstance(tgt_path, list):
            msgs = [dict(type='image', value=p) for p in tgt_path]
        else:
            msgs = [dict(type='image', value=tgt_path)]
        msgs.append(dict(type='text', value=prompt))

        return msgs

    def evaluate(self, eval_file, **judge_kwargs):
        """Score predictions with NaturalBench's grouped metric: every four
        consecutive rows form one (q0/q1 x i0/i1) group."""
        from .utils.naturalbench import extract_answer, get_scores

        data = load(eval_file).sort_values(by='index')
        predictions = [str(x) for x in data['prediction']]
        answers = [str(x) for x in data['answer']]
        indexs = [str(x) for x in data['index']]
        types = [str(x) for x in self.data['type']]
        # The benchmark is fixed-size: 1900 groups of 4 rows each.
        assert len(predictions) == len(answers) == len(indexs) == len(types) == (1900 * 4)

        group_keys = ("q0_i0", "q0_i1", "q1_i0", "q1_i1")
        results = {}
        for gid in range(len(predictions) // 4):
            base = gid * 4
            results[gid] = {
                key: extract_answer(predictions[base + off], types[base + off])
                for off, key in enumerate(group_keys)
            }

        scores = get_scores(results)
        print(scores)
        score_file = get_intermediate_file_path(eval_file, '_acc', 'csv')
        dump(pd.DataFrame(list(scores.items()), columns=['Metric', 'Score']), score_file)

        return scores
+
+
class WeMath(ImageBaseDataset):
    """We-Math benchmark (MCQ). `WeMath_COT` shares the same TSV but appends a
    per-sample chain-of-thought `requirement` to the prompt."""
    TYPE = 'MCQ'
    DATASET_URL = {
        'WeMath': 'https://opencompass.openxlab.space/utils/VLMEval/WeMath.tsv',
        'WeMath_COT': 'https://opencompass.openxlab.space/utils/VLMEval/WeMath.tsv',
    }
    DATASET_MD5 = {'WeMath': 'b5e969a075f01290a542411fb7766388',
                   'WeMath_COT': 'b5e969a075f01290a542411fb7766388'}

    def build_prompt(self, line):
        """Build the hint/question/options prompt; for the COT variant, the
        sample's `requirement` text is appended."""
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line['image_path'])
        else:
            tgt_path = self.dump_image(line)

        question = line['question']
        options = {
            cand: line[cand]
            for cand in string.ascii_uppercase
            if cand in line and not pd.isna(line[cand])
        }
        options_prompt = 'Options:\n'
        for key, item in options.items():
            options_prompt += f'{key}. {item}\n'
        hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
        prompt = ''
        if hint is not None:
            prompt += f'Hint: {hint}\n'
        prompt += f'Question: {question}\n'
        if len(options):
            prompt += options_prompt

        if 'COT' in self.dataset_name:
            requirement = line['requirement']
            if requirement is not None:
                prompt += f'\n{requirement}'

        msgs = []
        if isinstance(tgt_path, list):
            msgs.extend([dict(type='image', value=p) for p in tgt_path])
        else:
            msgs = [dict(type='image', value=tgt_path)]
        msgs.append(dict(type='text', value=prompt))

        return msgs

    def evaluate(self, eval_file, **judge_kwargs):
        """Run (optional) judge-based answer extraction, then compute We-Math's
        accuracy and four-dimension scores. Returns the combined DataFrame."""
        from .utils.wemath import wemath_evaluate_models, wemath_accuracy
        from .utils.multiple_choice import mcq_vanilla_eval

        model = judge_kwargs.get('model', 'exact_matching')
        assert model in ['exact_matching', 'gpt-4-0125', 'gpt-4-turbo', 'gpt-4o-mini'], model
        name_str_map = {'gpt-4-0125': 'gpt4', 'gpt-4-turbo': 'gpt4-turbo', 'gpt-4o-mini': 'gpt4o-mini'}
        name_str = name_str_map[model] if model in name_str_map else model
        # CONSISTENCY FIX: pop nproc before build_judge so it is not forwarded
        # as a judge kwarg (matches the other datasets in this module).
        nproc = judge_kwargs.pop('nproc', 4)

        if model == 'exact_matching':
            model = None
        elif gpt_key_set():
            model = build_judge(**judge_kwargs)
            if not model.working():
                warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
                warnings.warn(DEBUG_MESSAGE)
                model = None
        else:
            warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
            model = None

        storage = get_intermediate_file_path(eval_file, f'_{name_str}')

        if not osp.exists(storage) and model is not None:
            result_file = get_intermediate_file_path(eval_file, f'_{name_str}_result', 'pkl')

            # BUGFIX: eval_file was loaded twice back-to-back; load it once.
            data = load(eval_file)
            data = data.sort_values(by='index')
            data['prediction'] = [str(x) for x in data['prediction']]
            # If not choice label, then use lower case
            for k in data.keys():
                data[k.lower() if k not in list(string.ascii_uppercase) else k] = data.pop(k)

            meta = self.data
            meta_q_map = {x: y for x, y in zip(meta['index'], meta['question'])}
            data_map = {x: y for x, y in zip(data['index'], data['question'])}
            for k in data_map:
                assert k in meta_q_map, (
                    f'eval_file should be the same as or a subset of dataset {self.dataset_name}'
                )
            data = mcq_vanilla_eval(model, data, meta, nproc, result_file, self.dataset_name)

            if 'id' in data.columns:
                # rename column to the casing wemath's scorers expect
                data.rename(columns={'id': 'ID'}, inplace=True)
            dump(data, storage)
        if osp.exists(storage):
            accuracy_scores = wemath_evaluate_models(storage)
            four_dim_scores = wemath_accuracy(storage)
        else:
            accuracy_scores = wemath_evaluate_models(eval_file)
            four_dim_scores = wemath_accuracy(eval_file)
        combine_score = {**accuracy_scores, **four_dim_scores}
        combine_score = pd.DataFrame(combine_score)
        score_pth = get_intermediate_file_path(storage, '_score', 'csv')
        dump(combine_score, score_pth)
        return combine_score
+
+
class VMCBenchDataset(ImageBaseDataset):
    """VMCBench multiple-choice benchmark with rule-based scoring."""

    TYPE = 'MCQ'

    DATASET_URL = {
        'VMCBench_DEV': 'https://huggingface.co/datasets/suyc21/VMCBench/resolve/main/data/tsv/VMCBench_DEV.tsv',
        'VMCBench_TEST': 'https://huggingface.co/datasets/suyc21/VMCBench/resolve/main/data/tsv/VMCBench_TEST.tsv'
    }

    DATASET_MD5 = {
    }

    def build_prompt(self, line):
        """Build the question + options prompt, asking for a bare letter."""
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line['image_path'])
        else:
            tgt_path = self.dump_image(line)

        option_lines = [
            f'{letter}. {line[letter]}\n'
            for letter in string.ascii_uppercase
            if letter in line and not pd.isna(line[letter])
        ]
        prompt = f"Question: {line['question']}\n"
        if option_lines:
            prompt += 'Options:\n' + ''.join(option_lines)
        prompt += "Answer with the option's letter from the given choices directly. \n"

        if isinstance(tgt_path, list):
            msgs = [dict(type='image', value=p) for p in tgt_path]
        else:
            msgs = [dict(type='image', value=tgt_path)]
        msgs.append(dict(type='text', value=prompt))

        return msgs

    def evaluate(self, eval_file, **judge_kwargs):
        """Rule-based scoring via get_mc_score; dumps judged rows and the
        accuracy CSV next to `eval_file`, returning the accuracy table."""
        from .utils.vmcbench import get_mc_score, report_vmc_acc

        frame = load(eval_file).sort_values(by='index')
        frame['prediction'] = [str(x) for x in frame['prediction']]
        frame['hit'] = frame.apply(get_mc_score, axis=1)
        dump(frame, get_intermediate_file_path(eval_file, '_result'))

        acc = report_vmc_acc(frame)
        dump(acc, get_intermediate_file_path(eval_file, '_acc', 'csv'))

        return acc
+
+
class LEGO(ImageMCQDataset):
    """LEGO assembly-reasoning benchmark; prompts interleave images into the
    question text at image placeholders."""

    DATASET_URL = {
        'LEGO': 'https://opencompass.openxlab.space/utils/VLMEval/LEGO.tsv',
        'LEGO_circular': 'https://opencompass.openxlab.space/utils/VLMEval/LEGO_circular.tsv',
    }
    DATASET_MD5 = {'LEGO': 'cfa845764442ebd54afa369c26011b8e'}

    @staticmethod
    def split_LEGO(msgs):
        """Re-interleave image messages into the text at image placeholders.

        NOTE(review): this method was truncated in the source
        (`text_segs = text.split(''` with the loop header missing). The
        '<image_' delimiter and loop below are reconstructed from the surviving
        parsing code (`int(seg[0]) - 1` reads a 1-based digit, `seg[2:]` skips
        the digit and closing '>'), i.e. placeholders like '<image_1>'.
        TODO: confirm the exact placeholder against the dataset/upstream.
        """
        text, images = None, []
        for s in msgs:
            if s['type'] == 'image':
                images.append(s['value'])
            elif s['type'] == 'text':
                assert text is None
                text = s['value']
        text_segs = text.split('<image_')
        segs = [dict(type='text', value=text_segs[0])]
        for seg in text_segs[1:]:
            image_idx = int(seg[0]) - 1
            segs.append(dict(type='image', value=images[image_idx]))
            segs.append(dict(type='text', value=seg[2:]))
        return segs

    def build_prompt_sort(self, line):
        """Build the prompt for 'sort' questions, asking for a letter sequence
        that orders the assembly steps."""
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line['image_path'])
        else:
            tgt_path = self.dump_image(line)

        question = line['question']
        options = {
            cand: line[cand]
            for cand in string.ascii_uppercase
            if cand in line and not pd.isna(line[cand])
        }
        options_prompt = 'Options:\n'
        for key, item in options.items():
            options_prompt += f'{key}. {item}\n'
        hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
        prompt = ''
        if hint is not None:
            prompt += f'Hint: {hint}\n'
        prompt += f'Question: {question}\n'
        if len(options):
            prompt += options_prompt
        prompt += (
            "Please respond with only the sequence of letters (e.g., 'BDAC') "
            "that correctly orders the steps.\n"
        )

        msgs = []
        if isinstance(tgt_path, list):
            msgs.extend([dict(type='image', value=p) for p in tgt_path])
        else:
            msgs = [dict(type='image', value=tgt_path)]
        msgs.append(dict(type='text', value=prompt))

        return msgs

    def build_prompt(self, line):
        """Dispatch on question_type, then interleave images into the text."""
        if line['question_type'] == 'sort':
            msgs = self.build_prompt_sort(line)
        else:
            msgs = super().build_prompt(line)
        msgs = self.split_LEGO(msgs)
        return msgs
+
+
class VisuLogic(ImageMCQDataset):
    """VisuLogic visual logical-reasoning MCQ benchmark."""
    TYPE = "MCQ"
    DATASET_URL = {
        'VisuLogic': 'http://opencompass.openxlab.space/utils/VLMEval/VisuLogic.tsv'
    }
    DATASET_MD5 = {
        'VisuLogic': 'b0820b5ec1e01dfe3951927f0def73b6',
    }

    def build_prompt(self, line):
        """Build image(s) + question with the step-by-step boxed-answer
        instruction appended."""
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line['image_path'])
        else:
            tgt_path = self.dump_image(line)

        prompt = (
            line['question']
            + "\nSolve the complex visual logical reasoning problem through step-by-step reasoning."
            + "Think about the reasoning process first "
            + "and answer the question following this format: Answer: \\boxed{$LETTER}"
        )

        if isinstance(tgt_path, list):
            msgs = [dict(type='image', value=p) for p in tgt_path]
        else:
            msgs = [dict(type='image', value=tgt_path)]
        msgs.append(dict(type='text', value=prompt))

        return msgs

    def evaluate(self, eval_file, **judge_kwargs):
        """Score via the rule-based VisuLogic_acc. A judge model may be built
        from judge_kwargs (and warnings emitted) but is not consumed by the
        rule-based scorer."""
        from .utils.visulogic import VisuLogic_acc
        from .utils.multiple_choice import mcq_vanilla_eval

        model = judge_kwargs.get('model', 'exact_matching')
        assert model in ['exact_matching', 'gpt-4-0125', 'gpt-4-turbo', 'gpt-4o-mini'], model
        name_str_map = {'gpt-4-0125': 'gpt4', 'gpt-4-turbo': 'gpt4-turbo', 'gpt-4o-mini': 'gpt4o-mini'}
        name_str = name_str_map.get(model, model)

        if model == 'exact_matching':
            model = None
        elif gpt_key_set():
            model = build_judge(**judge_kwargs)
            if not model.working():
                warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
                warnings.warn(DEBUG_MESSAGE)
                model = None
        else:
            warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
            model = None

        storage = get_intermediate_file_path(eval_file, f'_{name_str}')

        # Prefer the intermediate file when a previous run produced it.
        source_file = storage if osp.exists(storage) else eval_file
        combine_score = pd.DataFrame({**VisuLogic_acc(source_file)})
        score_pth = get_intermediate_file_path(storage, '_acc', 'csv')
        dump(combine_score, score_pth)
        return combine_score
+
+
class TDBench(ImageMCQDataset):
    """TDBench top-down-view MCQ benchmark: four rotation variants plus four
    case studies (zoom / height / integrity / depth)."""

    DATASET_URL = {
        'tdbench_rot0': 'https://huggingface.co/datasets/Columbia-ICSL/TDBench/resolve/main/tdbench_rot0.tsv',
        'tdbench_rot90': 'https://huggingface.co/datasets/Columbia-ICSL/TDBench/resolve/main/tdbench_rot90.tsv',
        'tdbench_rot180': 'https://huggingface.co/datasets/Columbia-ICSL/TDBench/resolve/main/tdbench_rot180.tsv',
        'tdbench_rot270': 'https://huggingface.co/datasets/Columbia-ICSL/TDBench/resolve/main/tdbench_rot270.tsv',
        'tdbench_cs_zoom': 'https://huggingface.co/datasets/Columbia-ICSL/TDBench/resolve/main/case_study_zoom_in.tsv',
        'tdbench_cs_height': 'https://huggingface.co/datasets/Columbia-ICSL/TDBench/resolve/main/case_study_height.tsv',
        'tdbench_cs_integrity': 'https://huggingface.co/datasets/Columbia-ICSL/TDBench/resolve/main/case_study_integrity.tsv', # noqa: E501
        'tdbench_cs_depth': 'https://huggingface.co/datasets/Columbia-ICSL/TDBench/resolve/main/case_study_depth.tsv',
    }

    DATASET_MD5 = {
        'tdbench_rot0': '98d58436f01ca2bf2f1db1b9bfd7a947',
        'tdbench_rot90': 'd4afebfd0a4776242069e43269779f41',
        'tdbench_rot180': 'd54dd9f418f83ed612b02fd5f42f65c7',
        'tdbench_rot270': 'f95304455582de5635ff10c0400562ac',
        'tdbench_cs_zoom': '2a01618c9c1e7d1a9d86af545e943392',
        'tdbench_cs_height': 'ecbe1c5802e25749558417208164bcb3',
        'tdbench_cs_integrity': '05b2045cae2016f6edc400da48e2df4b',
        'tdbench_cs_depth': '449dbe4b24a43a06a9f680811deae517',
    }

    def evaluate(self, eval_file, **judge_kwargs):
        """Run standard MCQ evaluation; for rotation variants additionally
        compute the rotational-consistency table and symlink it one directory
        level up for easy discovery."""
        acc, result_file = self.do_evaluate(eval_file, **judge_kwargs)
        # For case studies (cs_x), do not do rotation eval
        if '_rot' not in self.dataset_name:
            return acc

        from .utils.tdbench import rotational_eval
        re_table = rotational_eval(result_file)
        # Identity checks (not truthiness): re_table may be a DataFrame.
        if not (re_table is None or re_table is False):
            target = osp.abspath(result_file.split('_rot')[0] + '_REresult.csv')
            link = osp.join(osp.dirname(osp.dirname(result_file)), osp.basename(target))
            re_table.to_csv(target, index=True)
            print(tabulate(re_table, headers="keys"))
            if osp.exists(link) or osp.islink(link):
                os.remove(link)
            os.symlink(target, link)

        return acc

    def do_evaluate(self, eval_file, **judge_kwargs):
        """Judge (or exact-match) the predictions and report accuracy.

        Returns (accuracy DataFrame, path of the judged-result file)."""
        from .utils.multiple_choice import report_acc, mcq_vanilla_eval
        nproc = judge_kwargs.pop('nproc', 4)

        judge_name = judge_kwargs.get('model', 'exact_matching')
        assert judge_name in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125', 'gpt-4o-mini']
        name_str_map = {'chatgpt-0125': 'openai', 'gpt-4-0125': 'gpt4', 'gpt-4o-mini': 'gpt4omini'}
        name_str = name_str_map.get(judge_name, judge_name)

        model = None
        if judge_name != 'exact_matching':
            if gpt_key_set():
                model = build_judge(**judge_kwargs)
                if not model.working():
                    warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
                    warnings.warn(DEBUG_MESSAGE)
                    model = None
            else:
                warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')

        result_file = get_intermediate_file_path(eval_file, f'_{name_str}_result', 'pkl')

        data = load(eval_file).sort_values(by='index')
        data['prediction'] = [str(x) for x in data['prediction']]
        # If not choice label, then use lower case
        for col in data.keys():
            data[col.lower() if col not in list(string.ascii_uppercase) else col] = data.pop(col)

        meta = self.data
        meta_q_map = dict(zip(meta['index'], meta['question']))
        data_map = dict(zip(data['index'], data['question']))
        for k in data_map:
            assert k in meta_q_map, (
                f'eval_file should be the same as or a subset of dataset {self.dataset_name}'
            )

        data = mcq_vanilla_eval(model, data, meta, nproc, result_file, self.dataset_name)

        # Save evaluation results
        judged_result_file = get_intermediate_file_path(eval_file, f'_{name_str}_result')
        dump(data, judged_result_file)

        acc = report_acc(data)

        dump(acc, get_intermediate_file_path(eval_file, '_acc', 'csv'))

        return acc, judged_result_file
+
+
class MicroBench(ImageMCQDataset):
    """MicroBench: the dataset ships as 14 TSV shards that are downloaded,
    localized, and concatenated on load."""

    DATASET_URL = {'MicroBench': ''}

    DATASET_PART_URL = {
        'part_1': 'https://huggingface.co/datasets/xuxuxuxuxu/Microbench/resolve/main/part_1.tsv',
        'part_2': 'https://huggingface.co/datasets/xuxuxuxuxu/Microbench/resolve/main/part_2.tsv',
        'part_3': 'https://huggingface.co/datasets/xuxuxuxuxu/Microbench/resolve/main/part_3.tsv',
        'part_4': 'https://huggingface.co/datasets/xuxuxuxuxu/Microbench/resolve/main/part_4.tsv',
        'part_5': 'https://huggingface.co/datasets/xuxuxuxuxu/Microbench/resolve/main/part_5.tsv',
        'part_6': 'https://huggingface.co/datasets/xuxuxuxuxu/Microbench/resolve/main/part_6.tsv',
        'part_7': 'https://huggingface.co/datasets/xuxuxuxuxu/Microbench/resolve/main/part_7.tsv',
        'part_8': 'https://huggingface.co/datasets/xuxuxuxuxu/Microbench/resolve/main/part_8.tsv',
        'part_9': 'https://huggingface.co/datasets/xuxuxuxuxu/Microbench/resolve/main/part_9.tsv',
        'part_10': 'https://huggingface.co/datasets/xuxuxuxuxu/Microbench/resolve/main/part_10.tsv',
        'part_11': 'https://huggingface.co/datasets/xuxuxuxuxu/Microbench/resolve/main/part_11.tsv',
        'part_12': 'https://huggingface.co/datasets/xuxuxuxuxu/Microbench/resolve/main/part_12.tsv',
        'part_13': 'https://huggingface.co/datasets/xuxuxuxuxu/Microbench/resolve/main/part_13.tsv',
        'part_14': 'https://huggingface.co/datasets/xuxuxuxuxu/Microbench/resolve/main/part_14.tsv',
    }

    def load_data(self, dataset="MicroBench", repo_id="xuxuxuxuxu/MicroBench"):
        """Download every shard (if missing), localize it, and return all
        shards concatenated into a single DataFrame."""
        shards = []
        for idx in range(1, 15):
            key = f'part_{idx}'
            shard_path = osp.join(LMUDataRoot(), f'microbench_{key}.tsv')
            if not osp.exists(shard_path):
                download_file(self.DATASET_PART_URL[key], filename=shard_path)
            localized = shard_path.replace('.tsv', '_local.tsv')
            if not osp.exists(localized) or os.environ.get('FORCE_LOCAL'):
                from ..tools import LOCALIZE
                LOCALIZE(shard_path, localized)
            # load this shard
            shards.append(load(localized))
        # concatenate all shards
        return pd.concat(shards, ignore_index=True)
+
+
class XLRSBench(ImageMCQDataset):
    """XLRS-Bench-lite: remote-sensing MCQ benchmark shipped as 15 JSONL
    shards that are downloaded, localized to TSV, and concatenated."""

    DATASET_URL = {'XLRS-Bench-lite': ''}

    DATASET_PART_URL = {
        'part0': 'https://huggingface.co/datasets/initiacms/XLRS-Bench-lite_VLM/resolve/main/XLRS-Bench-lite_part0.jsonl', # noqa E501
        'part1': 'https://huggingface.co/datasets/initiacms/XLRS-Bench-lite_VLM/resolve/main/XLRS-Bench-lite_part1.jsonl', # noqa E501
        'part2': 'https://huggingface.co/datasets/initiacms/XLRS-Bench-lite_VLM/resolve/main/XLRS-Bench-lite_part2.jsonl', # noqa E501
        'part3': 'https://huggingface.co/datasets/initiacms/XLRS-Bench-lite_VLM/resolve/main/XLRS-Bench-lite_part3.jsonl', # noqa E501
        'part4': 'https://huggingface.co/datasets/initiacms/XLRS-Bench-lite_VLM/resolve/main/XLRS-Bench-lite_part4.jsonl', # noqa E501
        'part5': 'https://huggingface.co/datasets/initiacms/XLRS-Bench-lite_VLM/resolve/main/XLRS-Bench-lite_part5.jsonl', # noqa E501
        'part6': 'https://huggingface.co/datasets/initiacms/XLRS-Bench-lite_VLM/resolve/main/XLRS-Bench-lite_part6.jsonl', # noqa E501
        'part7': 'https://huggingface.co/datasets/initiacms/XLRS-Bench-lite_VLM/resolve/main/XLRS-Bench-lite_part7.jsonl', # noqa E501
        'part8': 'https://huggingface.co/datasets/initiacms/XLRS-Bench-lite_VLM/resolve/main/XLRS-Bench-lite_part8.jsonl', # noqa E501
        'part9': 'https://huggingface.co/datasets/initiacms/XLRS-Bench-lite_VLM/resolve/main/XLRS-Bench-lite_part9.jsonl', # noqa E501
        'part10': 'https://huggingface.co/datasets/initiacms/XLRS-Bench-lite_VLM/resolve/main/XLRS-Bench-lite_part10.jsonl', # noqa E501
        'part11': 'https://huggingface.co/datasets/initiacms/XLRS-Bench-lite_VLM/resolve/main/XLRS-Bench-lite_part11.jsonl', # noqa E501
        'part12': 'https://huggingface.co/datasets/initiacms/XLRS-Bench-lite_VLM/resolve/main/XLRS-Bench-lite_part12.jsonl', # noqa E501
        'part13': 'https://huggingface.co/datasets/initiacms/XLRS-Bench-lite_VLM/resolve/main/XLRS-Bench-lite_part13.jsonl', # noqa E501
        'part14': 'https://huggingface.co/datasets/initiacms/XLRS-Bench-lite_VLM/resolve/main/XLRS-Bench-lite_part14.jsonl' # noqa E501
    }

    def load_data(self, dataset="XLRS-Bench-lite_VLM", repo_id="initiacms/XLRS-Bench-lite_VLM"):
        """Download each JSONL shard (if missing), localize it into a TSV, and
        return all shards concatenated into one DataFrame."""
        def load_jsonl(f):
            # Parse a JSONL file into a DataFrame, tolerating a trailing blank line.
            lines = open(f, encoding='utf-8').readlines()
            lines = [x.strip() for x in lines]
            if lines[-1] == '':
                lines = lines[:-1]
            data = [json.loads(x) for x in lines]
            return pd.DataFrame(data)
        dfs = []
        for part_num in range(15):
            part_name = f'part{part_num}'
            url = self.DATASET_PART_URL[part_name]
            tsv_path = osp.join(LMUDataRoot(), f'XLRS-Bench-lite_{part_name}.jsonl')
            local_path = tsv_path.replace('.jsonl', '_local.tsv')
            if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL'):
                if not osp.exists(tsv_path):
                    download_file(url, filename=tsv_path)
                # CLEANUP: removed the dead `if new_fname is None` branch —
                # local_path above is never None.
                dname = osp.splitext(osp.basename(tsv_path))[0]
                data_new = localize_df(load_jsonl(tsv_path), dname)
                dump(data_new, local_path)
                print(f'The localized version of data file is {local_path}')

            tsv_path = local_path
            # load this shard (TSV once localized)
            df = load_jsonl(tsv_path) if tsv_path.endswith('.jsonl') else load(tsv_path)
            dfs.append(df)
        # concatenate all shards
        data = pd.concat(dfs, ignore_index=True)
        return data

    def build_prompt(self, line):
        """Build image(s) + text messages; the prompt is the question with the
        dataset-provided multi-choice options appended."""
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line['image_path'])
        else:
            tgt_path = self.dump_image(line)

        question = line['question']
        prompt = question + line['multi-choice options']
        msgs = []
        if isinstance(tgt_path, list):
            msgs.extend([dict(type='image', value=p) for p in tgt_path])
        else:
            msgs = [dict(type='image', value=tgt_path)]
        msgs.append(dict(type='text', value=prompt))

        return msgs

    @staticmethod
    def extract_characters_regex(s, choices=["(A)", "(B)", "(C)", "(D)", "(E)"]):
        """Extract the chosen option letter(s) A-E from a model response.

        Returns "" when no letter can be found; multiple distinct letters are
        upper-cased, deduplicated, and concatenated.
        """
        if type(s) is dict:
            s = ""
        s = s.strip()
        # BUGFIX: the prefix list previously contained fused entries
        # ("The best option isThe correct option is", "Best answer:Best option:")
        # that could never match a real response; split them into the four
        # intended prefixes so they are actually stripped.
        answer_prefixes = [
            "The best answer is",
            "The correct answer is",
            "The answer is",
            "The answer",
            "The best option is",
            "The correct option is",
            "Best answer:",
            "Best option:",
        ]
        for answer_prefix in answer_prefixes:
            s = s.replace(answer_prefix, "")

        if not re.search("[ABCDE]", s):
            return ""
        # Prefer parenthesised letters, then isolated letters, then any letter.
        matches = re.findall(r"\(([a-eA-E])\)", s)
        if len(matches) == 0:
            matches = re.findall(r"(?:^|\s)?([a-eA-E])(?:$|[\s,.])?", s)
        if len(matches) == 0:
            matches = re.findall(r"[a-eA-E]", s)
        if len(matches) == 0:
            return ""
        else:
            matches = set(mat.upper() for mat in matches)
        return "".join(matches)

    def evaluate(self, eval_file, **judge_kwargs):
        """Rule-based scoring: per-task (category/l2-category) accuracy plus
        macro and micro overall accuracy, dumped to an `_acc` CSV."""
        data = load(eval_file)
        data['prediction'] = [str(x) for x in data['prediction']]
        task_stats = {}
        micro_metric = {'correct': 0, 'total': 0}
        for index, it in data.iterrows():
            task = f"{it['category']}/{it['l2-category']}"
            if task not in task_stats:
                task_stats[task] = {'correct': 0, 'total': 0}
            task_stats[task]['total'] += 1
            micro_metric['total'] += 1
            pred = self.extract_characters_regex(it['prediction'])
            # Set comparison: prediction must name exactly the answer letters.
            if set(pred) == set(it['answer']):
                task_stats[task]['correct'] += 1
                micro_metric['correct'] += 1
        accuracy_dict = {task: [stats['correct'] / stats['total']] for task, stats in sorted(task_stats.items())}
        print(accuracy_dict)
        result_df = pd.DataFrame(accuracy_dict)
        result_df['Overall macro'] = result_df.mean(axis=1)
        result_df['Overall micro'] = micro_metric['correct'] / micro_metric['total']
        score_file = get_intermediate_file_path(eval_file, '_acc', 'csv')
        dump(result_df, score_file)
        return result_df
+
+
+class OmniEarthMCQBench(ImageMCQDataset):
+ DATASET_URL = {"OmniEarth-Bench": ""}
+
+ DATASET_PART_URL = {
+ "part0": "https://huggingface.co/datasets/initiacms/OmniEarth-Bench_MCQ_VLM/resolve/main/OmniEarth-Bench_MCQ_part0.jsonl", # noqa E501
+ "part1": "https://huggingface.co/datasets/initiacms/OmniEarth-Bench_MCQ_VLM/resolve/main/OmniEarth-Bench_MCQ_part1.jsonl", # noqa E501
+ "part2": "https://huggingface.co/datasets/initiacms/OmniEarth-Bench_MCQ_VLM/resolve/main/OmniEarth-Bench_MCQ_part2.jsonl", # noqa E501
+ "part3": "https://huggingface.co/datasets/initiacms/OmniEarth-Bench_MCQ_VLM/resolve/main/OmniEarth-Bench_MCQ_part3.jsonl", # noqa E501
+ "part4": "https://huggingface.co/datasets/initiacms/OmniEarth-Bench_MCQ_VLM/resolve/main/OmniEarth-Bench_MCQ_part4.jsonl", # noqa E501
+ "part5": "https://huggingface.co/datasets/initiacms/OmniEarth-Bench_MCQ_VLM/resolve/main/OmniEarth-Bench_MCQ_part5.jsonl", # noqa E501
+ "part6": "https://huggingface.co/datasets/initiacms/OmniEarth-Bench_MCQ_VLM/resolve/main/OmniEarth-Bench_MCQ_part6.jsonl", # noqa E501
+ "part7": "https://huggingface.co/datasets/initiacms/OmniEarth-Bench_MCQ_VLM/resolve/main/OmniEarth-Bench_MCQ_part7.jsonl", # noqa E501
+ "part8": "https://huggingface.co/datasets/initiacms/OmniEarth-Bench_MCQ_VLM/resolve/main/OmniEarth-Bench_MCQ_part8.jsonl", # noqa E501
+ "part9": "https://huggingface.co/datasets/initiacms/OmniEarth-Bench_MCQ_VLM/resolve/main/OmniEarth-Bench_MCQ_part9.jsonl", # noqa E501
+ "part10": "https://huggingface.co/datasets/initiacms/OmniEarth-Bench_MCQ_VLM/resolve/main/OmniEarth-Bench_MCQ_part10.jsonl", # noqa E501
+ "part11": "https://huggingface.co/datasets/initiacms/OmniEarth-Bench_MCQ_VLM/resolve/main/OmniEarth-Bench_MCQ_part11.jsonl", # noqa E501
+ "part12": "https://huggingface.co/datasets/initiacms/OmniEarth-Bench_MCQ_VLM/resolve/main/OmniEarth-Bench_MCQ_part12.jsonl", # noqa E501
+ "part13": "https://huggingface.co/datasets/initiacms/OmniEarth-Bench_MCQ_VLM/resolve/main/OmniEarth-Bench_MCQ_part13.jsonl", # noqa E501
+ "part14": "https://huggingface.co/datasets/initiacms/OmniEarth-Bench_MCQ_VLM/resolve/main/OmniEarth-Bench_MCQ_part14.jsonl", # noqa E501
+ }
+
+ def load_data(self, dataset="OmniEarth-Bench_MCQ_VLM", repo_id="initiacms/OmniEarth-Bench_MCQ_VLM"):
+ def load_jsonl(f):
+ lines = open(f, encoding='utf-8').readlines()
+ lines = [x.strip() for x in lines]
+ if lines[-1] == '':
+ lines = lines[:-1]
+ data = [json.loads(x) for x in lines]
+ return pd.DataFrame(data)
+ dfs = []
+ for part_num in range(15):
+ part_name = f'part{part_num}'
+ url = self.DATASET_PART_URL[part_name]
+ tsv_path = osp.join(LMUDataRoot(), f'OmniEarth-Bench_MCQ_{part_name}.jsonl')
+ if not osp.exists(tsv_path):
+ download_file(url, filename=tsv_path)
+ local_path = tsv_path.replace('.jsonl', '_local.tsv')
+ if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL'):
+ fname = tsv_path
+ new_fname = local_path
+ if new_fname is None:
+ new_fname = fname.replace('.jsonl', '_local.tsv')
+
+ base_name = osp.basename(fname)
+ dname = osp.splitext(base_name)[0]
+
+ data = load_jsonl(fname)
+ data_new = localize_df(data, dname)
+ dump(data_new, new_fname)
+ print(f'The localized version of data file is {new_fname}')
+
+ tsv_path = local_path
+ # 加载数据
+ df = load_jsonl(tsv_path) if tsv_path.endswith('.jsonl') else load(tsv_path)
+ dfs.append(df)
+ # 合并所有数据
+ data = pd.concat(dfs, ignore_index=True)
+ return data
+
+ def build_prompt(self, line):
+ if isinstance(line, int):
+ line = self.data.iloc[line]
+
+ if self.meta_only:
+ tgt_path = toliststr(line["image_path"])
+ else:
+ tgt_path = self.dump_image(line)
+
+ question = line["question"]
+ prompt = question + line["multi-choice options"]
+ msgs = []
+ if isinstance(tgt_path, list):
+ msgs.extend([dict(type="image", value=p) for p in tgt_path])
+ else:
+ msgs = [dict(type="image", value=tgt_path)]
+ msgs.append(dict(type="text", value=prompt))
+
+ return msgs
+
+ @staticmethod
+ def extract_characters_regex(s, choices=["(A)", "(B)", "(C)", "(D)", "(E)", "(F)", "(G)"]):
+ if type(s) is dict:
+ s = ""
+ s = s.strip()
+ answer_prefixes = [
+ "The best answer is",
+ "The correct answer is",
+ "The answer is",
+ "The answer",
+ "The best option isThe correct option is",
+ "Best answer:Best option:",
+ ]
+ for answer_prefix in answer_prefixes:
+ s = s.replace(answer_prefix, "")
+
+ if not re.search("[ABCDEFG]", s):
+ return ""
+ matches = re.findall(r"\(([a-gA-G])\)", s)
+ if len(matches) == 0:
+ matches = re.findall(r"(?:^|\s)?([a-gA-G])(?:$|[\s,.])?", s)
+ if len(matches) == 0:
+ matches = re.findall(r"[a-gA-G]", s)
+ if len(matches) == 0:
+ return ""
+ else:
+ matches = set(mat.upper() for mat in matches)
+ return "".join(matches)
+
    def evaluate(self, eval_file, **judge_kwargs):
        """Score MCQ predictions with exact letter-set matching.

        Computes per-task accuracy over the 4-level category hierarchy,
        per-sphere macro averages, an overall macro average, and an overall
        micro (per-sample) accuracy; dumps the table next to ``eval_file``
        and returns it.
        """
        data = load(eval_file)
        # Predictions may be non-strings (e.g. NaN / dicts) -- normalize first.
        data['prediction'] = [str(x) for x in data['prediction']]
        task_stats = {}
        micro_metric = {"correct": 0, "total": 0}
        for index, it in data.iterrows():
            # The task key is the full 4-level category path.
            task = f"{it['category']}/{it['l2-category']}/{it['l3-category']}/{it['l4-category']}"
            if task not in task_stats:
                task_stats[task] = {"correct": 0, "total": 0}
            task_stats[task]["total"] += 1
            micro_metric["total"] += 1
            pred = self.extract_characters_regex(it["prediction"])
            # Letter-set equality handles multi-answer questions order-insensitively.
            if set(pred) == set(it["answer"]):
                task_stats[task]["correct"] += 1
                micro_metric["correct"] += 1
        # One single-element list per task so the dict builds a 1-row DataFrame.
        accuracy_dict = {task: [stats["correct"] / stats["total"]] for task, stats in sorted(task_stats.items())}
        result_df = pd.DataFrame(accuracy_dict)
        from collections import defaultdict

        # Group per-task accuracies by top-level sphere for macro averaging.
        sphere_accs = defaultdict(list)
        for task, acc in accuracy_dict.items():
            sphere = task.split("/")[0]
            assert len(acc) == 1
            sphere_accs[sphere].append(acc[0])
        for sphere, accs in sphere_accs.items():
            result_df[f"Sphere macro: {sphere}"] = sum(accs) / len(accs)
        # NOTE(review): this row-mean also averages the just-added sphere-macro
        # columns together with the per-task columns -- confirm that is intended.
        result_df["Overall macro"] = result_df.mean(axis=1)
        result_df["Overall micro"] = micro_metric["correct"] / micro_metric["total"]
        score_file = get_intermediate_file_path(eval_file, '_acc', 'csv')
        dump(result_df, score_file)
        return result_df
+
+
class OmniMedVQA(ImageMCQDataset):
    """OmniMedVQA medical MCQ dataset, distributed as 30 TSV shards on HuggingFace."""

    DATASET_URL = {'OmniMedVQA': ''}

    DATASET_PART_URL = {
        'part_1': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_1.tsv',
        'part_2': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_2.tsv',
        'part_3': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_3.tsv',
        'part_4': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_4.tsv',
        'part_5': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_5.tsv',
        'part_6': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_6.tsv',
        'part_7': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_7.tsv',
        'part_8': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_8.tsv',
        'part_9': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_9.tsv',
        'part_10': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_10.tsv',
        'part_11': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_11.tsv',
        'part_12': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_12.tsv',
        'part_13': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_13.tsv',
        'part_14': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_14.tsv',
        'part_15': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_15.tsv',
        'part_16': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_16.tsv',
        'part_17': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_17.tsv',
        'part_18': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_18.tsv',
        'part_19': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_19.tsv',
        'part_20': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_20.tsv',
        'part_21': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_21.tsv',
        'part_22': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_22.tsv',
        'part_23': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_23.tsv',
        'part_24': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_24.tsv',
        'part_25': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_25.tsv',
        'part_26': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_26.tsv',
        'part_27': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_27.tsv',
        'part_28': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_28.tsv',
        'part_29': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_29.tsv',
        'part_30': 'https://huggingface.co/datasets/KKYYKK/OmniMed_VLM/resolve/main/part_30.tsv',
    }

    def load_data(self, dataset="OmniMedVQA", repo_id="KKYYKK/OmniMed_VLM"):
        """Download every shard listed in DATASET_PART_URL, localize it, and concatenate.

        Shards are cached in ``LMUDataRoot()``; localized copies are rebuilt
        when the ``FORCE_LOCAL`` environment variable is set.
        """
        dfs = []
        # BUGFIX: previously only parts 1-14 were loaded although 30 parts are
        # declared in DATASET_PART_URL -- iterate over every declared part.
        for part_num in range(1, len(self.DATASET_PART_URL) + 1):
            part_name = f'part_{part_num}'
            url = self.DATASET_PART_URL[part_name]
            tsv_path = osp.join(LMUDataRoot(), f'omnimedbench_{part_name}.tsv')
            if not osp.exists(tsv_path):
                download_file(url, filename=tsv_path)
            local_path = tsv_path.replace('.tsv', '_local.tsv')
            if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL'):
                from ..tools import LOCALIZE
                LOCALIZE(tsv_path, local_path)
            tsv_path = local_path
            # Load this shard.
            df = load(tsv_path)
            dfs.append(df)
        # Merge all shards with a fresh contiguous index.
        data = pd.concat(dfs, ignore_index=True)
        return data
+
+
class MSEarthMCQ(ImageMCQDataset):
    """Earth-science MCQ benchmark whose questions are figure captions."""

    DATASET_URL = {
        'MSEarthMCQ': 'http://opencompass.openxlab.space/utils/VLMEval/MSEarthMCQ.tsv',
    }

    DATASET_MD5 = {
        'MSEarthMCQ': '4e32b487dbd241e66458251186540a6d'
    }

    def build_prompt(self, line):
        """Render the caption-style MCQ prompt.

        Layout: task statement, optional hint, "Caption:" + question text,
        the lettered options, then the answer-format instruction.
        """
        if isinstance(line, int):
            line = self.data.iloc[line]

        # Image path(s): either carried in metadata or dumped from base64.
        tgt_path = toliststr(line['image_path']) if self.meta_only else self.dump_image(line)

        question = line['question']
        option_items = [
            (letter, line[letter])
            for letter in string.ascii_uppercase
            if letter in line and not pd.isna(line[letter])
        ]
        hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None

        parts = ['You are tasked with answering a multiple-choice question about the given input image.\n\n']
        if hint is not None:
            parts.append(f'Hint: {hint}\n')
        parts.append(f'Caption:\n {question}\n')
        if option_items:
            parts.append('Options:\n')
            parts.extend(f'{letter}. {content}\n' for letter, content in option_items)
        parts.append("Based on the image, select the correct option (e.g., 'A', 'B', 'C', 'D') or directly state the correct option content, Do not give any explaination.")  # noqa E501
        prompt = ''.join(parts)

        if isinstance(tgt_path, list):
            msgs = [dict(type='image', value=p) for p in tgt_path]
        else:
            msgs = [dict(type='image', value=tgt_path)]
        msgs.append(dict(type='text', value=prompt))

        return msgs
+
+
class VLMBlind(ImageMCQDataset):
    """BlindTest-style benchmark of low-level visual tasks (counting, reading)."""

    TYPE = "MCQ"
    DATASET_URL = {
        'VLMBlind': 'http://opencompass.openxlab.space/utils/VLMEval/VLMBlind.tsv'
    }
    DATASET_MD5 = {
        'VLMBlind': 'e0f960236afe08f9fa48e8ccc908b2a9',
    }

    def extract_content_in_braces(self, input_str):
        """Return the text inside the first ``{...}`` pair, or '' if none."""
        import re
        found = re.search(r'\{(.*?)\}', input_str)
        return found.group(1) if found else ""

    def compare_string_with_values(self, input_str, target_values):
        """Check whether a prediction contains the expected (row, column) pair.

        ``target_values`` is a "r, c" string; the prediction may state the
        numbers either as "Row ... {r} ... Column ... {c}" or as "(r, c)".
        """
        import re
        try:
            expected = [int(x.strip()) for x in target_values.split(',')]
            if len(expected) != 2:
                return False
        except Exception:
            return False

        # Form 1: row/column phrasing with the numbers wrapped in braces.
        row_hit = re.search(r'[Rr]ows?(?:[^{}]*)\{(\d+)\}', input_str)
        col_hit = re.search(r'[Cc]olumns?(?:[^{}]*)\{(\d+)\}', input_str)
        if row_hit and col_hit:
            return [int(row_hit.group(1)), int(col_hit.group(1))] == expected

        # Form 2: a bare "(r, c)" tuple.
        pair_hit = re.search(r'\((\d+),\s*(\d+)\)', input_str)
        if pair_hit:
            return [int(pair_hit.group(1)), int(pair_hit.group(2))] == expected
        return False

    def evaluate(self, eval_file, **judge_kwargs):
        """Compute per-task accuracy and dump it as a one-row CSV table."""
        data = load(eval_file)
        # These three tasks all compare the literal text inside {...}.
        brace_tasks = ("Subway Connections", "Nested Squares", "Line Plot Intersections")
        task_stats = {}

        for _, data_item in data.iterrows():
            task = data_item["task"]
            stats = task_stats.setdefault(task, {'correct': 0, 'total': 0})
            stats['total'] += 1
            if task in brace_tasks:
                if self.extract_content_in_braces(data_item["prediction"]) == data_item["answers"]:
                    stats['correct'] += 1
            elif task == "Touchdown Reading":
                if self.compare_string_with_values(data_item["prediction"], data_item["answers"]):
                    stats['correct'] += 1

        accuracy_dict = {task: [stats['correct'] / stats['total']] for task, stats in sorted(task_stats.items())}
        accuracy_df = pd.DataFrame(accuracy_dict)
        score_file = get_intermediate_file_path(eval_file, '_acc', 'csv')
        dump(accuracy_df, score_file)
        return accuracy_df
+
+
class SCAM(ImageMCQDataset):
    """Typographic-attack benchmark: pick the depicted object over the attack word."""

    # Dataset loading is done manually in `load_data`
    DATASET_URL = {'SCAM': 'None'}
    DATASET_MD5 = {'SCAM': 'None'}

    def load_data(self, dataset):
        """Load BLISS-e-V/SCAM from HuggingFace and convert it to the VLMEvalKit MCQ schema.

        Each example becomes a two-option question (object label vs. attack
        word) with randomized A/B order; the image is stored base64-encoded
        under the ``image`` column. The correct answer is always the object
        label, whichever slot it lands in.
        """
        import base64
        import io
        import datasets
        import random
        # Fix the option-shuffle seed for reproducibility.
        # NOTE(review): `ds.map(..., num_proc=8)` runs the mapper in worker
        # processes -- confirm the parent-process seed still makes the
        # shuffling deterministic there.
        random.seed(42)

        # Function to convert dataset to VLMEvalKit format
        def convert_to_vlmeval_format(example):
            # Convert image to base64
            buffer = io.BytesIO()
            example['image'].save(buffer, format="PNG")
            img_base64 = base64.b64encode(buffer.getvalue()).decode("utf-8")

            # Shuffle the options
            shuffle = random.choice([True, False])
            return {
                'image_base64': img_base64,
                'question': 'What entity is depicted in the image?',
                'A': example['attack_word' if shuffle else 'object_label'],
                'B': example['object_label' if shuffle else 'attack_word'],
                # Correct option is always the object label.
                'answer': 'B' if shuffle else 'A',
                'category': example['type'],
            }

        # Load and convert dataset
        ds = datasets.load_dataset("BLISS-e-V/SCAM", split="train")
        # Use 8 workers for parallel processing
        ds = ds.map(convert_to_vlmeval_format, remove_columns=ds.column_names, num_proc=8)
        df = ds.to_pandas()
        # Rename df column, because using `image` with a hf ds has different functionality
        df.rename(columns={'image_base64': 'image'}, inplace=True)
        df['index'] = range(1, len(df) + 1)  # add index column with unique values

        return df
+
+
class _3DSRBench(ImageMCQDataset):
    """3D spatial-reasoning MCQ benchmark (3DSRBench)."""

    DATASET_URL = {'3DSRBench': 'http://opencompass.openxlab.space/utils/VLMEval/3DSRBench.tsv'}
    DATASET_MD5 = {'3DSRBench': '610516a0b4710595545b7613c60524e8'}

    def evaluate(self, eval_file, **judge_kwargs):
        """Parse predictions, score them with ``eval_3dsr``, and report accuracy.

        Writes the per-sample hit table to an ``_acc`` intermediate file and
        returns the aggregated table from ``report_acc``.
        """
        from .utils.multiple_choice import report_acc
        from .utils.sr3d import parse_3dsr_prediction, eval_3dsr
        from ..smp import dump, load
        # NOTE(review): TDBench_grounding_eval is imported but never used here.
        from ..utils.dataset_util import TDBench_grounding_eval
        from ..dataset import parse_img_path_list
        from ..config import VLM_EVAL_WITH_SUBSET
        data = load(eval_file)
        # parse the model predictions
        data = parse_img_path_list(data)
        data = parse_3dsr_prediction(data)
        # rotate the image and boxes
        data['hit'] = eval_3dsr(data)
        result_file = get_intermediate_file_path(eval_file, '_acc')
        if VLM_EVAL_WITH_SUBSET:
            # Index format "subset|..." -- recover the subset name for grouped reporting.
            data['subset'] = [x.split('|')[0] for x in data['index']]
        dump(data, result_file)
        return report_acc(data)
+
+
class AffordanceDataset(ImageMCQDataset):
    """A4Bench affordance MCQ benchmark; answers are Python-style lists of letters."""

    DATASET_URL = {'A4Bench': "http://opencompass.openxlab.space/utils/VLMEval/A4Bench.tsv"}
    DATASET_MD5 = {'A4Bench': "7c0dc90e8c03e67ff937f3abb4a3fffb"}

    def build_prompt(self, line):
        """Build the prompt: affordance-theory primer + question + options + strict output rules."""
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line['image_path'])
        else:
            tgt_path = self.dump_image(line)

        question = line['question']
        options = {
            cand: line[cand]
            for cand in string.ascii_uppercase
            if cand in line and not pd.isna(line[cand])
        }
        options_prompt = 'Options:\n'
        for key, item in options.items():
            options_prompt += f'{key}. {item}\n'
        hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
        # Primer on Gibson's Affordance Theory, prepended to every question.
        affordance_definition = (
            """ Please read the following key points of Gibson's Affordance Theory before answering:
            Gibson's Affordance Theory core principles:
            1. Core Definition
            - "What the environment offers the animal for good or ill" (Gibson, 1979)
            - Complementarity between animal's capacities and environmental properties
            - Example: Horizontal rigid surface affords support for standing; cliff edge affords falling

            2. Key Characteristics
            - Direct perception through ecological optics (e.g., texture gradients specify walkability)
            - Functional relativity (e.g., knee-high surface affords sitting for adults but not children)
            - Action possibilities multiplicity (e.g., stone as missile/paperweight/hammer)

            3. Fundamental Distinctions
            - Affordance vs physical measurement (support measured relative to animal's weight)
            - Invariant optical information (e.g., horizon specifies earth-sky separation)
            - Niche as occupied affordance system (e.g., aquatic vs terrestrial niches)

            4. Theoretical Breakthroughs
            - Rejecting subjective-objective dichotomy (air affords breathing & seeing simultaneously)
            - Lawful misinformation cases (e.g., visual cliff experiment with glass extension)
            - Embodied perception (posture/gravity constraints in surface perception)

            5. Ecological Evidence
            - Animate vs inanimate distinction (infants' immediate perception of agency)
            - Occlusion laws (peek-a-boo as concealment affordance learning)
            - Tool-body extension (staff as arm extension for reaching/striking)"""
        )  # noqa: E122
        # Assemble the prompt structure.
        prompt = ''
        if hint is not None:
            prompt += f'Hint: {hint}\n'
        prompt += f'Concept: {affordance_definition}\n'  # insert the theory primer
        prompt += f'Question: {question}\n'
        if len(options):
            prompt += options_prompt
        prompt += ("""Process multiple-choice questions under STRICT rules:
        1. Final answer MUST be valid Python list:
        - Format examples: ['A'] or ['B','D']
        - Output ONLY the answer list, NO explanations
        2. Mandatory requirements:
        a. MUST determine question type (single/multi-select: ONLY ONE answer list
        b. Uppercase letters in alphabetical order (A < B < C < D < E)
        c. Use English single quotes and brackets
        3. Processing logic:
        - All wrong: Return most probable single option (e.g., ['D'])
        - Partial correct: Keep ONLY confirmed correct options
        - Uncertain: Output highest-probability combination
        4. Format RULES:
        - STRICTLY ONE list (no multiple answers like ['C'] and ['A','B'])
        - NO non-list formats (e.g., 'C', A,B)
        - NO empty lists (even if all options wrong)

        Output: Answer list""")
        msgs = []
        if isinstance(tgt_path, list):
            msgs.extend([dict(type='image', value=p) for p in tgt_path])
        else:
            msgs = [dict(type='image', value=tgt_path)]
        msgs.append(dict(type='text', value=prompt))
        return msgs

    def is_match(self, row):
        """Return True iff prediction and answer are the same letter set.

        Both fields are parsed with ``ast.literal_eval``.
        NOTE(review): a malformed prediction (not a valid Python list literal)
        raises here -- there is no fallback for unparsable model output.
        """
        import ast
        answer = ast.literal_eval(row['answer'])
        prediction = ast.literal_eval(row['prediction'])
        return sorted(answer) == sorted(prediction)

    def evaluate(self, eval_file, **judge_kwargs):
        """Exact-match evaluation of list-style answers; writes ``*_acc.csv``.

        NOTE(review): a judge model may be built below but is never used --
        scoring always goes through ``is_match``. Also, if reading
        ``eval_file`` fails, ``df`` is undefined and the final return raises
        NameError; confirm whether an explicit error is preferred.
        """
        from .utils.multiple_choice import (
            report_acc, report_acc_MMT, report_acc_MMSci, mcq_circular_eval, mcq_vanilla_eval
        )

        suffix = eval_file.split('.')[-1]
        model = judge_kwargs.get('model', 'exact_matching')
        assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125']
        name_str_map = {'chatgpt-0125': 'openai', 'gpt-4-0125': 'gpt4'}
        name_str = name_str_map[model] if model in name_str_map else model

        if model == 'exact_matching':
            model = None
        elif gpt_key_set():
            model = build_judge(**judge_kwargs)
            if not model.working():
                warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
                warnings.warn(DEBUG_MESSAGE)
                model = None
        else:
            warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
            model = None

        try:
            df = pd.read_excel(eval_file)
        except FileNotFoundError:
            print(f"未找到文件:{eval_file}")
        except Exception as e:
            print(f"读取文件时出现错误:{e}")
        else:
            # Add the per-row `match` column (1 = exact letter-set match).
            df['match'] = df.apply(self.is_match, axis=1).astype(int)

            # Persist and reload the annotated result file.
            dump(df, eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}'))
            df = load(eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}'))

            acc = df['match'].mean()
            print(f"准确率(ACC): {acc * 100:.2f}%")

            score_file = eval_file.replace(f'.{suffix}', '_acc.csv')
            try:
                acc_df = pd.DataFrame({'Accuracy': [acc]})
                acc_df.to_csv(score_file, index=False)
            except Exception as e:
                print(f"保存准确率到 CSV 文件时出现错误: {e}")

        selected_columns = ['index', 'question', 'prediction', 'match']
        return df[selected_columns]
+
+
class TreeBench(ImageMCQDataset):
    """TreeBench visual-reasoning benchmark; OCR items are open-ended, the rest MCQ."""

    TYPE = 'MCQ'

    DATASET_URL = {
        'TreeBench': 'https://huggingface.co/datasets/HaochenWang/TreeBench/resolve/main/TreeBench.tsv',
    }

    def build_prompt(self, line):
        """Build the prompt; options are appended only for non-OCR categories."""
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line['image_path'])
        else:
            tgt_path = self.dump_image(line)

        prompt = line['question']
        # Perception/OCR items are answered free-form; everything else is MCQ.
        if not line["category"] == "Perception/OCR":
            prompt += "\nOptions:\n" + line["multi-choice options"]
            prompt += "\nAnswer with the option's letter directly."

        msgs = []
        if isinstance(tgt_path, list):
            msgs.extend([dict(type='image', value=p) for p in tgt_path])
        else:
            msgs = [dict(type='image', value=tgt_path)]
        msgs.append(dict(type='text', value=prompt))

        return msgs

    # It returns a dictionary
    @classmethod
    def evaluate(cls, eval_file, **judge_kwargs):
        """Compute accuracy via ``utils.treebench.get_acc``; returns 0 on any failure."""
        from .utils.treebench import get_acc
        score_file = get_intermediate_file_path(eval_file, '_acc', 'csv')
        try:
            res = get_acc(eval_file)
            dump(res, score_file)
            return res
        except Exception:
            # BUGFIX: narrowed from a bare `except:`, which also swallowed
            # SystemExit / KeyboardInterrupt.
            return 0

    @staticmethod
    def evaluate_box_iou(predict_str: str, target_instances: list) -> float:
        """Average best-match IoU between predicted and target [x1, y1, x2, y2] boxes.

        BUGFIX: declared as a @staticmethod -- the original took no ``self``,
        so an instance call would have bound the instance to ``predict_str``.
        NOTE(review): the pattern r"(.*?)" only produces empty matches, so no
        box is ever extracted; it looks like box-tag delimiters (e.g.
        <box>...</box>) were lost -- confirm against the original benchmark.
        """
        pattern = r"(.*?)"
        matches = re.findall(pattern, predict_str, re.DOTALL)

        all_boxes = []

        for match in matches:
            box = match.strip()

            coord_pattern = r'\[(\d+),(\d+),(\d+),(\d+)\]'
            coord_match = re.match(coord_pattern, box)

            if coord_match:
                x1, y1, x2, y2 = map(int, coord_match.groups())
                # Keep only well-formed boxes with positive width and height.
                if x1 < x2 and y1 < y2:
                    all_boxes.append([x1, y1, x2, y2])

        if len(all_boxes) == 0:
            return 0

        target_boxes = target_instances
        if len(target_boxes) == 0:
            # NOTE(review): returns a bool although the annotation says float.
            return len(all_boxes) > 0

        def calculate_average_iou(pred_boxes, target_boxes):
            """For each target box, take the best IoU over predictions; average them."""
            def compute_iou(box1, box2):
                # Standard intersection-over-union of two corner-form boxes.
                x1_min, y1_min, x1_max, y1_max = box1
                x2_min, y2_min, x2_max, y2_max = box2

                inter_x_min = max(x1_min, x2_min)
                inter_y_min = max(y1_min, y2_min)
                inter_x_max = min(x1_max, x2_max)
                inter_y_max = min(y1_max, y2_max)

                inter_width = max(0, inter_x_max - inter_x_min)
                inter_height = max(0, inter_y_max - inter_y_min)
                inter_area = inter_width * inter_height

                area1 = (x1_max - x1_min) * (y1_max - y1_min)
                area2 = (x2_max - x2_min) * (y2_max - y2_min)

                union_area = area1 + area2 - inter_area

                return inter_area / union_area if union_area > 0 else 0.0

            num_targets = len(target_boxes)
            if num_targets == 0:
                return 0.0

            total_iou = 0.0
            # Greedy: each target is matched to its best-overlapping prediction.
            for t_coord in target_boxes:
                best_iou = 0.0
                for p_coord in pred_boxes:
                    iou = compute_iou(t_coord, p_coord)
                    if iou > best_iou:
                        best_iou = iou
                total_iou += best_iou

            return total_iou / num_targets

        return calculate_average_iou(all_boxes, target_boxes)
+
+
class CVQA(ImageMCQDataset):
    """Culturally-diverse VQA, in English (CVQA_EN) or local-language (CVQA_LOC) form."""

    @classmethod
    def supported_datasets(cls):
        return ['CVQA_LOC', 'CVQA_EN']

    DATASET_URL = {
        "CVQA_EN": (
            "https://huggingface.co/datasets/timothycdc/"
            "VLMEvalKit_CVQA/resolve/main/CVQA_ENG.tsv"
        ),
        "CVQA_LOC": (
            "https://huggingface.co/datasets/timothycdc/"
            "VLMEvalKit_CVQA/resolve/main/CVQA_LOC.tsv"
        ),
    }

    DATASET_MD5 = {
        "CVQA_EN": "f49ad8ad39dbc4208ea8985a3ca00804",
        "CVQA_LOC": "b51dcf2820cb292aa5cb3430dd7d5049",
    }

    def build_prompt(self, line):
        """Render the question, its lettered options, and the answer-letter instruction."""
        if isinstance(line, int):
            line = self.data.iloc[line]

        # Resolve image path(s) from metadata or dump the embedded image.
        tgt_path = toliststr(line['image_path']) if self.meta_only else self.dump_image(line)

        pieces = [f"Question: {line['question']}\n"]
        option_pairs = [
            (letter, line[letter])
            for letter in string.ascii_uppercase
            if letter in line and not pd.isna(line[letter])
        ]
        if option_pairs:
            pieces.append('Options:\n')
            pieces.extend(f'{letter}. {text}\n' for letter, text in option_pairs)
        pieces.append(
            'Select the best answer to the above multiple-choice question '
            'based on the image. Respond with only the letter of the '
            'correct option (A, B, C, or D).\n'
            'The best answer is: '
        )
        prompt = ''.join(pieces)

        if isinstance(tgt_path, list):
            msgs = [dict(type='image', value=p) for p in tgt_path]
        else:
            msgs = [dict(type='image', value=tgt_path)]
        msgs.append(dict(type='text', value=prompt))

        return msgs
+
+
class TopViewRS(ImageMCQDataset):
    """Top-view spatial reasoning benchmark with an extra word-overlap partial-match score."""

    DATASET_URL = {
        'TopViewRS': 'https://opencompass.openxlab.space/utils/VLMEval/TopViewRS.tsv'
    }

    DATASET_MD5 = {
        'TopViewRS': '5669bc122457979dd2ac3b69b5dc1622'
    }

    def evaluate(self, eval_file, **judge_kwargs):
        """Evaluate predictions via exact matching or a GPT judge; report accuracy.

        Per-index results are cached in a pickle so interrupted runs resume;
        the final table is written to ``*_acc.csv`` and returned.
        """
        from .utils.multiple_choice import eval_vanilla, report_topviewrs_acc
        from ..utils import track_progress_rich
        from ..smp import load, dump, gpt_key_set
        from collections import defaultdict
        import numpy as np
        import pandas as pd
        import string
        import warnings
        import os.path as osp

        def mcq_topviewrs_eval(model, data, meta, nproc, result_file, dataset_name=None):
            # Judge every not-yet-cached row, then attach hit/log columns and
            # a partial-match score column.
            result = {}
            if osp.exists(result_file):
                result = load(result_file)
            answer_map = {i: c for i, c in zip(meta['index'], meta['answer'])}

            # Keep only rows present in the dataset and attach ground truth.
            data = data[data['index'].isin(answer_map)]
            data['GT'] = [answer_map[idx] for idx in data['index']]
            items = []

            # Only rows without a cached judgment are sent to the judge.
            for i in range(len(data)):
                item = data.iloc[i]
                if item['index'] not in result:
                    items.append(item)

            tups = [dict(model=model, item=x, dataset_name=dataset_name) for x in items]
            keys = [x['index'] for x in items]
            if len(tups):
                res = track_progress_rich(eval_vanilla, tups, nproc=nproc, chunksize=nproc, save=result_file, keys=keys)
                result = load(result_file)
                for k, v in zip(keys, res):
                    if k not in result:
                        result[k] = v

            data['hit'] = [result[i]['hit'] for i in data['index']]
            data['log'] = [result[i]['log'] for i in data['index']]

            def extract_letter(log_text):
                # Pull the chosen option letter out of the judge log:
                # prefer the "[X]" bracket form, else the last word.
                if not log_text:
                    return None
                if "[" in log_text and "]" in log_text:
                    return log_text[log_text.index("[") + 1:log_text.index("]")]
                return log_text.rstrip(". ").split()[-1]

            def partial_match_score(row):
                """Calculate PM score using formula: |intersection| / max(|labels|, |predictions|)"""
                model_letter = extract_letter(row['log'])
                correct_letter = row['GT']

                if not model_letter:
                    return 0.0

                # Get option texts
                model_option = row.get(model_letter, '')
                correct_option = row.get(correct_letter, '')

                if not model_option or not correct_option:
                    return 0.0

                # Get word sets
                model_words = set(str(model_option).lower().split())
                correct_words = set(str(correct_option).lower().split())

                # PM formula: |labels ∩ predictions| / max(|labels|, |predictions|)
                intersection = len(model_words.intersection(correct_words))
                max_len = max(len(model_words), len(correct_words))

                if max_len == 0:
                    return 0.0

                pm_score = intersection / max_len
                return pm_score

            # Apply partial matching - returns float values (0.0 to 1.0)
            data['partial_match'] = [partial_match_score(row) for _, row in data.iterrows()]

            if 'GT' in data:
                data.pop('GT')
            return data
        nproc = judge_kwargs.pop('nproc', 4)
        suffix = eval_file.split('.')[-1]
        model = judge_kwargs.get('model', 'exact_matching')
        assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125']
        name_str_map = {'chatgpt-0125': 'openai', 'gpt-4-0125': 'gpt4'}
        name_str = name_str_map[model] if model in name_str_map else model

        # Fall back to exact matching whenever a judge cannot be built.
        if model == 'exact_matching':
            model = None
        elif gpt_key_set():
            model = build_judge(**judge_kwargs)
            if not model.working():
                warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
                model = None
        else:
            warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
            model = None

        result_file = eval_file.replace(f'.{suffix}', f'_{name_str}_result.pkl')

        data = load(eval_file)
        data = data.sort_values(by='index')
        data['prediction'] = [str(x) for x in data['prediction']]

        # Normalize column names to lower case, except single option letters (A, B, ...).
        # NOTE(review): this pops columns while iterating data.keys() -- confirm
        # pandas tolerates the mutation for these frames.
        for k in data.keys():
            data[k.lower() if k not in list(string.ascii_uppercase) else k] = data.pop(k)

        meta = self.data
        meta_q_map = {x: y for x, y in zip(meta['index'], meta['question'])}
        data_map = {x: y for x, y in zip(data['index'], data['question'])}

        for k in data_map:
            assert k in meta_q_map, (
                f'eval_file should be the same as or a subset of dataset {self.dataset_name}'
            )
        data = mcq_topviewrs_eval(model, data, meta, nproc, result_file, self.dataset_name)
        eval_record = eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}')
        dump(data, eval_record)
        data = load(eval_record)
        acc = report_topviewrs_acc(data)
        score_file = eval_file.replace(f'.{suffix}', '_acc.csv')
        dump(acc, score_file)
        return acc
+
class ShapeGrid(ImageBaseDataset):
    """Synthetic shape-grid benchmark with per-category answer-format suffixes.

    NOTE(review): every entry in DATASET_MD5 carries the same checksum --
    these look like placeholders; confirm the real per-file hashes.
    """

    TYPE = 'ShapeGrid'
    DATASET_URL = {
        'ShapeGrid_area': 'ShapeGrid_area.tsv',
        'ShapeGrid_caption': 'ShapeGrid_caption.tsv',
        'ShapeGrid_caption_letter': 'ShapeGrid_caption_letter.tsv',
        'ShapeGrid_count': 'ShapeGrid_count.tsv',
        'ShapeGrid_count_letter': 'ShapeGrid_count_letter.tsv',
        'ShapeGrid_dis': 'ShapeGrid_dis.tsv',
        'ShapeGrid_dis_letter': 'ShapeGrid_dis_letter.tsv',
        'ShapeGrid_loc': 'ShapeGrid_loc.tsv',
        'ShapeGrid_loc_letter': 'ShapeGrid_loc_letter.tsv',
        'ShapeGrid_sudoku': 'ShapeGrid_sudoku.tsv',
        'ShapeGrid_sudoku_letter': 'ShapeGrid_sudoku_letter.tsv',
        'ShapeGrid_sudoku_mutiobj': 'ShapeGrid_sudoku_mutiobj.tsv',
    }
    DATASET_MD5 = {
        'ShapeGrid_area': '748aa6d4aa9d4de798306a63718455e3',
        'ShapeGrid_caption': '748aa6d4aa9d4de798306a63718455e3',
        'ShapeGrid_caption_letter': '748aa6d4aa9d4de798306a63718455e3',
        'ShapeGrid_count': '748aa6d4aa9d4de798306a63718455e3',
        'ShapeGrid_count_letter': '748aa6d4aa9d4de798306a63718455e3',
        'ShapeGrid_dis': '748aa6d4aa9d4de798306a63718455e3',
        'ShapeGrid_dis_letter': '748aa6d4aa9d4de798306a63718455e3',
        'ShapeGrid_loc': '748aa6d4aa9d4de798306a63718455e3',
        'ShapeGrid_loc_letter': '748aa6d4aa9d4de798306a63718455e3',
        'ShapeGrid_sudoku': '748aa6d4aa9d4de798306a63718455e3',
        'ShapeGrid_sudoku_letter': '748aa6d4aa9d4de798306a63718455e3',
        'ShapeGrid_sudoku_mutiobj': '748aa6d4aa9d4de798306a63718455e3',
    }

    def evaluate(self, eval_file):
        """Score predictions with ShapeGrid_acc and dump the per-task CSV; returns the table."""
        from .utils.shapegrid import ShapeGrid_acc

        ext = eval_file.split('.')[-1]
        storage = eval_file.replace(f'.{ext}', '_ShapeGrid.xlsx')
        # Materialize the predictions as xlsx once; reused on re-runs.
        if not osp.exists(storage):
            dump(load(eval_file), storage)

        score = ShapeGrid_acc(storage)
        dump(score, storage.replace('.xlsx', '_score.csv'))
        return score

    def build_prompt(self, line):
        """Attach the category-specific answer-format suffix to the question."""
        SUFFIX_FOR_VQA = {
            "yes_no": "\nPlease answer Yes or No.",
            "multiple_choice": "\nAnswer with the option's letter from the given choices directly.",
            "content_description": "\nOnly answer with the letters or shapes you see in the image",
            "counting": "\nOnly answer with the number of shapes or letters mentioned in the question, using Arabic numerals.",
            "sudoku": "Answer with the option's letter from the given choices directly.The image is a 3*3 grid.\n "
        }
        if isinstance(line, int):
            line = self.data.iloc[line]

        # Image path(s): carried in metadata or dumped from base64.
        tgt_path = toliststr(line['image_path']) if self.meta_only else self.dump_image(line)

        question = line['question']
        category = line["category"]
        if category == "sudoku_location":
            # Sudoku prompts are concatenated without a separating space.
            prompt = f'{question}{SUFFIX_FOR_VQA["sudoku"]}'
        else:
            suffix_key = {
                "location": "multiple_choice",
                "content": "content_description",
                "count": "counting",
            }.get(category, "yes_no")
            prompt = f'{question} {SUFFIX_FOR_VQA[suffix_key]}'

        if isinstance(tgt_path, list):
            msgs = [dict(type='image', value=p) for p in tgt_path]
        else:
            msgs = [dict(type='image', value=tgt_path)]
        msgs.append(dict(type='text', value=prompt))

        return msgs
\ No newline at end of file
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/m4bench.py b/VLMEvalKit-sudoku/vlmeval/dataset/m4bench.py
new file mode 100644
index 0000000000000000000000000000000000000000..2695c043ca0fade490abcb8087f925ae00f30314
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/m4bench.py
@@ -0,0 +1,193 @@
+import os
+import re
+from tqdm import tqdm
+import pandas as pd
+
+from os import path as osp
+from .image_base import ImageBaseDataset
+from .utils import build_judge, DEBUG_MESSAGE
+from ..smp import decode_base64_to_image_file, load, dump, get_intermediate_file_path
# Sentinel string marking predictions for which the judge/API call failed.
FAIL_MSG = 'Failed to obtain answer via API.'
+
+
class M4Bench(ImageBaseDataset):
    """
    Dataset class for M4Bench, handling single and dual image inputs.

    Every record carries two images (base64 payloads and/or URLs);
    ``build_prompt`` emits both images followed by the text query, and
    ``evaluate`` scores single-choice answers, optionally using an LLM
    judge to map free-form predictions onto option letters.
    """
    TYPE = 'M4Bench'

    DATASET_URL = {
        "State_Invariance": "https://huggingface.co/datasets/Anonymous8976/M4Bench/resolve/main/State_Invariance.tsv", # noqa: E501
        "State_Comparison": "https://huggingface.co/datasets/Anonymous8976/M4Bench/resolve/main/State_Comparison.tsv", # noqa: E501
        "Spatial_Perception": "https://huggingface.co/datasets/Anonymous8976/M4Bench/resolve/main/Spatial_Perception.tsv", # noqa: E501
        "Instance_Comparison": "https://huggingface.co/datasets/Anonymous8976/M4Bench/resolve/main/Instance_Comparison.tsv", # noqa: E501
        "Detailed_Difference": "https://huggingface.co/datasets/Anonymous8976/M4Bench/resolve/main/Detailed_Difference.tsv" # noqa: E501
    }

    DATASET_MD5 = {
        "State_Invariance": "ad9723d478d4696dfc3b18bcaeca89b6",
        "State_Comparison": "41999997360a88e6e388b9a5438a45eb",
        "Spatial_Perception": "7059e29d15ad4379b6f0c0f1801dafe5",
        "Instance_Comparison": "9a7f282d0a092b617147a36693df3461",
        "Detailed_Difference": "f1cd60c1c1144768cd978efce5ba93a8"
    }

    def build_prompt(self, line):
        """
        Build a multimodal prompt (two images + query text) for one record.

        Prefers locally decoded copies of the base64 payloads (cached under
        ``self.img_root`` mirroring the remote layout); falls back to the
        raw URLs when no base64 data is present.

        Raises:
            ValueError: if the record provides neither a usable base64 pair
                nor a URL pair.
        """
        HF_HEADER = "https://huggingface.co/datasets/Anonymous8976/M4Bench/resolve/main/data/"  # noqa: E501

        if isinstance(line, int):
            line = self.data.iloc[line]

        image1_base64 = line.get('image1', '')
        image2_base64 = line.get('image2', '')
        image1_url = line.get('image1_path', '')
        image2_url = line.get('image2_path', '')

        msgs = []

        if image1_base64 and image2_base64 and image1_url and image2_url:
            # Mirror the remote path layout under img_root so repeated runs
            # reuse the decoded files instead of re-decoding base64.
            image1_local_path = osp.join(self.img_root, image1_url.replace(HF_HEADER, ''))
            image2_local_path = osp.join(self.img_root, image2_url.replace(HF_HEADER, ''))

            if not osp.exists(image1_local_path) or not osp.exists(image2_local_path):
                decode_base64_to_image_file(image1_base64, image1_local_path)
                decode_base64_to_image_file(image2_base64, image2_local_path)

            msgs = [
                dict(type='image', value=image1_local_path),
                dict(type='image', value=image2_local_path)
            ]
        elif image1_url and image2_url:
            # No base64 payloads: pass the URLs through directly.
            msgs = [
                dict(type='image', value=image1_url),
                dict(type='image', value=image2_url)
            ]
        else:
            raise ValueError("Both images must be provided either as base64 or URLs.")  # noqa: E501

        msgs.append(dict(type='text', value=line['query']))
        return msgs

    def evaluate(self, eval_file, **judge_kwargs):
        """
        Evaluate predictions in ``eval_file`` against the ground-truth letters.

        If ``judge_kwargs`` is non-empty, an LLM judge maps each free-form
        prediction to an option letter; on any judge failure (or with no
        judge configured) a rule-based parser is used instead.

        Returns:
            dict with 'acc' (overall accuracy, percent) and 'details'
            (path of the per-sample details file written alongside).
        """
        results_df = load(eval_file)

        # Recover the sub-dataset name from the eval file path.
        dataset_name = None
        for name in self.DATASET_URL:
            if name in eval_file:
                dataset_name = name
                break

        if dataset_name is None:
            raise ValueError(
                f"Could not determine dataset name from eval_file path: {eval_file}")  # noqa: E501

        df = results_df.copy()

        def get_ans(s):
            # Rule-based parse: "(A) ..." or a bare leading option letter.
            s = str(s)
            match = re.search(r'^\s*\(([A-Z])\)', s)
            if match:
                return match.group(1)

            for op in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
                if s.startswith(op):
                    return op
            return None

        if judge_kwargs:
            try:
                # Use an LLM as a judge to parse free-form predictions.
                judge = build_judge(**judge_kwargs)

                def extract_question(q):
                    return q.split('\n(')[0]

                def extract_options(q):
                    parts = q.split('\n(')
                    return '\n('.join(parts[1:]) if len(parts) > 1 else ''

                df['question_text'] = df['query'].apply(extract_question)
                df['options_text'] = df['query'].apply(extract_options)

                prompt_tmpl = (
                    'You are an AI assistant who will help me to match '
                    'an answer with several options of a single-choice question. '  # noqa: E501
                    'You are provided with a question, several options, and an answer, '  # noqa: E501
                    'and you need to find which option is most similar to the answer. '  # noqa: E501
                    'If the meaning of all options are significantly different from the answer, output Z. '  # noqa: E501
                    'Your should output a single uppercase character in A, B, C, D (if they are valid options), and Z. \n'  # noqa: E501
                    'Example 1: \n'
                    'Question: What is the main object in image?\nOptions: A. teddy bear B. rabbit C. cat D. dog\n'  # noqa: E501
                    'Answer: a cute teddy bear\nYour output: A\n'
                    'Example 2: \n'
                    'Question: What is the main object in image?\nOptions: A. teddy bear B. rabbit C. cat D. dog\n'  # noqa: E501
                    'Answer: Spider\nYour output: Z\n'
                    'Example 3: \n'
                    'Question: {question}\nOptions: {options}\nAnswer: {prediction}\nYour output: '  # noqa: E501
                )

                prompts = [
                    prompt_tmpl.format(
                        question=row['question_text'],
                        options=row['options_text'],
                        prediction=row['prediction']
                    )
                    for _, row in df.iterrows()
                ]

                parsed_pred = []
                for prompt in tqdm(prompts, desc="Calling judge"):
                    input_msg = [
                        {
                            "role": "user",
                            "content": [
                                {"type": "text", "value": prompt}
                            ]
                        }
                    ]
                    _, judge_output, _ = judge.generate_inner(input_msg)
                    parsed_pred.append(get_ans(judge_output))
                # FIX: assign once after the loop (previously a partial
                # pd.Series was re-assigned on every iteration, NaN-padding
                # the column and relying on index alignment); a plain list
                # assigns positionally regardless of df's index.
                df['parsed_pred'] = parsed_pred

            except Exception as e:
                print(f"Error during judge evaluation: {e}")
                print(DEBUG_MESSAGE)
                # Fall back to the rule-based parser on any judge failure.
                df['parsed_pred'] = df['prediction'].apply(get_ans)
        else:
            # Fallback to simple parsing if no judge is provided
            df['parsed_pred'] = df['prediction'].apply(get_ans)

        # Exact letter match against the ground-truth response column.
        df['score'] = (df['parsed_pred'] == df['response'])

        # Save detailed per-sample results next to the eval file.
        details_file = get_intermediate_file_path(eval_file, '_details')
        dump(df, details_file)

        acc = df['score'].mean() * 100
        return {'acc': acc, 'details': details_file}
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/miabench.py b/VLMEvalKit-sudoku/vlmeval/dataset/miabench.py
new file mode 100644
index 0000000000000000000000000000000000000000..c33f3510b888cc6c2c129385109edb478362ca72
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/miabench.py
@@ -0,0 +1,166 @@
+import json
+import os
+
+import pandas as pd
+
+from .image_base import ImageBaseDataset
+from ..smp import *
+from .utils import build_judge, DEBUG_MESSAGE
+from ..utils import track_progress_rich
+
+
def generate_prompt(d):
    """
    Build the LLM-judge grading prompt for one MIA-Bench record.

    Args:
        d: mapping (dict / pandas row) with keys 'question',
           'component_weight' (stringified list of numbers), 'components'
           (stringified list of str), 'num_of_component', 'prediction'.

    Returns:
        The grading prompt string. Wording for 1 and 2 components matches
        the original special-cased phrasing exactly; 3+ components use the
        generic Oxford-comma phrasing (the original hard-coded branches for
        1-5 only and raised UnboundLocalError above 5).
    """
    question = d['question']
    # NOTE(security): eval() on dataset-provided fields — assumes the TSV is
    # trusted; use ast.literal_eval if the source could ever be untrusted.
    weights = eval(d['component_weight'])
    components = eval(d['components'])
    num_of_component = int(d['num_of_component'])
    response = d['prediction']

    ordinals = ['first', 'second', 'third', 'fourth', 'fifth',
                'sixth', 'seventh', 'eighth', 'ninth', 'tenth']

    # Component listing, e.g. "The first component is: 'a', and the second
    # component is 'b'. " — the generic join reproduces the original wording
    # for every count, including the single-component case.
    parts = [f"The first component is: '{components[0]}'"]
    parts += [
        f"and the {ordinals[i]} component is '{components[i]}'"
        for i in range(1, num_of_component)
    ]
    components_str = ', '.join(parts) + '. '

    # Weight sentence: keep the original's irregular phrasings for 1 and 2
    # components (including their trailing space); 3+ are generic (and, as
    # in the original, carry no trailing space).
    used = weights[:num_of_component]
    if num_of_component == 1:
        score_str = f"The first component is worth: {weights[0]} scores. "
    elif num_of_component == 2:
        score_str = f"The first and second component is each worth {weights[0]} and {weights[1]} scores. "
    else:
        name_list = ', '.join(ordinals[:num_of_component - 1]) + f", and {ordinals[num_of_component - 1]}"
        weight_list = ', '.join(str(w) for w in used[:-1]) + f", and {used[-1]}"
        score_str = f"The {name_list} component is each worth {weight_list} scores."

    return (
        "Here is an instruction for a multimodal LLM: '"
        f"{question}"
        "'. You need to grade if the response from the model follows each component of the instruction. "
        f"{components_str}"
        "The response is: '"
        f"{response}"
        "'. You need to score the response and be strict. The total score ranges from 0 to 10, "
        "depending on if the response follows the instruction. "
        f"{score_str}"
        "List scores of each component, and the total score in one sentence in this format: "
        "score of component 1: x/2, score of component 2: y/8, total score: z/10. Then explain your reasons."
    )
+
+
def process_rawscore(component_type, raw_score):
    """
    Parse the judge's summary sentence into per-component fractional scores.

    Expects the first sentence of ``raw_score`` to be a comma-separated
    sequence of "<label>: <num>/<den>" chunks — one per component, plus a
    final total, e.g.
    "score of component 1: x/2, score of component 2: y/8, total score: z/10".

    Returns:
        {component_name: fraction, ..., 'total_score': fraction}

    The original read the loop index after the loop to find the total,
    which raised NameError when only the total chunk was present; the last
    chunk is now addressed explicitly.
    """
    chunks = raw_score.split('.')[0].split(',')
    score_dict = {}
    # Every chunk except the last is a per-component score, in order; zip
    # stops at the shorter of (names, chunks).
    for name, chunk in zip(component_type, chunks[:-1]):
        frac = chunk.split(':')[1][1:].split('/')
        score_dict[name] = int(frac[0]) / int(frac[1])
    total = chunks[-1].split(':')[1][1:].split('/')
    score_dict['total_score'] = int(total[0]) / int(total[1])
    return score_dict
+
+
def get_score_dict(data, score_raw):
    """
    Aggregate per-component judge scores across all samples.

    Args:
        data: table with a 'component_type' column of stringified lists
              (e.g. "['a', 'b']"); assumed positionally indexed 0..len-1.
        score_raw: sequence of raw judge outputs, one per sample.

    Returns:
        Mean score per component name (plus 'total_score'), averaged over
        the samples whose judge output parsed successfully.
    """
    cat_score_dict = {}
    for i in range(len(data)):
        try:
            # Strip the "['" / "']" wrapper, then split on the "', '" joiner.
            cmp = data['component_type'][i][2:-2]
            cmp_list = cmp.split('\', \'')
            score_dict = process_rawscore(cmp_list, score_raw[i])
            for key, val in score_dict.items():
                cat_score_dict.setdefault(key, []).append(val)
        except Exception:
            # Best-effort: one malformed judge output must not abort the
            # whole aggregation. (Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            continue
    return {key: sum(vals) / len(vals) for key, vals in cat_score_dict.items()}
+
+
class MIABench(ImageBaseDataset):
    """MIA-Bench: free-form VQA scored by an LLM judge against weighted
    per-sample instruction components (prompt built by generate_prompt)."""
    TYPE = 'VQA'

    DATASET_URL = {
        'MIA-Bench': 'https://opencompass.openxlab.space/utils/VLMEval/Mia-Bench.tsv',
    }
    DATASET_MD5 = {
        'MIA-Bench': '0b9de595f4dd40af18a69b94d89aba82',
    }

    # NOTE(review): decorated @classmethod but the first parameter is named
    # `self`; it actually receives the class object. `cls` would be the
    # conventional name.
    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Judge every prediction with an LLM and aggregate component scores.

        Writes `<eval_file>_<judge>` (per-sample raw judge output, resumable
        through a .pkl of partial responses) plus a `_score` CSV, and returns
        the dict of per-component average scores.
        """
        judge_name = judge_kwargs.pop('model', 'gpt-4o')

        model = build_judge(model=judge_name, **judge_kwargs)

        # NOTE(review): these noqa F841 markers look stale — all three names
        # are used below.
        storage = get_intermediate_file_path(eval_file, f'_{judge_name}')  # noqa: F841
        tmp_file = get_intermediate_file_path(eval_file, f'_{judge_name}', 'pkl')  # noqa: F841
        nproc = judge_kwargs.pop('nproc', 4)  # noqa: F841

        if not osp.exists(storage):
            data = load(eval_file)
            num_samples = len(data)
            lines = [data.loc[i] for i in range(num_samples)]
            prompts = [generate_prompt(line) for line in lines]
            # The eval file carries no images; pull the base64 payloads from
            # the original dataset TSV, keyed by sample index.
            org_data = MIABench('MIA-Bench').data
            img_map = {x: y for x, y in zip(org_data['index'], org_data['image'])}
            image_b64 = [img_map[idx] for idx in data['index']]
            indices = list(data['index'])
            mm_messages = [
                dict(message=[
                    dict(type='text', value=prompt),
                    dict(type='image', value=f'data:image/jpeg;base64,{b64}')
                ])
                for prompt, b64 in zip(prompts, image_b64)
            ]

            # Resume support: previously judged samples live in the tmp pickle.
            res = {}
            if osp.exists(tmp_file):
                res = load(tmp_file)

            jobs = {k: v for k, v in zip(indices, mm_messages) if k not in res}
            job_keys = list(jobs.keys())
            job_vals = [jobs[k] for k in job_keys]

            resps = track_progress_rich(
                model.generate,
                job_vals,
                nproc=nproc,
                chunksize=nproc,
                keys=job_keys,
                save=tmp_file,
            )
            for k, resp in zip(job_keys, resps):
                res[k] = resp
            data['score_raw'] = [res[idx] for idx in indices]
            dump(data, storage)

        goresult = load(storage)
        results = get_score_dict(goresult, goresult['score_raw'])
        result_pth = get_intermediate_file_path(storage, '_score', 'csv')
        # NOTE(review): from_dict is given a list of (key, value) pairs here;
        # plain pd.DataFrame(list(results.items())) would be the conventional
        # call — confirm the produced CSV shape before changing.
        results_pd = pd.DataFrame.from_dict(list(results.items()))
        dump(results_pd, result_pth)

        return results
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/mmbench_video.py b/VLMEvalKit-sudoku/vlmeval/dataset/mmbench_video.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2ada230c16e62d7610e1e98f59f788a6c65400f
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/mmbench_video.py
@@ -0,0 +1,257 @@
+from huggingface_hub import snapshot_download
+from ..smp import *
+from ..smp.file import get_intermediate_file_path, get_file_extension
+from .video_base import VideoBaseDataset
+from .utils import build_judge, DEBUG_MESSAGE
+from ..utils import track_progress_rich
+
+
# Sentinel string marking predictions for which the judge/API call failed.
FAIL_MSG = 'Failed to obtain answer via API.'
+
+
def unwrap_hf_pkl(pth, suffix='.mp4'):
    """
    Restore video files bundled inside pickle archives under ``pth``.

    Reads every regular file in ``<pth>/video_pkl/`` (each a pickled
    {video_name: raw_bytes} dict) and writes each entry to
    ``<pth>/video/<video_name><suffix>``. The whole step is skipped when the
    target directory already exists (treated as "already restored").
    """
    base_dir = os.path.join(pth, 'video_pkl/')
    target_dir = os.path.join(pth, 'video/')
    pickle_files = [os.path.join(base_dir, file) for file in os.listdir(base_dir)]
    pickle_files.sort()  # deterministic restore order

    if not os.path.exists(target_dir):
        os.makedirs(target_dir, exist_ok=True)
        for pickle_file in pickle_files:
            # FIX: skip stray directories / non-files in video_pkl/ —
            # open(dir, 'rb') would raise IsADirectoryError.
            if not os.path.isfile(pickle_file):
                continue
            # NOTE(security): pickle.load on downloaded data — acceptable
            # only because the archive comes from the trusted dataset repo.
            with open(pickle_file, 'rb') as file:
                video_data = pickle.load(file)
            # For each video in the archive, write its raw bytes to disk.
            for video_name, video_content in video_data.items():
                output_path = os.path.join(target_dir, f'{video_name}{suffix}')
                with open(output_path, 'wb') as output_file:
                    output_file.write(video_content)
        print('The video file has been restored and stored from the pickle file.')
    else:
        print('The video file already exists.')
+
+
class MMBenchVideo(VideoBaseDataset):
    """MMBench-Video: video VQA judged by an LLM.

    Two prompting modes: 'pack' sends all questions for one video in a single
    JSON-formatted request; 'nopack' sends one question per request (to the
    video directly for video LLMs, or as sampled frames otherwise).
    """

    # MD5 of the dataset TSV, used by the cache integrity check.
    MD5 = '98f7df3eb1007fc375ea6fe88a98e2ff'
    SYS = 'You are an AI assistant responsible for answering questions about videos.'
    FRAMES_TMPL_PACK = """
You will be provided with {} separate frames uniformly sampled from a video, \
the frames are provided in chronological order of the video.
Please analyze these images and provide the answer / answers to the \
following question / questions about the video content.
If multiple questions are provided (with indices I1, I2, I3, ...), \
you should organize your answers in the following json format:
{{
 'I1': 'Answer to Question I1',
 'I2': 'Answer to Question I2',
 ...
}}
Otherwise, please directly reply with your response to the only question.
Even if the information in these separate frames is not enough to give an answer,
PLEASE GIVE A RESPONSE TO EACH OF THE QUESTIONS IN THE FORMAT DESCRIBED ABOVE.
"""

    FRAMES_TMPL_NOPACK = """
You will be provided with {} separate frames uniformly sampled from a video, \
the frames are provided in chronological order of the video.
Please analyze these images and provide the answer to the question about the video content.
Please directly reply with your response to the only question.
"""

    TYPE = 'Video-VQA'

    def __init__(self, dataset='MMBench-Video', pack=False, nframe=0, fps=-1):
        super().__init__(dataset=dataset, pack=pack, nframe=nframe, fps=fps)

    @classmethod
    def supported_datasets(cls):
        return ['MMBench-Video']

    def prepare_dataset(self, dataset_name='MMBench-Video', repo_id='opencompass/MMBench-Video'):
        """Download (or reuse the cached copy of) the dataset; returns a dict
        with the TSV path and the video root directory."""
        def check_integrity(pth):
            # Cache is valid only if the TSV matches MD5 and every referenced
            # video file exists on disk.
            data_file = osp.join(pth, f'{dataset_name}.tsv')
            if md5(data_file) != self.MD5:
                return False
            data = load(data_file)
            for video_pth in data['video_path']:
                if not osp.exists(osp.join(pth, video_pth)):
                    return False
            return True

        cache_path = get_cache_path(repo_id)
        if cache_path is not None and check_integrity(cache_path):
            dataset_path = cache_path
        else:
            if modelscope_flag_set():
                from modelscope import dataset_snapshot_download
                dataset_path = dataset_snapshot_download(dataset_id=repo_id)
            else:
                dataset_path = snapshot_download(repo_id=repo_id, repo_type='dataset')
            # Videos ship as pickled blobs; unpack them into video/.
            unwrap_hf_pkl(dataset_path)
        self.video_path = osp.join(dataset_path, 'video/')
        data_file = osp.join(dataset_path, f'{dataset_name}.tsv')

        return dict(data_file=data_file, root=osp.join(dataset_path, 'video'))

    def build_prompt_pack(self, line):
        """Build one frames+text message carrying ALL questions for the
        video referenced by `line` (int index, row, or video name)."""
        if isinstance(line, int):
            assert line < len(self)
            video = self.videos[line]
        elif isinstance(line, pd.Series):
            video = line['video']
        elif isinstance(line, str):
            video = line

        frames = self.save_video_frames(video)
        # All rows that ask about this video are packed into one request.
        sub = self.data[self.data['video'] == video]
        sys_prompt = self.SYS + self.FRAMES_TMPL_PACK.format(len(frames))
        message = [dict(type='text', value=sys_prompt)]
        for im in frames:
            message.append(dict(type='image', value=im))
        nq = len(sub)
        prompt = 'Questions: \n{}\nAnswers: \n'
        # Keyed by dataset index so answers can be matched back per-question.
        qs = {int(sub.iloc[i]['index']): sub.iloc[i]['question'] for i in range(nq)}
        prompt = prompt.format(json.dumps(qs))
        message.append(dict(type='text', value=prompt))
        return message

    def build_prompt_nopack(self, line, video_llm):
        """Build a single-question message: raw video for video LLMs,
        sampled frames otherwise."""
        if isinstance(line, int):
            assert line < len(self)
            line = self.data.iloc[line]
        if video_llm:
            question = line['question']
            prefix, video_idx_path = os.path.split(line['video_path'])
            message = [dict(type='text', value=question)]
            message.append(dict(type='video', value=os.path.join(self.video_path, video_idx_path)))
            return message
        else:
            frames = self.save_video_frames(line['video'])
            sys_prompt = self.FRAMES_TMPL_NOPACK.format(len(frames))
            message = [dict(type='text', value=sys_prompt)]
            for im in frames:
                message.append(dict(type='image', value=im))
            prompt = 'Question: {}\nAnswer: '.format(line['question'])
            message.append(dict(type='text', value=prompt))
            return message

    def build_prompt(self, line, video_llm):
        # Packing is only meaningful for frame-based models.
        if self.pack and not video_llm:
            return self.build_prompt_pack(line)
        else:
            return self.build_prompt_nopack(line, video_llm)

    @staticmethod
    def remove_side_quote(s, syms=[',', '"', "'"]):
        """Strip quote/comma characters from both ends of `s`; returns ''
        when `s` consists solely of such characters."""
        if np.all([x in syms for x in s]):
            return ''
        while s[0] in syms:
            s = s[1:]
        while s[-1] in syms:
            s = s[:-1]
        return s

    @staticmethod
    def robust_json_load(s):
        """Best-effort parse of a single JSON object out of model output;
        falls back to line-by-line 'key: value' scraping. Returns None on
        failure."""
        try:
            jsons = list(extract_json_objects(s))
            assert len(jsons) == 1
            return jsons[0]
        except:
            # Fallback only when there is exactly one '{' in the string.
            if '{' in s and s.find('{') == s.rfind('{'):
                sub_str = s[s.find('{') + 1:].strip()
                lines = sub_str.split('\n')
                res = {}
                for l in lines:
                    l = l.strip()
                    if ': ' in l:
                        key = l.split(': ')[0].strip()
                        val = l.split(': ')[1].strip()
                        key = MMBenchVideo.remove_side_quote(key)
                        val = MMBenchVideo.remove_side_quote(val)
                        if len(key) and len(val):
                            res[key] = val
                return res
            return None

    def load_pack_answers(self, data_raw):
        """Explode packed {index: answer} JSON replies back into one
        prediction per dataset row; returns (meta, parse statistics)."""
        vstats = defaultdict(lambda: 0)
        data = defaultdict(lambda: {})

        for k in data_raw:
            ans = data_raw[k].strip()
            if FAIL_MSG in ans:
                vstats['GEN_FAIL'] += 1
                continue
            res = self.robust_json_load(ans)
            if res is not None:
                data[k] = res
                vstats['PARSE_OK'] += 1
            else:
                vstats['PARSE_FAIL'] += 1

        meta = cp.deepcopy(self.data)
        lt = len(meta)
        prediction = []
        for i in range(lt):
            line = meta.iloc[i]
            vid = line['video']
            idx = str(line['index'])
            prediction.append(data[vid][idx] if idx in data[vid] else None)
        meta['prediction'] = prediction
        vstats['VALIDQ'] = len([x for x in prediction if x is not None])
        vstats['INVALIDQ'] = len([x for x in prediction if x is None])
        return meta, vstats

    # It returns a dictionary
    # NOTE(review): decorated @classmethod but the first parameter is named
    # `self`; it actually receives the class object.
    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Score each prediction 0-3 with an LLM judge (resumable via a tmp
        pickle) and return the per-dimension rating dict."""
        from .utils.mmbench_video import get_dimension_rating, system_prompt, build_prompt

        assert get_file_extension(eval_file) in ['xlsx', 'json', 'tsv'], 'data file should be an supported format (xlsx/json/tsv) file'  # noqa: E501
        judge = judge_kwargs['model']
        nproc = judge_kwargs.pop('nproc', 4)

        tmp_file = get_intermediate_file_path(eval_file, f'_{judge}_tmp', 'pkl')
        tgt_file = get_intermediate_file_path(eval_file, f'_{judge}_rating', 'json')
        score_file = get_intermediate_file_path(eval_file, f'_{judge}_score')

        model = build_judge(system_prompt=system_prompt, **judge_kwargs)
        assert model.working(), 'MMBench-Video evaluation requires a working OPENAI API\n' + DEBUG_MESSAGE

        if not osp.exists(score_file):
            # Resume: drop cached judge replies that were failures.
            res = {} if not osp.exists(tmp_file) else load(tmp_file)
            res = {k: v for k, v in res.items() if model.fail_msg not in v}

            data = load(eval_file)
            data_un = data[~data['index'].isin(res)]
            data_un = data_un[~pd.isna(data_un['prediction'])]
            lt = len(data_un)
            prompts = [build_prompt(data_un.iloc[i]) for i in range(lt)]
            indices = [data_un.iloc[i]['index'] for i in range(lt)]

            if len(prompts):
                _ = track_progress_rich(
                    model.generate,
                    prompts,
                    keys=indices,
                    save=tmp_file,
                    nproc=nproc,
                    chunksize=nproc
                )
            score_map = load(tmp_file)
            data['score'] = [score_map[idx] if idx in score_map else -1 for idx in data['index']]
            rejected = [x for x in score_map.values() if FAIL_MSG in x]
            # Non-integer judge replies (e.g. fail messages) become -1.
            data['score'] = [int(x) if istype(x, int) else -1 for x in data['score']]
            print(
                f'Among {len(data)} questions, failed to obtain prediction for {len(data) - len(score_map)} questions, '
                f'failed to obtain the score for another {len(rejected)} questions. '
                f'Those questions will be counted as 0 score in ALL rating, and will not be counted in VALID rating.'
            )

            dump(data, score_file)

        rating = get_dimension_rating(score_file)
        dump(rating, tgt_file)
        return rating
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/mmifeval.py b/VLMEvalKit-sudoku/vlmeval/dataset/mmifeval.py
new file mode 100644
index 0000000000000000000000000000000000000000..6dcfd1f3849f2754a0340618c1cd667f8eca4ba9
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/mmifeval.py
@@ -0,0 +1,483 @@
+# flake8: noqa
+import re
+
+from .image_base import ImageBaseDataset
+from .utils import build_judge, DEBUG_MESSAGE
+from ..smp import *
+from ..smp.file import get_intermediate_file_path
+from ..utils import track_progress_rich
+from ..dataset.utils.mmif.function_and_compare import *
+
logger = get_logger("MMIFEval")

# Module-level state shared between MMIFEval.evaluate and the judging
# helpers: aux_data_dict caches the no-constraint predictions consumed by
# cmp_gpt judging; judge_model is the judge instance used by run_once_*.
aux_data_dict = {}
judge_model = None
+
+# img_dict = {}
+# <<< prompt >>>
+
+
def generate_eval_pt_c_level(constraints, prediction):
    """Compose the C-Level judge prompt: the model response followed by a
    numbered list of constraints, each to be scored 0 or 1."""
    numbered = [
        "Constraint_{}: {}".format(idx, item["value"])
        for idx, item in enumerate(constraints, start=1)
    ]
    template = """\
Your task is to evaluate whether the response from an AI assistant adheres to all of the given constraints. \
Please follow the requirements below to make the judgment:
1. Be strict and consistent in your assessment.
2. You should refer to the content of image to make the judgment.
3. For each constraint, if the response fails to fully meet the constraint, \
give it a score of 0. Otherwise, give it a score of 1.


{prediction}



{constraints_str}


You must evaluate and provide an explanation for each constraint listed, ensuring no constraint is omitted. \
At the end, summarize the scores for all constraints in one sentence.

Your output should strictly follow the format below:
Judgement: ...
Summary: Score of constraint_1: x/1, Score of constraint_2: x/1, Score of constraint_3: x/1, ..., Score of \
constraint_n: x/1.
"""
    return template.format(prediction=prediction, constraints_str="\n".join(numbered))
+
+
def generate_eval_pt_p_level(question, prediction, ground_truth):
    """Compose the P-Level judge prompt: decide whether the model answer
    covers every item of the ground-truth list ('right' / 'wrong')."""
    template = """\
You are an expert evaluator. Your task is to extract the answer from the model output and \
compare it with the ground truth list \
to determine whether the model answer covers all the points in the ground truth list. \
The ground truth list is provided as a JSON array of strings, and the model answer is a text string. \
An answer is considered correct if every element from the ground truth list appears in the model \
answer (substring matching is acceptable). \
The order does not matter. \

Your response should only be 'right' if the model answer fully covers the ground truth, or 'wrong' if it does not. \
Do not provide any additional commentary.

Question: {question}
Response from the model: {prediction}
Ground Truth List: {ground_truth}
"""
    return template.format(
        question=question, prediction=prediction, ground_truth=ground_truth
    )
+
+
def generate_cmp_pt(constraint, pred_with_constraint, pred_without_constraint):
    """Compose the cmp_gpt judge prompt: contrast the constrained and
    unconstrained responses to decide whether the constraint was honored.

    The template wording (including the existing 'respone' typo) is kept
    byte-for-byte so judge behavior stays reproducible.
    """
    template = """\
You are an expert in judging whether the respone follow the given constraint. \
Your task is to assess whether the model's response satisfies \
the given constraint and return True or False. I will provide you \
with the constraint and the model's response under this constraint. \
To assist with your evaluation, I will also provide you with the model's response \
to the same question without the constraint.


{constraint}



{pred_with_constraint}



{pred_without_constraint}


**Please follow the steps below to evaluate**:
Step 1. Compare the model's response under the constraint with its response without the constraint. \
If you believe these two answers \
are very similar, it means the model has not fully considered the impact of the constraint on the answer. \
Please return False.
Step 2. Compare the model's response under the constraint with the content of the constraint. If you believe the model's response \
does not meet the requirements specified in the constraint, return False. Otherwise, \
if the response effectively satisfies the constraint, return True.

Start by briefly explaining your reasoning based on the above steps. At the end, provide a one-sentence \
summary of your evaluation.

Your output must strictly follow this format:
Reasoning: ...
Summary: "True" / "False".
"""
    return template.format(
        constraint=constraint,
        pred_with_constraint=pred_with_constraint,
        pred_without_constraint=pred_without_constraint,
    )
+
+
+# <<< re >>>
+# extract score from gpt_resp
+# format: Score of instruction: x/1, Score of constraint_1: y/1, Score of constraint_2: z/1, ..., Score of constraint_n: w/1.
+# return: score_dict {'instruction': x/1, 'constraint_1': y/1,
+# 'constraint_2': z/1, ..., 'constraint_n': w/1}
+
+
def extract_score_from_direct_gpt_resp(raw_score):
    """Parse every "Score of <name>: <num>/<den>" pair out of a judge reply.

    Whitespace is normalized and markdown asterisks are stripped first;
    names are lower-cased. Returns {name: fraction}.

    Raises:
        ValueError: if no score pair can be found.
    """
    score_pattern = re.compile(
        r"Score\s+of\s+([a-zA-Z0-9_\-]+):\s*(\d+)\s*/\s*(\d+)", re.IGNORECASE
    )
    # Collapse whitespace runs, then drop any '*' markdown emphasis.
    cleaned = re.sub(r"\s+", " ", raw_score).strip().replace("*", "")

    matches = score_pattern.findall(cleaned)
    if not matches:
        raise ValueError("raw_score format is incorrect, cannot parse scores")

    # Duplicate names keep the last occurrence, as in a sequential loop.
    return {
        name.strip().lower().replace(" ", "_"): int(num) / int(den)
        for name, num, den in matches
    }
+
+
+# extract score from gpt_resp
+# format: right or wrong
+# return: score
+
+
def extract_score_from_p_level_gpt_resp(raw_score):
    """Map a P-Level judge verdict to 1 ('right') or 0 ('wrong').

    Exact matches are handled first; otherwise a case-insensitive substring
    search is used, with 'right' taking precedence over 'wrong'.

    Raises:
        ValueError: if neither word appears in the reply.
    """
    exact = {"right": 1, "wrong": 0}
    if raw_score in exact:
        return exact[raw_score]
    for word, score in (("right", 1), ("wrong", 0)):
        if re.search(word, raw_score, re.IGNORECASE):
            return score
    raise ValueError("raw_score format is incorrect, cannot parse scores")
+
+
+# extract score from gpt_resp
+# format: True or False
+# return: score
+
+
def extract_score_from_cmp_gpt_resp(response_text):
    """Extract the True/False verdict after the LAST 'summary' marker and
    map it to 1/0.

    Raises:
        ValueError: when no 'summary' marker or no True/False token follows it.
    """
    marker_pos = response_text.lower().rfind("summary")
    if marker_pos == -1:
        raise ValueError("No 'summary' found in response.")

    tail = response_text[marker_pos + len("summary"):]
    # Word-boundary match tolerates quotes/markdown around the verdict.
    verdict = re.search(r"\b(true|false)\b", tail, re.IGNORECASE)
    if verdict is None:
        raise ValueError("No valid 'True' or 'False' found after 'summary'.")
    return 1 if verdict.group(1).lower() == "true" else 0
+
+
+# <<< gpt >>>
+
+
def run_once_with_image(pt, image, retry=4):
    """Send a text+image request to the module-level judge model, retrying
    on exceptions.

    Args:
        pt: the prompt text.
        image: base64-encoded JPEG payload (data-URL prefix added here).
        retry: number of attempts before giving up.

    Returns:
        The judge's reply, or None when every attempt raised.
        (Previously `return ans` after exhausted retries hit an unbound
        local and raised UnboundLocalError.)
    """
    global judge_model
    prefix = "data:image/jpeg;base64,"
    img = prefix + image
    messages = [dict(type="text", value=pt), dict(type="image", value=img)]
    ans = None  # FIX: ensure a defined value if all retries fail
    while retry:
        try:
            ans = judge_model.generate(messages)
            return ans
        except Exception as e:
            logger.info(f"Error in run_once_with_image: {e}")
            retry -= 1
    return ans
+
+
def run_once_without_image(pt, retry=3):
    """Send a text-only request to the module-level judge model, retrying
    on exceptions.

    Returns:
        The judge's reply, or None when every attempt raised.
        (Previously `return ans` after exhausted retries hit an unbound
        local and raised UnboundLocalError.)
    """
    global judge_model
    messages = [
        dict(type="text", value=pt),
    ]
    ans = None  # FIX: ensure a defined value if all retries fail
    while retry:
        try:
            ans = judge_model.generate(messages)
            return ans
        except Exception as e:
            logger.info(f"Error in run_once_without_image: {e}")
            retry -= 1
    return ans
+
+
+# <<< score >>>
+
+
def judge_one_item(item, retry=3):
    """Judge one JSON-serialized sample; returns (status, message, score_dict).

    status 0 => success, status 1 => a C-Level item exhausted all retries.
    P-Level items get a single right/wrong score. C-Level items are scored
    per-constraint via three judge methods: batched 'direct_gpt' (one
    image-grounded prompt for all such constraints), local 'rule_based'
    checks dispatched through globals(), and pairwise 'cmp_gpt' comparisons
    against the no-constraint predictions stashed in ``aux_data_dict``.
    """
    global aux_data_dict
    item = json.loads(item)
    num_retry = 0
    while num_retry < retry:
        if item.get("tag", None) == "P-Level":
            # in tsv file, answer is a string, need to be converted to list
            pt = generate_eval_pt_p_level(item["question"], item["prediction"], json.loads(item["answer"]))
            gpt_resp = run_once_without_image(pt)
            try:
                score = extract_score_from_p_level_gpt_resp(gpt_resp)
                return (
                    0,
                    "success",
                    {
                        "total_score": score,
                        "gpt_resp": gpt_resp,
                    },
                )
            except Exception as e:
                logger.error(f"\nError:\n{e}\nItem:\n{item}\ngpt_resp:\n{gpt_resp}\n")
                num_retry += 1
                continue
        else:  # process C-Level data
            # split into direct_gpt and other
            # direct_gpt can be processed in batch
            # other needs to be processed one by one
            constraint_direct_gpt = []
            constraint_other = []
            for constraint in json.loads(item["constraints"]):
                method = constraint["judge"]["method"]
                if method == "direct_gpt":
                    constraint_direct_gpt.append(constraint)
                else:
                    constraint_other.append(constraint)
            score_dict = {}
            # 1. process direct_gpt: if there is no direct_gpt, instruction is also
            # needed
            if len(constraint_direct_gpt) > 0:
                pt_direct_gpt = generate_eval_pt_c_level(constraint_direct_gpt, item["prediction"])
                gpt_resp = run_once_with_image(pt_direct_gpt, item["image"])
                try:
                    direct_gpt_score_dict = extract_score_from_direct_gpt_resp(gpt_resp)
                    score_dict["gpt_resp_direct_gpt"] = gpt_resp
                    # Judge numbers constraints 1..n in prompt order.
                    for i, constraint in enumerate(constraint_direct_gpt):
                        score_dict[constraint["key"]] = direct_gpt_score_dict[f"constraint_{i + 1}"]
                except Exception as e:
                    logger.error(
                        f"\nError:\n{e}\nItem:\n{item}\npt_direct_gpt:\n{pt_direct_gpt}\ngpt_resp:\n{gpt_resp}"
                    )
                    num_retry += 1
                    continue
            # 2. process rule_based
            for constraint in constraint_other:
                if constraint["judge"]["method"] == "rule_based":
                    # call function according to constraint["judge"]["verify_funcs"]
                    # maybe a list of function names (str)
                    # func in function_and_compare.py
                    # example: {"method": "rule_based", "verify_funcs": [{"func":
                    # "check_whether_response_paragraph_number_in_range", "params":
                    # [3, 3]}]}}
                    score = 1.0
                    for func_dict in constraint["judge"]["verify_funcs"]:
                        # Verify functions are imported via the star-import
                        # from function_and_compare and resolved by name.
                        func = globals()[func_dict["func"]]
                        # use * to unpack the list, ** is used for dict
                        judge_result = func(item["prediction"], *func_dict["params"])
                        if not judge_result:  # False -> score = 0
                            score = 0.0
                            break
                    score_dict[constraint["key"]] = score
            # 3. process cmp_gpt
            for constraint in constraint_other:
                if constraint["judge"]["method"] == "cmp_gpt":
                    del_cons_prediction = aux_data_dict[item["id"]][constraint["key"]]
                    pt = generate_cmp_pt(constraint["value"], item["prediction"], del_cons_prediction)
                    gpt_resp = run_once_without_image(pt)
                    try:
                        score = extract_score_from_cmp_gpt_resp(gpt_resp)
                        score_dict[constraint["key"]] = score
                        score_dict[f"gpt_resp_cmp_gpt_{constraint['key']}"] = gpt_resp
                    except Exception as e:
                        logger.error(f"\nError:\n{e}\nItem:\n{item}\ngpt_resp:\n{gpt_resp}")
                        num_retry += 1
                        # NOTE(review): this `continue` targets the inner
                        # `for constraint` loop, not the retry `while` — a
                        # failed parse skips the constraint (bumping
                        # num_retry) instead of retrying the item. Confirm
                        # whether that is intended.
                        continue
            # add total_score
            total_score = 0.0
            cnt = 0
            for key, value in score_dict.items():
                # gpt_resp_* entries are raw transcripts, not scores.
                if key.startswith("gpt_resp_"):
                    continue
                total_score += value
                cnt += 1
            score_dict["total_score"] = total_score / cnt
            logger.info(f"score_dict:\n{score_dict}")
            return 0, "success", score_dict
    return 1, "C-Level, fail in judge", {}
+
+
class MMIFEval(ImageBaseDataset):
    """MM-IFEval: multimodal instruction-following evaluation dataset.

    Each "main" prediction is judged per-constraint (rule-based checks,
    direct GPT grading, or GPT comparison against ablated "aux" predictions)
    and the per-item scores are aggregated into P-Level / C-Level accuracies.
    """

    TYPE = "VQA"

    # TODO: add dataset url and md5
    DATASET_URL = {"MM-IFEval": 'https://opencompass.openxlab.space/utils/VLMEval/MM-IFEval.tsv'}
    DATASET_MD5 = {
        "MM-IFEval": '973bb839961a449565073a5ee70ae7a6'
    }

    # Given one data record, return the built prompt (a multi-modal message), can override
    # Actually, all lines have single image
    def build_prompt(self, line):
        """Build a text-first multimodal message (question text, then image(s))."""
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line["image_path"])
        else:
            tgt_path = self.dump_image(line)

        question = line["question"]

        # save images for evaluation
        # global img_dict
        # img_dict[line["index"]] = line["image"]

        msgs = []
        if isinstance(tgt_path, list):
            msgs.extend([dict(type="image", value=p) for p in tgt_path])
        else:
            msgs = [dict(type="image", value=tgt_path)]

        # WildVision adopts text first
        msgs = [dict(type="text", value=question)] + msgs

        return msgs

    # score for the infer file
    # @classmethod

    def evaluate(self, eval_file, **judge_kwargs):
        """Judge every prediction and report P-Level / C-Level / overall accuracy.

        Uses module-level globals (`aux_data_dict`, `judge_model`) so the
        multiprocessing worker `judge_one_item` can see them. Judge results
        are cached in a tmp pickle and a per-model jsonl (`storage`) so an
        interrupted run can resume without re-querying the judge.
        """
        raw_bench_data = MMIFEval("MM-IFEval").data
        global aux_data_dict
        model = judge_kwargs["model"]
        storage = get_intermediate_file_path(eval_file, f"_{model}", "jsonl")
        score_file = get_intermediate_file_path(eval_file, f"_{model}_score", "csv")
        tmp_file = get_intermediate_file_path(eval_file, f"_{model}_tmp", "pkl")
        nproc = judge_kwargs.pop("nproc", 4)

        data_all = load(eval_file).to_dict(orient="records")

        # Split records: "main" rows are judged; the rest are auxiliary
        # predictions with one constraint removed (used by cmp_gpt judging).
        main_data = []
        aux_data = []
        for i, line in enumerate(data_all):
            if line.get("infer_type", None) == "main":
                main_data.append(line)
            else:
                aux_data.append(line)

            # Re-attach the original image (the prediction file drops it).
            # NOTE(review): assumes data_all row order matches raw_bench_data
            # row order — confirm against the inference pipeline.
            line["image"] = raw_bench_data.iloc[i]["image"]

        # aux_data_dict[item_id][deleted_constraint_key] -> ablated prediction
        aux_data_dict = {}
        for line in aux_data:
            assert line["infer_type"] == "aux_cmp_gpt"
            del_cons = line["del_cons"]
            if line["id"] not in aux_data_dict:
                aux_data_dict[line["id"]] = {}
            aux_data_dict[line["id"]][del_cons] = line["prediction"]

        # params
        params_all = [json.dumps(item) for item in main_data]
        indices_all = [line["id"] for line in main_data]

        # Resume support: keep only entries previously judged successfully
        # (ret code 0); failed ones are retried below.
        ans = {}
        if os.path.exists(tmp_file):
            ans_tuples = load(tmp_file)
            for k, v in ans_tuples.items():
                if v[0] == 0:
                    ans[k] = {"eval_ret_code": v[0], "eval_msg": v[1], "eval_score_dict": v[2]}
            # ans is a dict
            logger.info(f"Tmp file exists, loaded {len(ans)} data from {tmp_file}")

        tups = [x for x, i in zip(params_all, indices_all) if i not in ans]
        indices = [i for i in indices_all if i not in ans]

        # judge
        if not osp.exists(storage):
            # judge_kwargs['system_prompt'] = SYSTEM_PROMPT
            judge_kwargs["temperature"] = 0
            judge_kwargs["img_detail"] = "high"
            judge_kwargs["timeout"] = 300
            global judge_model
            judge_model = build_judge(max_tokens=4096, **judge_kwargs)

            assert judge_model.working(), "MMIFEval evaluation requires a working OPENAI API\n" + DEBUG_MESSAGE

            if len(indices):
                new_results = track_progress_rich(
                    judge_one_item,
                    tups,
                    nproc=nproc,
                    chunksize=nproc,
                    keys=indices,
                    save=tmp_file,
                )
                for k, v in zip(indices, new_results):
                    ans[k] = {"eval_ret_code": v[0], "eval_msg": v[1], "eval_score_dict": v[2]}
            else:
                # Everything came from the tmp cache; normalize any raw tuples.
                for k, v in ans.items():
                    if isinstance(v, tuple):
                        ans[k] = {"eval_ret_code": v[0], "eval_msg": v[1], "eval_score_dict": v[2]}
            # Images are no longer needed and are not JSON-serializable.
            for item in main_data:
                item.pop("image")

            for item in main_data:
                item["eval_ret_code"] = ans[item["id"]]["eval_ret_code"]
                item["eval_msg"] = ans[item["id"]]["eval_msg"]
                item["eval_score_dict"] = ans[item["id"]]["eval_score_dict"]
            # storage is a jsonl file
            with open(storage, "w") as f:
                for item in main_data:
                    f.write(json.dumps(item) + "\n")

        # NOTE(review): assumes load() yields an iterable of row dicts for
        # jsonl files — confirm against the project's load() implementation.
        eval_data = load(storage)
        # eval_data = [json.loads(line) for line in eval_data]
        # calculate P-Level scores
        p_level_score_sum = 0
        c_level_score_sum = 0
        p_level_cnt = 0
        c_level_cnt = 0
        for line in eval_data:
            if line["tag"] == "P-Level":
                p_level_score_sum += line["eval_score_dict"]["total_score"]
                p_level_cnt += 1
            elif line["tag"] == "C-Level":
                c_level_score_sum += line["eval_score_dict"]["total_score"]
                c_level_cnt += 1
        p_level_accuracy = p_level_score_sum / p_level_cnt
        c_level_accuracy = c_level_score_sum / c_level_cnt
        # save to score_file
        score_dict = {
            "p_level_accuracy": [p_level_accuracy],
            "c_level_accuracy": [c_level_accuracy],
            "p_level_cnt": [p_level_cnt],
            "c_level_cnt": [c_level_cnt],
            "overall_accuracy": [
                (p_level_accuracy * p_level_cnt + c_level_accuracy * c_level_cnt) / (p_level_cnt + c_level_cnt)
            ],
        }
        score_df = pd.DataFrame(score_dict)
        dump(score_df, score_file)

        return score_df
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/qbench_video.py b/VLMEvalKit-sudoku/vlmeval/dataset/qbench_video.py
new file mode 100644
index 0000000000000000000000000000000000000000..317fa019c30fb9f8756964456710634067bd3ce2
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/qbench_video.py
@@ -0,0 +1,354 @@
+# flake8: noqa
+import huggingface_hub
+from huggingface_hub import snapshot_download
+from ..smp import *
+from ..smp.file import get_intermediate_file_path, get_file_extension
+from .video_concat_dataset import ConcatVideoDataset
+from .video_base import VideoBaseDataset
+from .utils import build_judge, DEBUG_MESSAGE
+from ..utils import track_progress_rich
+import torchvision.transforms as T
+from torchvision import transforms
+from torchvision.transforms.functional import InterpolationMode
+import pandas as pd
+import imageio
+import cv2
+import zipfile
+import os
+import glob
+from .utils.qbench_video import *
+
+FAIL_MSG = 'Failed to obtain answer via API.'
+
+
class QBench_Video(ConcatVideoDataset):
    """Combined Q-Bench-Video benchmark: the MCQ and VQA subsets together."""

    def __init__(self, dataset='QBench_Video', nframe=0, fps=-1):
        # Register the two sub-datasets that make up this concat dataset.
        self.DATASET_SETS[dataset] = ['QBench_Video_MCQ', 'QBench_Video_VQA']
        super().__init__(dataset=dataset, nframe=nframe, fps=fps)

    @classmethod
    def supported_datasets(cls):
        return ['QBench_Video']

    def evaluate(self, eval_file, **judge_kwargs):
        """Run the concat evaluation, normalize the open-ended acc, dump scores."""
        merged = super().evaluate(eval_file=eval_file, **judge_kwargs)
        # The open-ended subset's raw accuracy is halved to put it on the
        # same scale as the other rows.
        merged.at['open_ended', 'acc'] /= 2
        dump(merged, get_intermediate_file_path(eval_file, '_acc'))
        return merged
+
+
class QBench_Video_MCQ(VideoBaseDataset):
    """Multiple-choice question subset of Q-Bench-Video.

    Samples frames from each video (or passes the raw video to a video LLM)
    and asks the model to answer with the option letter only.
    """

    # MD5 checksum of the dataset TSV; used to validate the local cache.
    MD5 = '9d6760d75fa80aa9fd5e5cf1ea274ace'

    # System prompt for image-based models: {} is filled with the frame count.
    FRAMES_TMPL_SYS = """
You will receive {} distinct frames that have been uniformly sampled from a video sequence, arranged in the same temporal order as they appear in the video.
Please analyze these frames and answer the question based on your observations.
"""

    # System prompt for native video LLMs (no frame count needed).
    FRAMES_TMPL_SYS_4VIDEO_LLM = """
You will receive several distinct frames that have been uniformly sampled from a video sequence, arranged in the same temporal order as they appear in the video.
Please analyze these frames and answer the question based on your observations.
"""

    # Appended after the question to constrain the answer format.
    POST_PROMPT = """
Please answer the question in the following format: the uppercase letter of the correct answer option itself.
Please do not add any other answers beyond this.
"""

    TYPE = 'Video-MCQ'

    def __init__(self, dataset='qbenchvideo_single_MCQ', nframe=0, fps=-1):
        # The TSV name is fixed regardless of the requested dataset alias.
        dataset_tsv_name = 'qbenchvideo_single_MCQ'
        super().__init__(dataset=dataset_tsv_name, nframe=nframe, fps=fps)

    @classmethod
    def supported_datasets(cls):
        return ['QBench_Video_MCQ']

    def prepare_dataset(self, dataset_name='qbenchvideo_single_MCQ', repo_id='zhangzicheng/Q-Bench-Video'):
        """Download (if needed) and validate the dataset; return its root and TSV path."""

        def check_integrity(pth):
            # Valid cache: TSV present, MD5 matches, every video on disk.
            data_file = osp.join(pth, f'{dataset_name}.tsv')

            if not os.path.exists(data_file):
                return False

            if md5(data_file) != self.MD5:
                return False

            data = load(data_file)
            for idx, item in data.iterrows():
                if not osp.exists(os.path.normpath(osp.join(pth, item['video_path']))):
                    return False
            return True

        cache_path = get_cache_path(repo_id)
        if cache_path is not None and check_integrity(cache_path):
            dataset_path = cache_path
        else:
            def unzip_videos(pth):
                # Extract video.zip only once; skipped when video/ exists.
                if not osp.exists(osp.join(pth, 'video')):
                    zip_file = osp.join(pth, 'video.zip')
                    with zipfile.ZipFile(zip_file, 'r') as zip_ref:
                        zip_ref.extractall(pth)
            dataset_path = snapshot_download(repo_id=repo_id, repo_type='dataset')
            unzip_videos(dataset_path)

        data_file = osp.join(dataset_path, f'{dataset_name}.tsv')
        return dict(root=dataset_path, data_file=data_file)

    def save_video_frames(self, line):
        """Sample frames from the video of *line* and cache them as images.

        Returns the list of frame image paths. Extraction is serialized with
        a file lock so concurrent workers do not write the same frames twice.
        """
        video = line['video']
        vid_path = os.path.normpath(os.path.join(self.data_root, line['video_path']))
        import decord
        vid = decord.VideoReader(vid_path)
        video_info = {
            'fps': vid.get_avg_fps(),
            'n_frames': len(vid),
        }
        # NOTE(review): if nframe == 0 and fps < 0, neither branch runs and
        # `frame_paths`/`indices` stay unbound (NameError below) — presumably
        # callers always configure one of the two; confirm.
        if self.nframe > 0 and self.fps < 0:
            # Uniformly sample nframe frames, excluding the clip endpoints.
            step_size = len(vid) / (self.nframe + 1)
            indices = [int(i * step_size) for i in range(1, self.nframe + 1)]
            frame_paths = self.frame_paths(video)
        elif self.fps > 0:
            # not constrained by num_frames, get frames by fps
            total_duration = video_info['n_frames'] / video_info['fps']
            required_frames = int(total_duration * self.fps)
            step_size = video_info['fps'] / self.fps
            indices = [int(i * step_size) for i in range(required_frames)]
            frame_paths = self.frame_paths_fps(video, len(indices))

        flag = np.all([osp.exists(p) for p in frame_paths])

        if not flag:
            lock_path = osp.splitext(vid_path)[0] + '.lock'
            with portalocker.Lock(lock_path, 'w', timeout=30):
                # Re-check under the lock: another worker may have finished
                # writing the frames while we were waiting.
                if not np.all([osp.exists(p) for p in frame_paths]):
                    images = [vid[i].asnumpy() for i in indices]
                    images = [Image.fromarray(arr) for arr in images]
                    for im, pth in zip(images, frame_paths):
                        if not osp.exists(pth):
                            im.save(pth)

        return frame_paths

    def save_video_into_images(self, line):
        # Thin wrapper kept for API symmetry with other video datasets.
        frame_paths = self.save_video_frames(line)
        return frame_paths

    def build_prompt(self, line, video_llm):
        """Build the multimodal message for one record.

        Video LLMs get the raw video; other models get sampled frames. The
        answer-format instruction (POST_PROMPT) is appended last.
        """
        if isinstance(line, int):
            assert line < len(self)
            line = self.data.iloc[line]

        # message = [dict(type='text', value=line['question'])]
        video_path = os.path.normpath(os.path.join(self.data_root, line['video_path']))
        if video_llm:
            message = [dict(type='text', value=self.FRAMES_TMPL_SYS_4VIDEO_LLM)]
            message.append(dict(type='text', value=line['question']))
            message.append(dict(type='video', value=video_path))
        else:
            img_frame_paths = self.save_video_into_images(line)
            message = [dict(type='text', value=self.FRAMES_TMPL_SYS.format(len(img_frame_paths)))]
            message.append(dict(type='text', value=line['question']))
            for im in img_frame_paths:
                message.append(dict(type='image', value=im))
        message.append(dict(type='text', value=self.POST_PROMPT))
        return message

    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Score predictions by exact choice matching; return dimension ratings.

        NOTE(review): declared @classmethod but the first parameter is named
        `self` — it actually receives the class; consider renaming to `cls`.
        """
        assert get_file_extension(eval_file) in ['xlsx', 'json', 'tsv'], 'data file should be an supported format (xlsx/json/tsv) file'

        tmp_file = get_intermediate_file_path(eval_file, '_tmp', 'pkl')
        score_file = get_intermediate_file_path(eval_file, '_score')

        if not osp.exists(score_file):
            model = judge_kwargs.setdefault('model', 'exact_matching')
            assert model in ['exact_matching']

            # NOTE(review): `res` is loaded and filtered but never used
            # afterwards — looks like leftover scaffolding from a
            # judge-model-based flow.
            res = {} if not osp.exists(tmp_file) else load(tmp_file)
            res = {k: v for k, v in res.items() if FAIL_MSG not in v}

            data = load(eval_file)
            data_un = data[~pd.isna(data['prediction'])]

            for idx in data['index']:
                ans = data.loc[data['index'] == idx, 'answer'].values[0]
                pred = data.loc[data['index'] == idx, 'prediction'].values[0]
                # Answers look like "A. option text": split letter and text.
                correct_choice = ans.split('.')[0].strip()
                correct_answer = ans.split('.')[1].strip()

                if FAIL_MSG in pred:
                    # API failure: -1 keeps it out of the VALID rating.
                    data.loc[idx, 'score'] = -1
                else:
                    # NOTE(review): rows are read by the 'index' column but
                    # written via df.loc[idx] (label-based) — assumes the
                    # DataFrame index equals the 'index' column; confirm.
                    data.loc[idx, 'score'] = int(check_ans_mcq(
                        pred, ans, correct_choice, correct_answer
                    ))

            rejected = [x for x in data['score'] if x == -1]

            print(
                f'Among {len(data)} questions, failed to obtain prediction for {len(data) - len(data_un)} questions, '
                f'failed to obtain the score for another {len(rejected)} questions. '
                f'Those questions will be counted as -1 score in ALL rating, and will not be counted in VALID rating.'
            )

            dump(data, score_file)

        rating = get_dimension_rating(score_file)
        return rating
+
+
class QBench_Video_VQA(VideoBaseDataset):
    """Open-ended (VQA) subset of Q-Bench-Video.

    Frames are sampled for image models (raw video for video LLMs) and the
    free-form answers are later graded by a GPT judge.
    """

    # MD5 checksum of the dataset TSV; used to validate the local cache.
    MD5 = '49e6181b341c934d0b33ec78bdcc0a3d'

    # System prompt for image-based models: {} is filled with the frame count.
    FRAMES_TMPL_SYS = """
You will receive {} distinct frames that have been uniformly sampled from a video sequence, arranged in the same temporal order as they appear in the video.
Please analyze these frames and provide a detailed and accurate answer from the perspective of visual quality based on your observations.
"""

    # System prompt for native video LLMs (no frame count needed).
    FRAMES_TMPL_SYS_4VIDEO_LLM = """
You will receive several distinct frames that have been uniformly sampled from a video sequence, arranged in the same temporal order as they appear in the video.
Please analyze these frames and provide a detailed and accurate answer from the perspective of visual quality based on your observations.
"""

    TYPE = 'Video-VQA'

    def __init__(self, dataset='qbenchvideo_single_VQA', nframe=0, fps=-1):
        # The TSV name is fixed regardless of the requested dataset alias.
        dataset_tsv_name = 'qbenchvideo_single_VQA'
        super().__init__(dataset=dataset_tsv_name, nframe=nframe, fps=fps)

    @classmethod
    def supported_datasets(cls):
        return ['QBench_Video_VQA']

    def prepare_dataset(self, dataset_name='qbenchvideo_single_VQA', repo_id='zhangzicheng/Q-Bench-Video'):
        """Download (if needed) and validate the dataset; return its root and TSV path."""

        def check_integrity(pth):
            # Valid cache: TSV present, MD5 matches, every video on disk.
            data_file = osp.join(pth, f'{dataset_name}.tsv')

            if not os.path.exists(data_file):
                return False

            if md5(data_file) != self.MD5:
                return False

            data = load(data_file)
            for idx, item in data.iterrows():
                if not osp.exists(os.path.normpath(osp.join(pth, item['video_path']))):
                    return False
            return True

        cache_path = get_cache_path(repo_id)
        if cache_path is not None and check_integrity(cache_path):
            dataset_path = cache_path
        else:
            def unzip_videos(pth):
                # Extract video.zip only once; skipped when video/ exists.
                if not osp.exists(osp.join(pth, 'video')):
                    zip_file = osp.join(pth, 'video.zip')
                    with zipfile.ZipFile(zip_file, 'r') as zip_ref:
                        zip_ref.extractall(pth)
            dataset_path = snapshot_download(repo_id=repo_id, repo_type='dataset')
            unzip_videos(dataset_path)

        data_file = osp.join(dataset_path, f'{dataset_name}.tsv')
        return dict(root=dataset_path, data_file=data_file)

    def save_video_frames(self, line):
        """Sample frames from the video of *line* and cache them as images.

        Returns the frame image paths; extraction is serialized with a file
        lock so concurrent workers do not duplicate work.
        """
        video = line['video']
        vid_path = os.path.normpath(os.path.join(self.data_root, line['video_path']))
        import decord
        vid = decord.VideoReader(vid_path)
        video_info = {
            'fps': vid.get_avg_fps(),
            'n_frames': len(vid),
        }
        # NOTE(review): if nframe == 0 and fps < 0, neither branch runs and
        # `frame_paths`/`indices` stay unbound — presumably callers always
        # configure one of the two; confirm.
        if self.nframe > 0 and self.fps < 0:
            # Uniformly sample nframe frames, excluding the clip endpoints.
            step_size = len(vid) / (self.nframe + 1)
            indices = [int(i * step_size) for i in range(1, self.nframe + 1)]
            frame_paths = self.frame_paths(video)
        elif self.fps > 0:
            # not constrained by num_frames, get frames by fps
            total_duration = video_info['n_frames'] / video_info['fps']
            required_frames = int(total_duration * self.fps)
            step_size = video_info['fps'] / self.fps
            indices = [int(i * step_size) for i in range(required_frames)]
            frame_paths = self.frame_paths_fps(video, len(indices))

        flag = np.all([osp.exists(p) for p in frame_paths])

        if not flag:
            lock_path = osp.splitext(vid_path)[0] + '.lock'
            with portalocker.Lock(lock_path, 'w', timeout=30):
                # Re-check under the lock: another worker may have finished.
                if not np.all([osp.exists(p) for p in frame_paths]):
                    images = [vid[i].asnumpy() for i in indices]
                    images = [Image.fromarray(arr) for arr in images]
                    for im, pth in zip(images, frame_paths):
                        if not osp.exists(pth):
                            im.save(pth)

        return frame_paths

    def save_video_into_images(self, line):
        # Thin wrapper kept for API symmetry with other video datasets.
        frame_paths = self.save_video_frames(line)
        return frame_paths

    def build_prompt(self, line, video_llm):
        """Build the multimodal message: system prompt, question, then media."""
        if isinstance(line, int):
            assert line < len(self)
            line = self.data.iloc[line]

        video_path = os.path.normpath(os.path.join(self.data_root, line['video_path']))
        if video_llm:
            message = [dict(type='text', value=self.FRAMES_TMPL_SYS_4VIDEO_LLM)]
            message.append(dict(type='text', value=line['question']))
            message.append(dict(type='video', value=video_path))
        else:
            img_frame_paths = self.save_video_into_images(line)
            message = [dict(type='text', value=self.FRAMES_TMPL_SYS.format(len(img_frame_paths)))]
            message.append(dict(type='text', value=line['question']))
            for im in img_frame_paths:
                message.append(dict(type='image', value=im))
        return message

    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Grade open-ended answers with a GPT judge; return dimension ratings.

        NOTE(review): declared @classmethod but the first parameter is named
        `self` — it actually receives the class; consider renaming to `cls`.
        """
        model = judge_kwargs.setdefault('model', 'gpt-4o-0806')
        assert model in ['gpt-4o-0806', 'gpt-4o']

        score_file = get_intermediate_file_path(eval_file, f'_{model}_score')
        tmp_file = get_intermediate_file_path(eval_file, f'_{model}', 'pkl')
        nproc = judge_kwargs.pop('nproc', 4)

        if not osp.exists(score_file):
            data = load(eval_file)
            # `model` is rebound here: from the judge's name to the judge object.
            model = build_judge(system_prompt=VQA_JUDGE_SYS_PROMPT, **judge_kwargs)
            lt = len(data)
            lines = [data.iloc[i] for i in range(lt)]
            tups = [(model, line) for line in lines]
            indices = [line['index'] for line in lines]

            # Resume from the tmp pickle if a previous run was interrupted.
            ans = {}
            if osp.exists(tmp_file):
                ans = load(tmp_file)
            tups = [x for x, i in zip(tups, indices) if i not in ans]
            indices = [i for i in indices if i not in ans]

            if len(indices):
                _ = track_progress_rich(
                    check_ans_vqa,
                    tups,
                    nproc=nproc,
                    chunksize=nproc,
                    keys=indices,
                    save=tmp_file,
                )
                ans = load(tmp_file)
            # Judge replies look like "Score: N"; parse N into the dataframe.
            for idx in ans:
                data.loc[data['index'] == idx, 'score'] = int(ans[idx].replace('Score:', '').strip())
            dump(data, score_file)

        rating = get_dimension_rating(score_file)
        return rating
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/text_base.py b/VLMEvalKit-sudoku/vlmeval/dataset/text_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..67bb9378915f4ca08ddec25ecfa02f27dca06d86
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/text_base.py
@@ -0,0 +1,88 @@
+from abc import abstractmethod
+from ..smp import *
+
+
class TextBaseDataset:
    """Base class for text-only benchmark datasets.

    Subclasses register entries in DATASET_URL / DATASET_MD5 and implement
    `evaluate`; this class handles TSV download, caching, indexing and
    prompt construction.
    """

    MODALITY = 'TEXT'
    DATASET_URL = {}
    DATASET_MD5 = {}

    def __init__(self, dataset='MMBench', **kwargs):
        self.dataset_name = dataset

        frame = self.load_data(dataset)

        # Normalize the index column: stringify everything first, then
        # downcast back to int when every value is integral.
        frame['index'] = [str(v) for v in frame['index']]
        if np.all([istype(v, int) for v in frame['index']]):
            frame['index'] = [int(v) for v in frame['index']]

        self.data = frame
        self.post_build(dataset)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return dict(self.data.iloc[idx])

    def prepare_tsv(self, url, file_md5=None):
        """Download the dataset TSV when missing or corrupted; return it loaded."""
        data_root = LMUDataRoot()
        os.makedirs(data_root, exist_ok=True)
        data_path = osp.join(data_root, url.split('/')[-1])

        # Re-download when the file is absent or fails its MD5 check.
        cached_ok = osp.exists(data_path) and (file_md5 is None or md5(data_path) == file_md5)
        update_flag = not cached_ok
        if update_flag:
            warnings.warn('The dataset tsv is not downloaded')
            download_file(url, data_path)

        # Large TSVs (> 1 GB) get a localized copy to speed up later loads.
        if file_size(data_path, 'GB') > 1:
            local_path = data_path.replace('.tsv', '_local.tsv')
            if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL', None) or update_flag:
                from ..tools import LOCALIZE
                LOCALIZE(data_path, local_path)
            data_path = local_path
        return load(data_path)

    def dump_image(self, line):
        # Text-only dataset: there are no images to dump.
        return []

    def display(self, line):
        """Pretty-print one record (accepts an int position or a row)."""
        if isinstance(line, int):
            line = self.data.iloc[line]
        assert isinstance(line, (pd.Series, dict))
        mmqa_display(line)

    # Return a list of dataset names that are supported by this class, can override
    @classmethod
    def supported_datasets(cls):
        return list(cls.DATASET_URL)

    # Given the dataset name, return the dataset as a pandas dataframe, can override
    def load_data(self, dataset):
        return self.prepare_tsv(self.DATASET_URL[dataset], self.DATASET_MD5[dataset])

    # Post built hook, will be called after the dataset is built, can override
    def post_build(self, dataset):
        pass

    # Given one data record, return the built prompt (a multi-modal message), can override
    def build_prompt(self, line):
        if isinstance(line, int):
            line = self.data.iloc[line]
        return [dict(type='text', value=line['question'])]

    # Given the prediction file, return the evaluation results in the format of a dictionary or pandas dataframe
    @abstractmethod
    def evaluate(self, eval_file, **judge_kwargs):
        pass
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/mlvu.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/mlvu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a1dfce7f5e73c5df47710f5f8cddc0726eb3c0b5
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/mlvu.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/multiple_choice.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/multiple_choice.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..846e8ccb7f9ac9dab4e8e1e0e336b394c5ee551a
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/multiple_choice.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/omni_verifier.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/omni_verifier.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb6990678c1f9e33601bad61b7f7ca0e340d3e95
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/omni_verifier.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/shortqa.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/shortqa.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c0c3d4c6d62f8f0ccd3a61d56fdfe45104e9c239
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/shortqa.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/spatial457.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/spatial457.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8f6c934f28bcfb85814cefe970b888f246e0d1c6
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/spatial457.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/tamperbench.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/tamperbench.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..857f323e7e94dedfc0f597e558a388d04a872c6c
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/tamperbench.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/tempcompass.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/tempcompass.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2a49c085e52cb798c966ed93bb1f0dbc65d51f74
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/tempcompass.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/yorn.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/yorn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d333e4734749d086ad8a0657bfb7217dbb79ebb1
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/yorn.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/README.md b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d55840e98c4f156025486e647f97d81c3351b16a
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/README.md
@@ -0,0 +1,51 @@
+# MEGA-Bench: Scaling Multimodal Evaluation to over 500 Real-World Tasks [ICLR 2025]
+
+
+
+MEGA-Bench contains 505 multimodal tasks with diverse data sources, input/output formats, and skill requirements. The taxonomy tree is derived from the application dimension, which guides and calibrates the annotation process. The benchmark is equipped with a suite of 45 evaluation metrics to handle various output formats beyond multiple-choice questions.
+
+Following this doc, the evaluation result contains the final scores and a multi-dimensional breakdown, in a format consistent with the [MEGA-Bench Leaderboard](https://huggingface.co/spaces/TIGER-Lab/MEGA-Bench). Below is an example from evaluating `Qwen-2-VL-7B-Instruct` on the core set.
+
+
+## Step-1: Install requirements for MEGA-Bench metrics to obtain the evaluation scores and breakdown analysis
+
+```bash
+pip install -r vlmeval/dataset/utils/megabench/requirements.txt
+```
+
+
+## Step-2: Get the model response and evaluation score files with VLMEvalKit
+
+```bash
+# Core set (440 tasks, in 16-frame setting)
+python3 run.py \
+ --data MEGABench_core_16frame \
+ --model Qwen2-VL-7B-Instruct \
+ --work-dir your/work/dir \
+
+# Open-ended set (65 tasks, in 16-frame setting)
+python3 run.py \
+ --data MEGABench_open_16frame \
+ --model Qwen2-VL-7B-Instruct \
+ --work-dir your/work/dir \
+```
+Note: please set up the `OPENAI_API_KEY` in the .env file to evaluate the open set.
+
+Then you can have 2 score files in the directory like:
+
+```bash
+your/work/dir/Qwen-2-VL-7B-Instruct/T20250706_Gbf63ab2c/megabench_score_core.json
+your/work/dir/Qwen-2-VL-7B-Instruct/T20250707_Gbf63ab2c/megabench_score_open.json
+```
+
+## Step-3(Optional): Run MEGA-Bench scripts to obtain the breakdown analysis
+
+Move the 2 score files into the same directory, then run the script:
+
+```bash
+# Run the metrics for the open-ended set
+cd vlmeval/dataset/utils/megabench/tools
+python3 derive_breakdown_results.py --input_dir your/dir/to/megabench_scores
+```
+
+The results in `your/dir/to/megabench_scores/analysis` are what is used by the [MEGA-Bench leaderboard](https://huggingface.co/spaces/TIGER-Lab/MEGA-Bench). The leaderboard can be updated by putting the files in the results directory of the leaderboard's [HuggingFace space](https://huggingface.co/spaces/TIGER-Lab/MEGA-Bench/tree/main/static/eval_results/Default).
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__init__.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b136359db1d036614f0b5c63e26c1b4a4f63b6e
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__init__.py
@@ -0,0 +1,5 @@
from .aggregation_type import AggregationType
from .metric_type import MetricType
from .response_parse_type import ResponseParseType

# __all__ must be a list of *names* (strings). Listing the class objects
# themselves makes `from ... import *` fail with a TypeError, since the
# import machinery calls getattr(module, name) for each entry.
__all__ = ["AggregationType", "MetricType", "ResponseParseType"]
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/evaluator.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/evaluator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d9bdaa32166624893ac1c95287ae9c2dcaa0a737
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/evaluator.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/response_parse_type.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/response_parse_type.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..071096cbc032edb970c2e949582b2278f6d213cb
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/response_parse_type.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/aggregation/min_agg.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/aggregation/min_agg.py
new file mode 100644
index 0000000000000000000000000000000000000000..1558991ffda6dfdd0d12663e8ef31950bc521ce7
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/aggregation/min_agg.py
@@ -0,0 +1,14 @@
+from numbers import Number
+from typing import Dict
+
+
class MinAggregation:
    """Take the minimum of all valid scores."""

    @staticmethod
    def aggregate(scores: Dict[str, Number], weights: Dict[str, Number]) -> Number:
        """Return the minimum non-negative score, or -1 when none exist.

        Negative scores mark invalid/failed evaluations and are excluded.
        *weights* is accepted for interface parity with other aggregators
        but is unused here.
        """
        filtered_scores = [s for s in scores.values() if s >= 0]
        if not filtered_scores:
            return -1
        return min(filtered_scores)
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/aggregation_type.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/aggregation_type.py
new file mode 100644
index 0000000000000000000000000000000000000000..b921c3db57a2aea6fb3b43f75d27db9ab01fffef
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/aggregation_type.py
@@ -0,0 +1,25 @@
+from enum import Enum
+
class AggregationType(Enum):
    """Aggregation strategies for combining per-field scores.

    Only MEAN (a weighted mean) is currently implemented, and
    ``from_string`` maps every input to MEAN.
    """

    MEAN = 0

    @classmethod
    def from_string(cls, s):
        """Return the aggregation type named by *s* (always MEAN for now)."""
        return cls.MEAN

    def aggregate(self, field_scores, field_weights):
        """Return the weighted mean of *field_scores*.

        :param field_scores: mapping field -> score; a score may also be a
            one-element sequence (some metrics report it wrapped).
        :param field_weights: mapping field -> weight; missing fields
            default to weight 1.0.
        :return: weighted mean, or 0.0 when there are no scores/weights.
        """
        if not field_scores:
            return 0.0

        total_score = 0.0
        total_weight = 0.0

        for field, score in field_scores.items():
            weight = field_weights.get(field, 1.0)
            # Fix: the original bare `except:` swallowed every exception
            # (including KeyboardInterrupt/SystemExit). Only a TypeError
            # signals the wrapped-sequence case we want to unwrap.
            try:
                total_score += score * weight
            except TypeError:
                total_score += score[0] * weight
            total_weight += weight

        return total_score / total_weight if total_weight > 0 else 0.0
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/evaluator.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2bbcf312b77a2256849bc93cb1bfbb38e433e87
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/evaluator.py
@@ -0,0 +1,399 @@
+import argparse
+import json
+import os
+from typing import Any, Dict, List
+import ast
+from vlmeval import load, dump
+
+
+from . import MetricType, AggregationType, ResponseParseType
+from .parsing.common.utils import evaluate_as_string
+
+
class MEGABenchEvaluator:
    """Scores model responses for MEGA-Bench tasks.

    Loads the ground-truth tasks (with their per-task metric configuration)
    from the HF dataset, scores every response in the submission file with the
    configured field metrics, aggregates per-query and per-task scores, and
    writes the annotated data plus a summary to ``output_file``.
    """

    def __init__(
        self,
        subset_name: str,
        responses_file: str,
        output_file: str,
    ):
        """
        :param subset_name: HF subset containing the tasks + their metric configs
        :param responses_file: Path to a JSON file with tasks + model responses
        :param output_file: Path to store evaluated results
        """
        self.hf_data = self._load_hf(subset_name)  # ground truth + metric configs
        self.data = self._load_json(responses_file)  # The model's output
        self.output_file = output_file
        # Per-query scores are checkpointed here so an interrupted run can
        # resume without re-evaluating finished queries.
        # NOTE(review): assumes output_file ends with ".json"; otherwise the
        # temp path equals the output path — confirm with callers.
        self.tmp_output_file = output_file.replace(".json", "_tmp.pkl")

        # Build a dict of {task_name -> metric configuration} for quick lookup
        self.scoring_functions = {}
        for task_name, task_samples in self.hf_data.items():
            # metric_info is stored as a stringified Python dict in the dataset
            self.scoring_functions[task_name] = ast.literal_eval(
                task_samples[0]["metric_info"]
            )

    def _load_hf(self, subset_name: str) -> Dict[str, List[Dict[str, Any]]]:
        """
        Load the HF dataset for the given subset name.

        Returns a mapping of task_name -> list of that task's samples.
        """
        from datasets import load_dataset
        dataset = load_dataset("TIGER-Lab/MEGA-Bench", subset_name)["test"]
        task_dict = {}
        for sample in dataset:
            task_name = sample["task_name"]
            if task_name not in task_dict:
                task_dict[task_name] = []
            task_dict[task_name].append(sample)

        return task_dict

    def _get_eval_context(self, task_name, query):
        """Fetch this query's eval_context from the HF data.

        Matches by positional ``query_idx`` when present, otherwise by the
        query's ``global_idx`` against the samples' ``id`` field.
        """
        if "query_idx" in query:
            query_idx = query["query_idx"]
            eval_context = self.hf_data[task_name][query_idx]["eval_context"]
        else:
            global_idx = query["global_idx"]
            global_idx_to_sample = {sample["id"]: sample for sample in self.hf_data[task_name]}
            eval_context = global_idx_to_sample[global_idx]["eval_context"]

        # Stored as a stringified Python literal in the dataset
        eval_context = ast.literal_eval(eval_context)
        return eval_context

    def _determine_eval_style(self, task):
        """Return "llm" if any field of this task is judged by an LLM, else "rule"."""
        metric_info = self.scoring_functions[task["task_name"]]
        all_task_metrics = list(metric_info["field_score_function"].values())
        eval_type = (
            "rule"
            if (
                "gpt_4o_as_judge" not in all_task_metrics
                and "ascii_art_gpt4o_judge" not in all_task_metrics
            )
            else "llm"
        )
        return eval_type

    def evaluate(self):
        """
        The main entry point to evaluate all tasks in self.data based on the HF dataset's metric info.
        """
        # Resume from checkpoint when a previous (partial) run exists
        if os.path.exists(self.tmp_output_file):
            exist_records = load(self.tmp_output_file)
        else:
            exist_records = {}
        num_tasks = 0
        num_queries = 0
        total_query_score = 0.0
        total_task_score = 0.0

        # Evaluate each task
        for task in self.data:
            task_name = task.get("task_name", "")
            if task_name not in exist_records:
                exist_records[task_name] = {}

            # If no scoring config is found for the given task_name, skip
            score_config = self.scoring_functions.get(
                task_name,
                {
                    "field_score_function": {},
                    "aggregation": {"function": None, "field_weights": {}},
                    "response_parse_function": None,
                },
            )
            if not task.get("query_response"):
                # No queries to score
                continue

            num_tasks += 1
            task_score_sum = 0.0
            # Prepare the aggregator
            aggregator = AggregationType.from_string(score_config["aggregation"]["function"])
            field_weights = score_config["aggregation"]["field_weights"]

            # Parse the metric definitions
            field_score_functions = score_config.get("field_score_function", {})
            global_aux_metrics = score_config.get("global_aux_metrics", {})
            parser_type_str = score_config.get("response_parse_function", "dummy")
            parser = ResponseParseType.from_string(parser_type_str)

            # Extract the fields from the first correct_answer (assuming uniform)
            first_correct = task["query_response"][0]["correct_answer"]
            all_fields = list(first_correct.keys())
            # Usually, we only treat "##something" fields as metadata, so skip them:
            answer_fields = [f for f in all_fields if not f.startswith("##")]

            # For each query in the task
            for idx, query in enumerate(task["query_response"]):
                num_queries += 1
                response_text = query.get("response", "")
                correct_answer = query["correct_answer"]

                # 1) Parse the response according to the specified parser
                response_obj = self._parse_response(
                    task_name,
                    parser,
                    response_text,
                    correct_answer,
                    answer_fields,
                    query,
                    task,
                )

                if idx in exist_records[task_name]:
                    # Reuse the checkpointed field scores for this query
                    query["scores"] = exist_records[task_name][idx]
                else:
                    # Initialize scores for this query
                    query["scores"] = {"field": {}, "info": {}}

                    # 2) Evaluate each field
                    for fld, fld_metric_name in field_score_functions.items():
                        metric = self._build_metric(fld_metric_name, score_config)
                        self._evaluate_field(
                            task_name,
                            metric,
                            fld,
                            response_obj,
                            correct_answer,
                            query
                        )

                    # Evaluate global auxiliary metrics (if any)
                    for fld, fld_metric_name in global_aux_metrics.items():
                        metric = self._build_metric(fld_metric_name, score_config)
                        # Some tasks want the entire response object to do an additional check
                        # So, pass original `response_obj` under `fld` key:
                        tmp_obj = {fld: response_obj}
                        self._evaluate_field(
                            task_name,
                            metric,
                            fld,
                            tmp_obj,
                            correct_answer,
                            query,
                            is_aux=True,
                        )

                    exist_records[task_name][idx] = query["scores"]
                    # Checkpoint every 10 queries and after the last one
                    if idx % 10 == 0 or idx == len(task["query_response"]) - 1:
                        dump(exist_records, self.tmp_output_file)

                # 3) Aggregate the query-level score
                query["scores"]["query"] = aggregator.aggregate(
                    query["scores"]["field"],
                    field_weights,
                )

                # Negative query scores mark invalid evaluations; skip them
                if query["scores"]["query"] >= 0:
                    task_score_sum += query["scores"]["query"]

            # Calculate overall task score
            if task["query_response"]:
                mean_score = task_score_sum / len(task["query_response"])
            else:
                mean_score = 0.0
            task["task_score"] = task_score_sum
            task["mean_task_score"] = mean_score
            task["eval_type"] = self._determine_eval_style(task)

            total_query_score += task_score_sum
            total_task_score += mean_score

            print(f"[Task: {task_name}] Score = {task_score_sum} / {len(task['query_response'])}")

        # Produce overall summary stats
        # macro = mean over tasks; micro = mean over individual queries
        summary = {}
        if num_tasks > 0:
            macro_mean_score = total_task_score / num_tasks
            summary["macro_mean_score"] = macro_mean_score
        else:
            summary["macro_mean_score"] = 0.0

        if num_queries > 0:
            micro_mean_score = total_query_score / num_queries
            summary["micro_mean_score"] = micro_mean_score
        else:
            summary["micro_mean_score"] = 0.0

        summary["num_tasks"] = num_tasks
        summary["num_queries"] = num_queries

        # Write back final data + summary
        output_data = {
            "data": self.data,
            "summary": summary,
        }
        self._save_results(self.output_file, output_data)
        print(f"Evaluation complete! Results saved to {self.output_file}")

    def _evaluate_field(
        self,
        task_name: str,
        metric: Any,
        field: str,
        response_obj: Dict[str, Any],
        correct_answer: Dict[str, Any],
        query: Dict[str, Any],
        is_aux: bool = False,
    ) -> float:
        """Compute score for a single field using the given metric.

        Scores (and, for some metrics, extra eval info) are written in place
        into ``query["scores"]``; an explicit value is only returned on the
        unsupported-metric early exit, so callers ignore the return value.
        """
        eval_context = self._get_eval_context(task_name, query)

        if metric == MetricType.UNSUPPORTED:
            print(f"The metric for {field} in task {task_name} is not supported")
            return 0.0
        elif metric == MetricType.SYMBOLIC_PLANNING_TEST or metric == MetricType.PROGRAM_JUDGE:
            # These metrics validate the response against the eval context only
            query["scores"]["field"][field] = metric.match(
                response_obj.get(field),
                eval_context,
            )
        elif metric == MetricType.CONSTRAINED_GENERATION:
            # Returns a (score, info) pair; keep the info for inspection
            score, eval_info = metric.match(response_obj, eval_context)
            query["scores"]["field"][field] = score
            query["scores"]["info"][field] = eval_info
        elif metric == MetricType.XML_NORM_POINT_IN_BBOX:
            score, eval_info = metric.match(response_obj.get(field), eval_context)
            query["scores"]["field"][field] = score
            query["scores"]["info"][field] = eval_info
        elif isinstance(metric, MetricType.VLM_AS_JUDGE.class_impl):
            # LLM-as-judge needs the full query context (images + question)
            images = query.get("images", [])
            question = query.get("question", "")
            correct_val = correct_answer.get(field, "") if not is_aux else correct_answer
            response_info = (
                response_obj.get(field)
                if isinstance(response_obj, dict)
                else response_obj
            )
            query["scores"]["field"][field] = metric.match(
                response_info,
                correct_val,
                images=images,
                question=question,
                eval_context=eval_context,
            )
        else:
            correct_val = correct_answer.get(field, "") if not is_aux else correct_answer
            correct_val = evaluate_as_string(correct_val)  # remove extra formatting
            predicted_val = response_obj.get(field, "")
            query["scores"]["field"][field] = metric.match(predicted_val, correct_val)

    def _parse_response(
        self,
        task_name: str,
        parser,
        response_text: str,
        correct_answer: Dict[str, Any],
        answer_fields: List[str],
        query: Dict[str, Any],
        task: Dict[str, Any],
    ) -> Dict[str, Any]:
        """
        Parse the raw response into a structured object, depending on the parser.

        Single-field parsers produce ``{answer_key: parsed_string}``; JSON
        parsing failures fall back to mapping every answer field to the raw
        response text.
        """
        res_parsing_pass = True
        if parser.is_single_field_parser():
            # single field
            assert (
                len(answer_fields) == 1
            ), "The answer_string parse must be used when the answer has a single field"
            answer_key = answer_fields[0]

            global_description = task["task_description"]
            query_question = query["question"]
            is_single_line_ans = "\n" not in correct_answer[answer_key]

            response_obj = parser.parse(
                response_text,
                answer_key,
                global_description=global_description,
                query_question=query_question,
                is_single_line_ans=is_single_line_ans,
            )
            assert isinstance(response_obj[answer_key], str), "Single-field parsing results must be string"
        else:
            # Structural output (using JSON parser or other specified parsing func) or dummy parse (return all)
            response_obj = parser.parse(response_text)

            if parser == ResponseParseType.JSON and (
                not isinstance(response_obj, dict) or not response_obj
            ):
                # Expect a JSON, but parsing failed,
                # Record the failure parsing, and use the raw string for each field of the answer
                res_parsing_pass = False
                response_obj = {}
                for field in correct_answer:
                    response_obj[field] = response_text

        if not res_parsing_pass:
            # NOTE(review): assumes "global_idx" is present; queries addressed
            # via "query_idx" (see _get_eval_context) would raise KeyError here
            print(
                f"Task:{task_name}, cannot parse query with global idx {query['global_idx']}"
            )
        return response_obj

    def _build_metric(self, metric_name: str, score_config: Dict[str, Any]):
        """
        Given a string for the metric (e.g. 'gpt_4o_as_judge'),
        return the actual MetricType or a specialized metric class.
        """
        metric = MetricType.from_string(metric_name)
        if metric == MetricType.VLM_AS_JUDGE:
            # Build the GPT4O metric using the provided config
            gpt4o_configs = score_config.get("gpt4o_eval_configs", {})
            metric = metric.class_impl(gpt4o_configs)
        elif metric == MetricType.ASCII_ART_GPT4O_JUDGE:
            # Build the ASCII Art metric using the provided config
            ascii_art_configs = score_config.get("ascii_art_eval_configs", {})
            metric = metric.class_impl(ascii_art_configs)
        return metric

    @staticmethod
    def _load_json(file_path: str) -> Any:
        """Load and return the JSON content of ``file_path``."""
        with open(file_path, "r", encoding="utf-8") as f:
            return json.load(f)

    @staticmethod
    def _save_results(file_path: str, data: Any) -> None:
        """
        Safe-write a JSON file via temp file + replace.
        Since the results file is long, this avoid breaking the file in case of a crash.
        """
        temp_filename = f"{file_path}.tmp"
        with open(temp_filename, "w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False, indent=4)
        os.replace(temp_filename, file_path)
+
+
def main():
    """Command-line entry point for the standalone MEGA-Bench evaluator."""
    arg_parser = argparse.ArgumentParser(description="Simple Evaluator")
    arg_parser.add_argument(
        "--subset_name", type=str, required=True,
        help="The subset of MEGA-Bench to evaluate.",
    )
    arg_parser.add_argument(
        "--submission_file", type=str, required=True,
        help="Path to a JSON file containing model responses.",
    )
    arg_parser.add_argument(
        "--output_file", type=str, required=True,
        help="Where to store the evaluation results (JSON).",
    )
    cli_args = arg_parser.parse_args()

    MEGABenchEvaluator(
        subset_name=cli_args.subset_name,
        responses_file=cli_args.submission_file,
        output_file=cli_args.output_file,
    ).evaluate()


if __name__ == "__main__":
    main()
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/metric_type.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/metric_type.py
new file mode 100644
index 0000000000000000000000000000000000000000..892b5cba17ae267b0654a67a622fa899501dffa4
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/metric_type.py
@@ -0,0 +1,259 @@
+from functools import cached_property
+from enum import Enum
+from .utils import lazy_import
+import logging
+
+
class MetricType(Enum):
    """The types of metrics.

    Each member's value is the metric name used in the dataset's
    ``metric_info`` configuration; the implementing class is imported lazily
    via ``class_impl`` so that heavy scoring dependencies are only loaded
    when a metric is actually used.
    """

    EXACT_STR_MATCH = "exact_str_match"
    SIMPLE_STR_MATCH = "simple_str_match"
    CODE_RESULT_EXACT_STR_MATCH = "code_result_exact_str_match"
    DICT_EXACT_STR_MATCH_AGG_RECALL = "dict_exact_str_match_agg_recall"
    EXACT_STR_MATCH_CASE_INSENSITIVE = "exact_str_match_case_insensitive"
    NORM_SIM_DAMERAU_LEVENSHTEIN = "normalized_similarity_damerau_levenshtein"
    NEAR_STR_MATCH = "near_str_match"
    NUMBER_RELATIVE_DIFF_RATIO = "number_rel_diff_ratio"
    SET_EQUALITY = "set_equality"
    SET_EQUALITY_CASE_INSENSITIVE = "set_equality_case_insensitive"
    DICT_SET_EQUALITY_AGG_JACCARD = "dict_set_equality_agg_jaccard"
    DICT_PRECISION = "dict_precision"
    JACCARD_INDEX = "jaccard_index"
    JACCARD_INDEX_CASE_INSENSITIVE = "jaccard_index_case_insensitive"
    DICT_JACCARD_AGG_JACCARD = "dict_jaccard_agg_jaccard"
    DICT_EQUALITY = "dict_equality"
    SET_PRECISION = "set_precision"
    POSITIVE_INT_MATCH = "positive_int_match"
    CHESS_MOVE_LIST_JACCARD_INDEX = "chess_move_list_jaccard_index"
    LONGEST_COMMON_LIST_PREFIX_RATIO = "longest_common_list_prefix_ratio"
    ASCII_ART_GPT4O_JUDGE = "ascii_art_gpt4o_judge"
    NLI_ENTAILMENT = "nli_entailment"
    BLEU = "bleu"
    GLEU_CN = "gleu_cn"
    XML_NORM_BBOX_IOU_SINGLE = "xml_nbbox_iou_single"
    LATEX_EXPR_EQUALITY = "latex_expr_equality"
    TEXT_WITH_LATEX_EXPR_EQUALITY = "text_with_latex_expr_equality"
    NORM_BBOX_IOU_TUPLE = "nbbox_iou_tuple"
    NORM_BBOX_IOU_SINGLE = "nbbox_iou_single"
    NORM_BBOX_IOU_SEQUENCE = "nbbox_iou_sequence"
    DICT_NORM_BBOX_IOU_TUPLE_AGG_JACCARD = "dict_nbbox_iou_tuple_agg_jaccard"
    XML_NORM_POINT_IN_BBOX = "xml_norm_point_in_bbox"
    XML_NORM_POINT_DISTANCE = "xml_norm_point_distance"
    GEO_PROXIMITY_LOCATION_DICT = "geo_proximity_location_dict"
    NORMALIZED_RMSE = "normalized_rmse"
    PROGRAM_JUDGE = "program_judge"
    STR_SET_EQUALITY_LINE_BREAK = "str_set_equality_line_break"
    STR_SET_EQUALITY_COMMA = "str_set_equality_comma"
    SEQUENCE_EQUALITY = "sequence_equality"
    SEQUENCE_EQUALITY_CASE_INSENSITIVE = "sequence_equality_case_insensitive"
    SEQUENCE_ACCURACY_CASE_INSENSITIVE = "sequence_accuracy_case_insensitive"
    ANGLE_SEQ_FLOAT_RMSE = "angle_seq_float_rmse"
    SYMBOLIC_PLANNING_TEST = "symbolic_planning_test"
    MULTI_REF_PHRASE_EVAL = "multi_ref_phrase"
    GENERAL_SINGLE_NUMERICAL_MATCH = "general_single_numerical_match"
    BOXED_SINGLE_NUMERICAL_MATCH = "boxed_single_numerical_match"
    SEQUENCE_COORDS_SIMILARITY = "sequence_coords_similarity"
    CONSTRAINED_GENERATION = "constrained_generation"
    VLM_AS_JUDGE = "gpt_4o_as_judge"
    UNSUPPORTED = "unsupported"

    @cached_property
    def class_impl(self):
        """Instantiate and cache the scoring class implementing this metric.

        Metrics without an implementation fall back to ``UnsupportedScoring``
        (after logging an error).
        """
        lazy_imports = {
            MetricType.SIMPLE_STR_MATCH: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.simple_str_match", "SimpleStrMatch"
            ),
            MetricType.EXACT_STR_MATCH: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.exact_str_match", "ExactStrMatch"
            ),
            MetricType.CODE_RESULT_EXACT_STR_MATCH: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.exact_str_match", "CodeResultExactStrMatch"
            ),
            MetricType.DICT_EXACT_STR_MATCH_AGG_RECALL: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.dict_exact_match_agg_recall",
                "DictExactStrMatchAggRecall",
            ),
            MetricType.EXACT_STR_MATCH_CASE_INSENSITIVE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.exact_str_match_case_insensitive",
                "ExactStrMatchCaseInsensitive",
            ),
            MetricType.NORM_SIM_DAMERAU_LEVENSHTEIN: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.normalized_similarity_damerau_levenshtein",
                "NormalizedSimilarityDamerauLevenshtein",
            ),
            MetricType.NEAR_STR_MATCH: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.near_str_match", "NearStrMatch"
            ),
            MetricType.NUMBER_RELATIVE_DIFF_RATIO: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.number_rel_diff_ratio", "NumberRelDiffRatio"
            ),
            MetricType.SET_EQUALITY: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.set_equality", "SetEquality"
            ),
            MetricType.SET_EQUALITY_CASE_INSENSITIVE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.set_equality", "SetEqualityCaseInsensitive"
            ),
            MetricType.DICT_SET_EQUALITY_AGG_JACCARD: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.dict_set_equality_agg_jaccard",
                "DictSetEqualityAggJaccard",
            ),
            MetricType.DICT_EQUALITY: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.dict_equality",
                "DictEquality",
            ),
            MetricType.DICT_PRECISION: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.dict_equality",
                "DictPrecision",
            ),
            MetricType.JACCARD_INDEX: lazy_import("vlmeval.dataset.utils.megabench.scoring.jaccard", "Jaccard"),
            MetricType.JACCARD_INDEX_CASE_INSENSITIVE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.jaccard", "JaccardCaseInsensitive"
            ),
            MetricType.DICT_JACCARD_AGG_JACCARD: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.dict_jaccard_agg_jaccard", "DictJaccardAggJaccard"
            ),
            MetricType.SET_PRECISION: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.set_precision", "SetPrecision"
            ),
            MetricType.POSITIVE_INT_MATCH: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.positive_int_match", "PositiveIntMatch"
            ),
            MetricType.CHESS_MOVE_LIST_JACCARD_INDEX: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.chess_jaccard", "ChessMoveJaccard"
            ),
            MetricType.LONGEST_COMMON_LIST_PREFIX_RATIO: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.longest_common_list_prefix_ratio",
                "LongestCommonListPrefixRatio",
            ),
            MetricType.ASCII_ART_GPT4O_JUDGE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.ascii_art_gpt4o_judge",
                "AsciiArtVLMJudgeScore",
            ),
            MetricType.NLI_ENTAILMENT: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.nli_entailment", "NliEntailment"
            ),
            MetricType.BLEU: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.sacrebleu_bleu",
                "Bleu",
            ),
            MetricType.GLEU_CN: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.gleu",
                "GLEUChinese",
            ),
            MetricType.XML_NORM_BBOX_IOU_SINGLE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.xml_nbbox_iou", "XmlNbboxIouSingle"
            ),
            MetricType.BOXED_SINGLE_NUMERICAL_MATCH: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.general_numerical_match", "BoxedSingleNumericalMatch"
            ),
            MetricType.GENERAL_SINGLE_NUMERICAL_MATCH: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.general_numerical_match", "GeneralSingleNumericalMatch"
            ),
            MetricType.SEQUENCE_COORDS_SIMILARITY: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.coordinate_sequence_match", "CoordsSequenceSimilarity"
            ),
            MetricType.LATEX_EXPR_EQUALITY: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.latex_expr_equality",
                "LatexExprEquality",
            ),
            MetricType.TEXT_WITH_LATEX_EXPR_EQUALITY: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.latex_expr_equality",
                "TextLatexExprEquality",
            ),
            MetricType.NORM_BBOX_IOU_TUPLE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.nbbox_iou", "NbboxIouTuple"
            ),
            MetricType.NORM_BBOX_IOU_SINGLE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.nbbox_iou", "NbboxIouSingle"
            ),
            MetricType.NORM_BBOX_IOU_SEQUENCE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.nbbox_iou", "NbboxIouSequence"
            ),
            MetricType.DICT_NORM_BBOX_IOU_TUPLE_AGG_JACCARD: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.dict_nbbox_iou_tuple_agg_jaccard",
                "DictNbboxIouTupleAggJaccard",
            ),
            MetricType.XML_NORM_POINT_IN_BBOX: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.xml_norm_point_in_bbox",
                "XmlNormPointInBbox",
            ),
            MetricType.XML_NORM_POINT_DISTANCE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.xml_norm_point_distance",
                "XmlNormPointDistance",
            ),
            MetricType.GEO_PROXIMITY_LOCATION_DICT: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.geo_proximity", "GeoProximityLocationDict"
            ),
            MetricType.NORMALIZED_RMSE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.mse", "NormalizedRMSE"
            ),
            MetricType.PROGRAM_JUDGE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.program_judge", "ProgramJudge"
            ),
            MetricType.STR_SET_EQUALITY_LINE_BREAK: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.set_equality", "StringSetEqualityLineSplit"
            ),
            MetricType.STR_SET_EQUALITY_COMMA: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.set_equality", "StringSetEqualityCommaSplit"
            ),
            MetricType.SEQUENCE_EQUALITY: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.sequence_equality", "SequenceEquality"
            ),
            MetricType.SEQUENCE_EQUALITY_CASE_INSENSITIVE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.sequence_equality", "SequenceEqualityCaseInsensitive"
            ),
            MetricType.SEQUENCE_ACCURACY_CASE_INSENSITIVE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.sequence_equality", "SequenceAccuracyCaseInsensitive"
            ),
            MetricType.ANGLE_SEQ_FLOAT_RMSE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.mse", "AngleSeqFloatRMSE"
            ),
            MetricType.SYMBOLIC_PLANNING_TEST: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.symbolic_planning", "SymbolicPlanningMetricTest"
            ),
            MetricType.MULTI_REF_PHRASE_EVAL: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.multi_ref_phrase", "MultipleReferencePhraseEval"
            ),
            MetricType.CONSTRAINED_GENERATION: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.constrained_generation", "ConstrainedGenerationEval"
            ),
            MetricType.VLM_AS_JUDGE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.vlm_as_judge", "VLMJudgeScore"
            ),
        }

        if self not in lazy_imports:
            logging.error(f"Metric {self} not implemented...")

        importer = lazy_imports.get(
            self,
            lazy_import("vlmeval.dataset.utils.megabench.scoring.unsupported_scoring", "UnsupportedScoring"),
        )
        return importer()

    def match(self, response: str, correct_answer: str, task_info=None):
        """Score *response* against *correct_answer* with this metric's implementation.

        ``task_info`` is forwarded only when provided (truthy).
        """
        if not task_info:
            return self.class_impl.match(response, correct_answer)
        else:
            return self.class_impl.match(response, correct_answer, task_info)

    @classmethod
    def from_string(cls, s):
        """Look up a metric by its configuration name (``None`` -> UNSUPPORTED).

        Raises ValueError for unknown metric names.
        """
        try:
            if s is None:
                return cls("unsupported")
            return cls(s.lower())
        # Bug fix: Enum lookup by value raises ValueError, not KeyError, so
        # the original handler was dead code and the custom message never fired.
        except ValueError as exc:
            raise ValueError(f"Invalid metric type: {s}") from exc

    @classmethod
    def get_all_values(cls):
        """Return every metric type as a list."""
        return list(cls)
+
+
# List all of the supported metrics:
if __name__ == "__main__":
    print("All MetricType values:")
    # Enum classes are iterable; no need to materialize get_all_values()
    for metric_type in MetricType:
        print(f"{metric_type.name}: {metric_type.value}")
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/answer_str_parse.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/answer_str_parse.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ecdaf6c672b837909b52a7e2213fe457545111b
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/answer_str_parse.py
@@ -0,0 +1,137 @@
+import logging
+from .common.parsers import parse_json
+from .common.utils import (
+ extract_code_block_content,
+ extract_answer_content,
+ evaluate_as_string,
+ drop_additional_text,
+)
+
+logger = logging.getLogger("errorLogger")
+
+
class AnswerStrParse:
    """Parse the response for the single answer field."""

    @classmethod
    def _parse(
        cls,
        response: str,
        *,
        is_ascii_art: bool = False,
        should_remove_surrounding_whitespace=True,
        global_description: str = "",
        query_question: str = "",
        is_single_line_ans: bool = None,
    ) -> str:
        """Extract the answer string from a raw model response.

        Strips "Answer: ..." framing and code fences, optionally drops trailing
        chatter for single-line answers, and normalizes dict/list-looking
        content to its ``str()`` form so metrics always receive a string.
        ``global_description`` and ``query_question`` are currently unused
        here (kept for interface/debugging purposes).
        """
        if response is None:
            response = ""

        # Extract the answer content based on "Answer: ..." format
        answer_content = extract_answer_content(
            response,
            is_ascii_art=is_ascii_art,
            should_remove_surrounding_whitespace=should_remove_surrounding_whitespace,
        )

        # Extract things from the code block if response is wrapped by a code block
        answer_content, is_code = extract_code_block_content(
            answer_content,
            is_ascii_art=is_ascii_art,
            should_remove_surrounding_whitespace=should_remove_surrounding_whitespace,
        )

        if not is_code and is_single_line_ans and not is_ascii_art:
            answer_content = drop_additional_text(answer_content)

        # Check if the content is a potential dict or list.
        if answer_content.startswith("{") or answer_content.startswith("["):
            # Attempt to parse the content as JSON
            response_obj = parse_json(answer_content)
            if response_obj == {}:
                # Empty parse result without a literal "{}" in the content —
                # presumably a parse failure; return the raw content instead.
                if "{}" not in answer_content:
                    return answer_content
            elif response_obj == []:
                # Same fallback for an empty-list parse result
                if "[]" not in answer_content:
                    return answer_content
            return str(response_obj)  # make sure the response to the metric is always a string
        else:
            # drop the redundant string quotes
            answer_content = evaluate_as_string(answer_content)
            return answer_content

    @classmethod
    def parse(
        cls,
        response: str,
        answer_key: str,
        *,
        global_description: str = "",
        query_question: str = "",
        is_single_line_ans: bool = None,
    ) -> dict:
        """Parse *response* (plain-text mode) and key it under *answer_key*."""
        response_parsed = cls._parse(
            response,
            is_ascii_art=False,
            global_description=global_description,
            query_question=query_question,
            is_single_line_ans=is_single_line_ans,
        )
        results = {answer_key: response_parsed}
        return results
+
+
class AsciiAnswerStrParse(AnswerStrParse):
    """Parse the response for the single ASCII answer field."""

    @classmethod
    def parse(
        cls,
        response: str,
        answer_key: str,
        *,
        global_description: str = "",
        query_question: str = "",
        is_single_line_ans: bool = None,
    ) -> dict:
        """Parse *response* in ASCII-art mode and key it under *answer_key*."""
        return {
            answer_key: cls._parse(
                response,
                is_ascii_art=True,
                global_description=global_description,
                query_question=query_question,
                is_single_line_ans=is_single_line_ans,
            )
        }
+
+
class VerbatimAnswerStrParse(AnswerStrParse):
    """Parse the response for a single answer field that should not have preceding or trailing whitespace removed."""

    @classmethod
    def parse(
        cls,
        response: str,
        answer_key: str,
        *,
        global_description: str = "",
        query_question: str = "",
        is_single_line_ans: bool = None,
    ) -> dict:
        """Parse *response* without stripping surrounding whitespace.

        NOTE(review): this also sets ``is_ascii_art=True`` like the ASCII
        parser — presumably to reuse its whitespace-preserving path; confirm
        this is intentional.
        """
        return {
            answer_key: cls._parse(
                response,
                is_ascii_art=True,
                should_remove_surrounding_whitespace=False,
                global_description=global_description,
                query_question=query_question,
                is_single_line_ans=is_single_line_ans,
            )
        }
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/dummy_parse.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/dummy_parse.py
new file mode 100644
index 0000000000000000000000000000000000000000..21b5a2b15148cbd5416f61a1d72d39a46eb6b35a
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/dummy_parse.py
@@ -0,0 +1,6 @@
class DummyParse:
    """Pass-through parser used when no structured parsing is configured."""

    @staticmethod
    def parse(response: str, *args, **kwargs) -> str:
        """Return the raw response with only surrounding whitespace stripped.

        Extra positional/keyword arguments are accepted (and ignored) so this
        parser stays call-compatible with the other response parsers.
        """
        return response.strip()
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/json_parse.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/json_parse.py
new file mode 100644
index 0000000000000000000000000000000000000000..1bc91313a4ce9c49e586f7b81acc92198fa68fe8
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/json_parse.py
@@ -0,0 +1,17 @@
+from .common.parsers import parse_json
+from .common.utils import evaluate_as_string
+
+
class JsonParse:
    """Load the response as a JSON object."""

    @staticmethod
    def parse(response: str):
        """Parse the JSON object, including nested JSON strings.

        Returns whatever ``parse_json`` produces; when it is a dict, every
        value is passed through ``evaluate_as_string`` in place to strip
        redundant quoting left over from stringified answers.
        """
        parsed_res = parse_json(response)
        # Drop the potentially duplicated string quotes
        # (rebinding values during iteration is safe: no keys are added/removed)
        if isinstance(parsed_res, dict):
            for key, val in parsed_res.items():
                parsed_res[key] = evaluate_as_string(val)

        return parsed_res
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/requirements.txt b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bc7e2d9758a7c1e2b97ab147aa16189e320ac912
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/requirements.txt
@@ -0,0 +1,15 @@
+antlr4-python3-runtime==4.11.0
+filelock==3.16.1
+geopy==2.4.1
+jieba==0.42.1
+nltk==3.9.1
+numpy==1.26.4
+pronouncing==0.2.0
+rapidfuzz==3.9.5
+regex==2024.7.24
+requests==2.32.3
+requests_cache==1.2.1
+sacrebleu==2.4.3
+sympy==1.13.2
+tqdm==4.66.4
+Unidecode==1.3.8
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/response_parse_type.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/response_parse_type.py
new file mode 100644
index 0000000000000000000000000000000000000000..9db9b7ff7bf260c3dad3df8a1ca55cedff3f17bb
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/response_parse_type.py
@@ -0,0 +1,54 @@
+from functools import cached_property
+from enum import Enum
+from .parsing.json_parse import JsonParse
+from .parsing.answer_str_parse import (
+ AnswerStrParse,
+ AsciiAnswerStrParse,
+ VerbatimAnswerStrParse,
+)
+from vlmeval.dataset.utils.megabench.parsing.dummy_parse import DummyParse
+
+
class ResponseParseType(Enum):
    """How a raw model response is turned into scoreable field values."""

    JSON = "json"
    ANSWER_STR = "answer_string"
    ASCII_ANSWER_STR = "ascii_answer_string"
    VERBATIM_ANSWER_STR = "verbatim_answer_string"
    DUMMY = "dummy"
    UNSUPPORTED = "unsupported"

    @cached_property
    def class_impl(self):
        """Return the parser class implementing this parse type.

        Types without a dedicated implementation (including UNSUPPORTED)
        fall back to the JSON parser.
        """
        if self == ResponseParseType.ANSWER_STR:
            return AnswerStrParse
        elif self == ResponseParseType.ASCII_ANSWER_STR:
            return AsciiAnswerStrParse
        elif self == ResponseParseType.VERBATIM_ANSWER_STR:
            return VerbatimAnswerStrParse
        elif self == ResponseParseType.DUMMY:
            return DummyParse
        else:
            return JsonParse

    def is_single_field_parser(self):
        """Whether this parser produces a single answer field from the response."""
        return self in [
            ResponseParseType.ANSWER_STR,
            ResponseParseType.ASCII_ANSWER_STR,
            ResponseParseType.VERBATIM_ANSWER_STR,
        ]

    def parse(self, response: str, *args, **kwargs):
        """Parse the response with this type's implementation."""
        return self.class_impl.parse(response, *args, **kwargs)

    @staticmethod
    def from_string(s):
        """Initialize the response parsing type from a string (None -> UNSUPPORTED)."""
        try:
            if s is None:
                return ResponseParseType("unsupported")
            return ResponseParseType(s.lower())
        # Bug fix: Enum lookup by value raises ValueError, not KeyError, so
        # the original handler was dead code.  The message also wrongly said
        # "metric type" (copy-paste from MetricType).
        except ValueError as exc:
            raise ValueError(f"Invalid response parse type: {s}") from exc
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/nli_entailment.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/nli_entailment.py
new file mode 100644
index 0000000000000000000000000000000000000000..71a29042a0b71f8775f7324b988cb96d51b32c9d
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/nli_entailment.py
@@ -0,0 +1,20 @@
+import torch
+from transformers import pipeline
+
+
# Module-level initialization: the MNLI classifier is loaded once at import
# time (downloads/loads "microsoft/deberta-large-mnli"), so importing this
# module is expensive and requires the model to be available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
pipe = pipeline(
    "text-classification", model="microsoft/deberta-large-mnli", device=device
)
+
+
class NliEntailment:
    """NLI entailment, where the correct answer is used as the premise."""

    @staticmethod
    def match(response, correct_answer) -> int:
        """Return 1 if the model labels premise=correct_answer / hypothesis=response
        as ENTAILMENT, else 0.  Non-string inputs score 0.
        """
        # Bug fix: the original guard read `or isinstance(correct_answer, str)`
        # (missing `not`), which returned 0 whenever the correct answer WAS a
        # string — i.e. for every normal input.  Both arguments must be strings.
        if not isinstance(response, str) or not isinstance(correct_answer, str):
            return 0
        resp = pipe(f"[CLS] {correct_answer.strip()} [SEP] {response.strip()} [SEP]")
        return 1 if resp[0]["label"] == "ENTAILMENT" else 0
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/sacrebleu_bleu.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/sacrebleu_bleu.py
new file mode 100644
index 0000000000000000000000000000000000000000..63a2a265ce644f7cf483a77565bcb8e1c92b4164
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/sacrebleu_bleu.py
@@ -0,0 +1,22 @@
+from numbers import Number
+import sacrebleu
+
+
class Bleu:
    """Compute BLEU score, using SacreBLEU."""

    @staticmethod
    def match(response, correct_answer) -> Number:
        """Compute the BLEU scores between two strings.

        Accepts a pair of strings or a pair of lists/tuples of strings;
        any other combination scores 0.
        """
        if isinstance(response, str) and isinstance(correct_answer, str):
            hyp, ref = [response], [correct_answer]
        elif isinstance(response, (list, tuple)) and isinstance(correct_answer, (list, tuple)):
            hyp, ref = tuple(response), tuple(correct_answer)
        else:
            return 0
        # NOTE(review): sacrebleu.corpus_bleu's signature is
        # (hypotheses, ref_streams); here the correct answers are passed in
        # the hypothesis slot — confirm this ordering is intentional.
        return sacrebleu.corpus_bleu(ref, [hyp]).score / 100
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/simple_str_match.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/simple_str_match.py
new file mode 100644
index 0000000000000000000000000000000000000000..e117410d7cfe4ca7498514839d344fbacf297ef4
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/simple_str_match.py
@@ -0,0 +1,29 @@
+from .exact_str_match import ExactStrMatch
+
+
class SimpleStrMatch:
    """Basic string matching, without spaces or hyphens."""

    # Characters removed from both sides before comparison.
    _STRIP = str.maketrans("", "", " -\n\t.")

    @staticmethod
    def match(response, correct_answer: str) -> int:
        """Simple string match between response and correct_answer."""
        if not isinstance(response, str):
            response = str(response)  # If it is JSON-like
        normalized_response = response.translate(SimpleStrMatch._STRIP).lower()
        normalized_answer = correct_answer.translate(SimpleStrMatch._STRIP).lower()
        return ExactStrMatch.match(normalized_response, normalized_answer)
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/utils.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c32d27d6bf6c3624d2dc2e8bf32250bbb38ba95
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/utils.py
@@ -0,0 +1,69 @@
+import importlib
+from mimetypes import guess_type
+
+
def lazy_import(module_name, class_name):
    """Return a zero-argument callable that imports and resolves the target on demand.

    The module is imported only when the returned callable is invoked,
    deferring heavyweight imports until actually needed.
    """

    def _resolver():
        target_module = importlib.import_module(module_name)
        return getattr(target_module, class_name)

    return _resolver
+
+
def is_video_file(file_path):
    """Return True when the file's guessed MIME type is a video type."""
    mime_type, _ = guess_type(file_path)
    return bool(mime_type) and mime_type.startswith("video")
+
+
+
def prepare_megabench_data(dataset_name, dataset_subset_name):
    """
    Prepare the MEGA-Bench dataset for evaluation.
    Return:
        subset_dataset: The organized data of the specified subset
        all_dataset: The organized data of all tasks, used for evaluation
    """
    from datasets import load_dataset

    # Single-image subsets live under dedicated HF configs.
    if "single_image" in dataset_subset_name:
        core_data = load_dataset(dataset_name, "core_single_image")
        open_data = load_dataset(dataset_name, "open_single_image")
    else:
        core_data = load_dataset(dataset_name, "core")
        open_data = load_dataset(dataset_name, "open")

    organized_core_dataset = organize_hf_dataset(list(core_data["test"]))
    organized_open_dataset = organize_hf_dataset(list(open_data["test"]))

    if "core" in dataset_subset_name:
        subset_dataset = organized_core_dataset
    else:
        subset_dataset = organized_open_dataset
    return subset_dataset, organized_core_dataset + organized_open_dataset
+
+
def organize_hf_dataset(dataset):
    """
    Organize the dataset with task-based manner

    Return:
        organized_dataset: list, each item is a dict, with the following keys:
            - task_name: str
            - task_query_samples: list of dicts, each dict contains the sample information
    """
    # Group samples by task; a plain dict preserves first-seen task order.
    grouped = {}
    for sample in dataset:
        grouped.setdefault(sample["task_name"], []).append(sample)

    return [
        {"task_name": name, "task_samples": samples}
        for name, samples in grouped.items()
    ]
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/multiple_choice.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/multiple_choice.py
new file mode 100644
index 0000000000000000000000000000000000000000..e965808aa096df2bf0227b3c30b057b96fc96cb8
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/multiple_choice.py
@@ -0,0 +1,667 @@
+import pandas as pd
+from ...utils import can_infer, track_progress_rich, can_infer_lego
+from ...smp import *
+import numpy as np
+import re
+
# Abbreviations for MMBench L2 ability names; used as column headers in
# the accuracy reports produced below.
MMB_abbrs = {
    'coarse_perception': 'CP',
    'finegrained_perception (instance-level)': 'FP-S',
    'finegrained_perception (cross-instance)': 'FP-C',
    'logic_reasoning': 'LR',
    'relation_reasoning': 'RR',
    'attribute_reasoning': 'AR'
}

# Abbreviations for MMT-Bench task names (l2-category -> short label),
# used by report_acc_MMT.
MMT_abbrs = {
    'visual_recognition': 'VR',
    'localization': 'Loc',
    'ocr': 'OCR',
    'counting': 'Count',
    'hallucination': 'HLN',
    'image_retrieval': 'IR',
    'threed': '3D',
    'visual_captioning': 'VC',
    'visual_grounding': 'VG',
    'doc_understanding': 'DU',
    'action_recognition': 'AR',
    'pixel_level_perception': 'PLP',
    'image-to-image_translation': 'I2IT',
    'relation_reasoning': 'RR',
    'intelligence_quotient_test': 'IQT',
    'emotion': 'Emo',
    'visual_illusion': 'VI',
    'meme_understanding': 'MemU',
    'visual_prompt_understanding': 'VPU',
    'anomaly_detection': 'AND',
    'keypoint_detection': 'KD',
    'visual_commonsense_reasoning': 'VCR',
    'image_evaluation_judgement': 'IEJ',
    'multiple_image_analysis': 'MIA',
    'cross_image_matching': 'CIM',
    'temporal_understanding': 'TU',
    'visual_code': 'VP',
    'medical_understanding': 'MedU',
    'autonomous_driving': 'AUD',
    'discipline_knowledge_reasoning': 'DKR',
    'embodied_ai': 'EA',
    'gui_navigation': 'GN'
}
+
+
def MMMU_preproc(data):
    """Reformulate MMMU open questions as two-option multiple choice in place.

    Rows whose option 'A' is missing are open questions: the ground-truth
    answer becomes option A and a dummy 'Other Answers' becomes option B.
    """
    logger = get_logger('Evaluation')
    As, Bs, Ans = list(data['A']), list(data['B']), list(data['answer'])
    cnt = 0
    for i in range(len(data)):
        if pd.isna(As[i]):
            As[i], Bs[i] = Ans[i], 'Other Answers'
            cnt += 1
    logger.info(f'During MMMU_preproc in Evaluation, {cnt} open questions are re-formulated to multi-choice ones. ')
    data['A'] = As
    data['B'] = Bs
    return data
+
+
def report_acc(df):
    """Report per-split mean accuracy: overall plus one column per
    l2-category / category ability (abbreviated via MMB_abbrs)."""
    res = defaultdict(list)

    # Ensure a 'split' column exists; default everything to a single split.
    if 'split' in df:
        res['split'] = list(set(df['split']))
    else:
        df['split'] = ['none'] * len(df)
        res['split'] = ['none']

    def _split_means(frame):
        return [np.mean(frame[frame['split'] == sp]['hit']) for sp in res['split']]

    res['Overall'] = _split_means(df)
    for group in ['l2-category', 'category']:
        if group not in df:
            continue
        for ability in sorted(set(df[group])):
            label = MMB_abbrs.get(ability, ability)
            res[label] = _split_means(df[df[group] == ability])
    return pd.DataFrame(res)
+
+
def report_acc_MMT(df):
    """Report MMT accuracy per split plus an aggregated 'ALL' pseudo-split.

    Category columns hold per-category means; l2-category columns hold the
    unweighted mean over their member categories (macro average).
    """
    # assert group in [None, 'category', 'l2-category']
    res = defaultdict(list)
    res['split'] = list()
    res['Overall'] = list()
    for _, name in MMT_abbrs.items():
        res[name] = list()

    if 'split' in df:
        splits = list(set(df['split']))
        res['split'] = splits

    else:
        df['split'] = ['none'] * len(df)
        res['split'] = ['none']

    for group in [None, 'category', 'l2-category']:
        if group is None:
            # Per-split overall accuracy, then the un-split mean for the 'ALL' row.
            res['Overall'] = [np.mean(df[df['split'] == sp]['hit']) for sp in res['split']]
            res['Overall'].extend([np.mean(df['hit'])])
        elif group not in df:
            continue
        elif group == 'category':
            abilities = list(set(df[group]))
            abilities.sort()
            for ab in abilities:
                ab_name = ab
                sub_df = df[df[group] == ab]
                res[ab_name] = [np.mean(sub_df[sub_df['split'] == sp]['hit']) for sp in res['split']]
                res[ab_name].extend([np.mean(sub_df['hit'])])
        else:
            abilities = list(set(df[group]))
            abilities.sort()
            for ab in abilities:
                # Macro-average over the categories belonging to this l2-category.
                sub_task_name_list = df[df['l2-category'] == ab]['category'].unique()
                sub_task_acc = []
                for sub_task_name in sub_task_name_list:
                    sub_df = df[df['category'] == sub_task_name]
                    sub_task_acc.append([np.mean(sub_df[sub_df['split'] == sp]['hit']) for sp in res['split']])

                new_acc = []
                for i in range(len(sub_task_acc[0])):
                    new_acc.append(sum([_[i] for _ in sub_task_acc]) / len([_ for _ in sub_task_acc]))
                ab_name = MMT_abbrs[ab] if ab in MMT_abbrs else ab
                res[ab_name] = new_acc

                # Same macro-average over the full (un-split) data for the 'ALL' row.
                sub_task_acc = []
                for sub_task_name in sub_task_name_list:
                    sub_df = df[df['category'] == sub_task_name]
                    sub_task_acc.append([np.mean(sub_df['hit'])])
                new_acc = []
                for i in range(len(sub_task_acc[0])):
                    new_acc.append(sum([_[i] for _ in sub_task_acc]) / len([_ for _ in sub_task_acc]))

                res[ab_name].extend(new_acc)

    res['split'].append('ALL')
    return pd.DataFrame(res)
+
+
def report_acc_MMSci(df):
    """Report MMSci accuracy by subject, by category, and overall.

    Rows are restricted to the three caption-matching settings; every output
    row carries per-setting accuracy plus their average.
    """
    settings = ['Fig2Cap', 'SubFig2Cap', 'SubCap2Fig']
    df_filtered = df[df['setting'].isin(settings)]

    def _acc_table(key):
        table = df_filtered.groupby([key, 'setting'])['hit'].mean().unstack(fill_value=0)
        table['Avg'] = table.mean(axis=1)
        table.reset_index(inplace=True)
        return table

    subject_acc = _acc_table('subject')

    category_acc = _acc_table('category')
    # Prefix categories so they are distinguishable in the shared 'subject' column.
    category_acc['category'] = 'CATEGORY_' + category_acc['category']
    category_acc.rename(columns={'category': 'subject'}, inplace=True)

    overall_acc = df_filtered.groupby(['setting'])['hit'].mean().to_frame().T
    overall_acc['Avg'] = overall_acc.mean(axis=1)
    overall_acc['subject'] = 'Overall'

    full_acc_df = pd.concat([subject_acc, category_acc, overall_acc], ignore_index=True)
    return full_acc_df[['subject'] + settings + ['Avg']]
+
+
def report_topviewrs_acc(df):
    """Report per-split accuracy for TopViewRS, with optional partial-match columns.

    Mirrors report_acc, but when the dataframe carries a 'partial_match'
    column an extra '<name>_PM' column is emitted alongside each accuracy.
    (Removed a leftover debug `print(df.columns)`.)
    """
    res = defaultdict(list)

    if 'split' in df:
        res['split'] = list(set(df['split']))
    else:
        df['split'] = ['none'] * len(df)
        res['split'] = ['none']

    has_pm = 'partial_match' in df

    for group in [None, 'l2-category', 'category']:
        if group is None:
            res['Overall'] = [np.mean(df[df['split'] == sp]['hit']) for sp in res['split']]
            if has_pm:
                res['Overall_PM'] = [np.mean(df[df['split'] == sp]['partial_match']) for sp in res['split']]
        elif group not in df:
            continue
        else:
            for ab in sorted(set(df[group])):
                ab_name = MMB_abbrs.get(ab, ab)
                sub_df = df[df[group] == ab]
                res[ab_name] = [np.mean(sub_df[sub_df['split'] == sp]['hit']) for sp in res['split']]
                if has_pm:
                    res[f'{ab_name}_PM'] = [
                        np.mean(sub_df[sub_df['split'] == sp]['partial_match'])
                        for sp in res['split']
                    ]

    return pd.DataFrame(res)
+
+
def build_prompt(question, options, prediction):
    """Build the GPT answer-matching prompt for a single-choice question."""
    instruction = (
        'You are an AI assistant who will help me to match '
        'an answer with several options of a single-choice question. '
        'You are provided with a question, several options, and an answer, '
        'and you need to find which option is most similar to the answer. '
        'If the meaning of all options are significantly different from the answer, output Z. '
        'Your should output a single uppercase character in A, B, C, D (if they are valid options), and Z. \n'
        'Example 1: \n'
        'Question: What is the main object in image?\nOptions: A. teddy bear B. rabbit C. cat D. dog\n'
        'Answer: a cute teddy bear\nYour output: A\n'
        'Example 2: \n'
        'Question: What is the main object in image?\nOptions: A. teddy bear B. rabbit C. cat D. dog\n'
        'Answer: Spider\nYour output: Z\n'
        'Example 3: \n'
    )
    return instruction + f'Question: {question}?\nOptions: {options}\nAnswer: {prediction}\nYour output: '
+
+
def build_prompt_wemath(question, options, prediction):
    """Build the answer-matching prompt for WeMath (options A-G, Z for no match)."""
    preamble = (
        'You are an AI assistant who will help me to match '
        'an answer with several options of a single-choice question. '
        'You are provided with a question, several options, and an answer, '
        'and you need to find which option is most similar to the answer. '
        'If the meaning of all options are significantly different from the answer, output Z. '
        'Your should output a single uppercase character in A, B, C, D, E, F, G (if they are valid options), and Z. \n'
        'Example 1: \n'
        'Question: \nWhat is the main object in image?\nOptions: A. teddy bear B. rabbit C. cat D. dog\n\n'
        'Answer: \na cute teddy bear\n\nYour output: A\n'
        'Example 2: \n'
        'Question: \nWhat is the main object in image?\nOptions: A. teddy bear B. rabbit C. cat D. dog\n\n'
        'Answer: \nSpider\n\nYour output: Z\n'
        'Example 3: \n'
    )
    # Strip the answer-format boilerplate WeMath appends to each question.
    boilerplate = (
        "Regarding the format, please answer following the template below, and be sure to include two <> symbols:\n"
        ": <> : <>"
    )
    question = question.replace(boilerplate, '')
    return preamble + f'Question: \n{question}\nOptions: {options}\n\nAnswer: \n{prediction}\n\nYour output: '
+
+
def build_prompt_blink(question, options, prediction):
    """Build the BLINK answer-matching prompt.

    Includes an explicit (Z) Failed option plus few-shot examples covering
    refusals and answers that conclude no option is correct.
    """
    tmpl = (
        'You are an AI assistant who will help me to match an answer with several options of a single-choice question. '
        'You are provided with a question, several options, and an answer, '
        'and you need to find which option is most similar to the answer. '
        "If the answer says things like refuse to answer, I'm sorry cannot help, etc., output Z."
        'If the meaning of all options are significantly different from the answer, '
        'or the answer does not select any option, output Z. '
        'Your should output one of the choices, A, B, C, D (if they are valid options), or Z.\n'
        'Example 1: \n'
        'Question: Which point is closer to the camera?\nSelect from the following choices.\n'
        'Options: A. Point A\nB. Point B\n(Z) Failed\n'
        'Answer: Point B, where the child is sitting, is closer to the camera.\nYour output: (B)\n'
        'Example 2: \n'
        'Question: Which point is closer to the camera?\nSelect from the following choices.\n'
        'Options: (A) Point A\n(B) Point B\n(Z) Failed\n'
        "Answer: I'm sorry, but I can't assist with that request.\nYour output: (Z)\n"
        'Example 3: \n'
        'Question: Which point is corresponding to the reference point?\nSelect from the following choices.\n'
        'Options: (A) Point A\n(B) Point B\n(Z) Failed\n'
        'Answer:The reference point (REF) on the first image is at the tip of the pot, '
        'which is the part used to Poke if the pots were used for that action. Looking at the second image, '
        'we need to find the part of the object that would correspond to poking.\n'
        "(A) Point A is at the tip of the spoon's handle, which is not used for poking.\n"
        '(B) Point B is at the bottom of the spoon, which is not used for poking.\n'
        '(C) Point C is on the side of the pspoonot, which is not used for poking.\n'
        '(D) Point D is at the tip of the spoon, which is not used for poking.\n'
        '\nTherefore, there is no correct answer in the choices\nYour output: (Z)\n'
        'Example 4: \n'
        'Question: {}?\nOptions: {}\n(Z) Failed\nAnswer: {}\nYour output: '
    )
    return tmpl.format(question, options, prediction)
+
+
def build_prompt_cn(question, options, prediction):
    """Build the Chinese-language answer-matching prompt (options A-D, Z for no match)."""
    tmpl = (
        '你是一个帮助我匹配答案与单选题中多个选项的 AI 助手。'
        '你会被提供:一个问题,多个选项,一个答案。你的任务是找到与答案意义最相近的选项。'
        '如果所有选项的意义都与答案显著不同,则输出 Z。'
        '你应该输出一个单个的大写字母,例如 A, B, C, D(如果它们是有效选项),或 Z。'
        '例 1:'
        '问题: 图中最主要的物体是什么?\n选项: A. 泰迪熊 B. 兔子 C. 猫 D. 狗\n答案: 一只可爱的泰迪熊\n输出: A\n'
        '例 2: \n'
        '问题: 图中最主要的物体是什么?\n选项: A. 泰迪熊 B. 兔子 C. 猫 D. 狗\n答案: 蜘蛛\n输出: Z\n'
        '例 3: \n'
        '问题: {}?\n选项: {}\n答案: {}\n输出: '
    )
    return tmpl.format(question, options, prediction)
+
+
def build_prompt_LEGO(question, options, prediction,question_type):
    """Build the answer-matching prompt for LEGO datasets.

    'sort' questions expect a permutation of ABCD rather than a single
    letter; every other question type falls back to the generic prompt.
    """
    if question_type == 'sort':
        tmpl = (
            'You are an AI assistant who will help me to arrange options in the correct order. '
            'You are provided with a question, four options, and an answer. '
            'You need to determine the correct ordering of options based on the answer. '
            'Output should be a permutation of ABCD (like DCBA or BADC).\n'
            'Example 1:\n'
            'Question: Arrange these historical events chronologically\n'
            'Options: A. Renaissance B. Middle Ages C. Industrial Revolution D. Digital Age\n'
            'Answer: From Middle Ages to Renaissance, then Industrial Revolution, finally Digital Age\n'
            'Output: BACD\n\n'
            'Example 2:\n'
            'Question: Sort colors by wavelength (longest to shortest)\n'
            'Options: A. Red B. Green C. Blue D. Violet\n'
            'Answer: Red has longest wavelength, followed by green then blue, shortest is violet\n'
            'Output: ABCD\n\n'
            'Example 3:\n'
            'Question: {}\nOptions: {}\nAnswer: {}\nOutput:'
        )
        return tmpl.format(question, options, prediction)
    else:
        return build_prompt(question, options, prediction)
+
+
def build_choices(item):
    """Collect {option letter: option text} for every non-NaN uppercase key in item."""
    return {
        letter: item[letter]
        for letter in string.ascii_uppercase
        if letter in item and not pd.isna(item[letter])
    }
+
+
def prefetch_answer(item):
    """Try to infer the chosen option directly from the raw prediction text."""
    return can_infer(item['prediction'], build_choices(item))
+
+
def extract_answer_from_item(model, item, dataset_name=None):
    """Extract the chosen option letter from an item's prediction.

    Tries rule-based inference first; if that fails, queries the judge
    model up to 3 times; finally falls back to a random option.
    Returns dict(opt=<letter>, log=<match trace>).
    """
    logger = get_logger('Evaluation')
    # It will return: (pred, raw, llm_time)
    choices = build_choices(item)
    option_str = build_option_str(choices)

    # Build a dataset-appropriate judge prompt.
    if dataset_name == 'BLINK':
        prompt = build_prompt_blink(item['question'], option_str, item['prediction'])
    elif dataset_name == 'WeMath':
        prompt = build_prompt_wemath(item['question'], option_str, item['prediction'])
    elif cn_string(item['question']):
        prompt = build_prompt_cn(item['question'], option_str, item['prediction'])
    elif dataset_name is not None and 'LEGO' in dataset_name:
        prompt = build_prompt_LEGO(item['question'], option_str, item['prediction'], item['question_type'])
    else:
        prompt = build_prompt(item['question'], option_str, item['prediction'])
    retry = 3

    if dataset_name is not None and 'LEGO' in dataset_name:
        ret = can_infer_lego(item['prediction'], item['question_type'], choices)
    else:
        ret = can_infer(item['prediction'], choices)
    if ret:
        return dict(opt=ret, log=item['prediction'])
    if model is None:
        return dict(opt='Z', log='Failed in Prefetch, no GPT-based answer matching under `exact_matching` policy.')

    while retry:
        ans = model.generate(prompt)
        if 'Failed to obtain answer via API' in ans:
            logger.warning('GPT API failed to answer. ')
        else:
            if dataset_name is not None and 'LEGO' in dataset_name:
                ret = can_infer_lego(ans, item['question_type'], choices)
            else:
                ret = can_infer(ans, choices)
            if ret:
                return dict(opt=ret, log=ans)
            else:
                # Parenthesize the conditional: previously the trailing ternary
                # swallowed the whole concatenated f-string, so the warning
                # collapsed to '' for items without an 'answer' field.
                logger.warning(
                    f'Failed to in infer: prediction is {ans}, choice labels are {set(choices)}'
                    + (f', Answer is {item["answer"]}' if 'answer' in item else '')
                )
        retry -= 1

    if retry == 0:
        # Random fallback. Parentheses matter: `a + b if cond else []` binds
        # as `(a + b) if cond else []`, which previously produced an EMPTY
        # pool (and an IndexError from rd.choice) whenever 'Z' was already
        # among the options.
        options = list(choices) + (['Z'] if 'Z' not in choices else [])
        return dict(opt=rd.choice(options), log='Failed to predict, thus randomly generate one. ')
+
+
# For Circular Evaluation
def prefetch_circular_group(sub_data, verbose=False):
    """Rule-based pre-check of one circular group (all rolled variants).

    Return shape varies (NOTE: callers must handle both):
      - an early contradicting prefetch returns dict(hit=0, log=...) directly,
        even when verbose=True;
      - otherwise the result is dict(hit=1, ...) if every variant prefetched
        correctly, else None; with verbose=True it is wrapped as the tuple
        (result_or_None, GT, PRED).
    """
    lt = len(sub_data)
    GT, PRED = [], []
    for i in range(lt):
        item = sub_data.iloc[i]
        GT.append(item['GT'])
        PRED.append(prefetch_answer(item))
        # A confidently-prefetched wrong answer short-circuits the whole group.
        if PRED[-1] and (GT[-1] != PRED[-1]):
            log = (
                f'Failed in Prefetching Rolling {i}: Answer is {GT[-1]}, '
                f"Prediction is {item['prediction']}, Pre-fetched is {PRED[-1]}. "
            )
            return dict(hit=0, log=log)
    # flag is True only when every variant both prefetched AND matched its GT.
    flag = True
    for g, p in zip(GT, PRED):
        if g != p:
            flag = False
    ret = (dict(hit=1, log='Succeed During Pre-fetching'), ) if flag else (None, )
    ret = ret + (GT, PRED) if verbose else ret
    return ret if len(ret) > 1 else ret[0]
+
+
def eval_vanilla(model, item, dataset_name=None):
    """Score one item: hit=1 iff the extracted option equals the ground truth."""
    res = extract_answer_from_item(model, item, dataset_name=dataset_name)
    hit = 1 if res['opt'] == item['GT'] else 0
    return dict(hit=hit, log=f"Match Log: {res['log']}. ")
+
+
# For Circular Evaluation
def eval_circular_group(model, sub_data, dataset_name=None):
    """Evaluate one circular group: hit=1 only if every rolled variant matches its GT.

    Pre-fetched matches are reused; only variants the rule-based pass could
    not resolve are sent to the judge model. Returns dict(hit=0/1, log=...).
    """
    # Single verbose prefetch (previously called twice, doubling the work).
    # It returns either a bare result dict (early contradicting prefetch) or
    # a (result_or_None, GT, PRED) triple.
    prefetched = prefetch_circular_group(sub_data, verbose=True)
    if isinstance(prefetched, dict):
        return prefetched
    res, GT, PRED = prefetched
    if res is not None:
        return res

    lt = len(sub_data)
    log = ''
    for i in range(lt):
        if PRED[i]:
            log += f'Rolling {i} Matched.\n'
        else:
            res = extract_answer_from_item(model, sub_data.iloc[i], dataset_name=dataset_name)
            opt, match_log = res['opt'], res['log']
            PRED[i] = opt
            if PRED[i] != GT[i]:
                log += (
                    f"Failed in Rolling {i}: Answer is {GT[i]}; Prediction is {sub_data.iloc[i]['prediction']}; "
                    f'Pre-fetched is {PRED[i]}; Match Log is {match_log}.\n'
                )
                return dict(hit=0, log=log)
            else:
                log += (
                    f"Rolling {i}: Answer is {GT[i]}, Prediction is {sub_data.iloc[i]['prediction']}, "
                    f'Pre-fetched is {PRED[i]}.\n'
                )

    return dict(hit=1, log=log)
+
+
# data, meta are pd.DataFrame, result_file is a path
def mcq_vanilla_eval(model, data, meta, nproc, result_file, dataset_name=None):
    """Run (non-circular) MCQ answer matching and attach 'hit'/'log' columns.

    Per-item results are cached in result_file so interrupted runs resume.
    """
    result = {}
    if osp.exists(result_file):
        result = load(result_file)
    # index -> ground-truth answer letter
    answer_map = {i: c for i, c in zip(meta['index'], meta['answer'])}

    if 'MMMU' in dataset_name:
        data = MMMU_preproc(data)
        # Open questions were remapped so option 'A' always holds the GT.
        answer_map = {k: (v if v in list(string.ascii_uppercase) else 'A') for k, v in answer_map.items()}

    data = data[data['index'].isin(answer_map)]
    data['GT'] = [answer_map[idx] for idx in data['index']]
    items = []

    for i in range(len(data)):
        # Dealing with the normal part
        item = data.iloc[i]
        if item['index'] not in result:
            items.append(item)

    tups = [dict(model=model, item=x, dataset_name=dataset_name) for x in items]
    keys = [x['index'] for x in items]
    if len(tups):
        # Parallel matching; per-item results stream into result_file as they finish.
        res = track_progress_rich(eval_vanilla, tups, nproc=nproc, chunksize=nproc, save=result_file, keys=keys)
        result = load(result_file)
        for k, v in zip(keys, res):
            if k not in result:
                result[k] = v
    data['hit'] = [result[i]['hit'] for i in data['index']]
    data['log'] = [result[i]['log'] for i in data['index']]
    if 'GT' in data:
        data.pop('GT')
    return data
+
+
# data, meta are pd.DataFrame, result_file is a path
def mcq_circular_eval(model, data, meta, nproc, result_file, dataset_name=None):
    """Run circular-evaluation MCQ matching: a group scores hit=1 only when
    every rolled variant of the question is answered correctly.

    Group results are cached in result_file (keyed by g_index) for resumption.
    """
    result = {}
    if osp.exists(result_file):
        result = load(result_file)
    # Build Answer Map
    answer_map = {i: c for i, c in zip(meta['index'], meta['answer'])}

    for idx in list(meta['index']) + list(data['index']):
        assert istype(idx, int)
    # g_index ties each rolled variant back to its base question (index % 1e6).
    if 'g_index' not in data:
        data['g_index'] = [int(x % 1e6) for x in data['index']]

    # Only keep those lines in the meta data
    data = data[data['index'].isin(answer_map)]
    data['GT'] = [answer_map[idx] for idx in data['index']]

    # data_main keeps only the base (unrolled) questions.
    data['tmp_flag'] = [x == y for x, y in zip(data['index'], data['g_index'])]
    data_main = data[data['tmp_flag']]
    data_main.pop('tmp_flag')

    data_groups = []
    for i in range(len(data_main)):
        # Dealing with the normal part
        idx = data_main.iloc[i]['index']
        if idx not in result:
            sub_data = data[data['g_index'] == idx]
            data_groups.append(sub_data)

    if len(data_groups):
        # Resolve what we can by rule-based prefetching before invoking the LLM.
        prefetched = [prefetch_circular_group(g, verbose=False) for g in data_groups]
        remain = []
        for dg, pf in zip(data_groups, prefetched):
            if pf is not None:
                result[dg.iloc[0]['g_index']] = pf
            else:
                remain.append(dg)
        dump(result, result_file)

        tups = [dict(model=model, sub_data=x, dataset_name=dataset_name) for x in remain]
        keys = [x.iloc[0]['g_index'] for x in remain]

        if len(tups) == 0:
            pass
        elif model is None:
            logger = get_logger('Evaluation')
            logger.warning('Exact Matching mode, will not do GPT-based answer matching. ')
            for k in keys:
                result[k] = dict(
                    hit=0, log='Failed in Prefetch, no GPT-based answer matching under `exact_matching` policy.')
        else:
            res = track_progress_rich(
                eval_circular_group,
                tups,
                nproc=nproc,
                chunksize=nproc,
                save=result_file,
                keys=keys)
            result = load(result_file)
            for k, v in zip(keys, res):
                if k not in result:
                    result[k] = v

    # NOTE(review): dump/load round-trip through a temp file — presumably to
    # normalize column dtypes after filtering; confirm before removing.
    tmp_ext = get_pred_file_format()
    tmp_pth = f'/tmp/{timestr()}.{tmp_ext}'
    dump(data_main, tmp_pth)
    data_main = load(tmp_pth)
    indices = data_main['index']
    data_main['hit'] = [result[i]['hit'] for i in indices]
    data_main['log'] = [result[i]['log'] for i in indices]
    if 'GT' in data_main:
        data_main.pop('GT')

    return data_main
+
+
def extract_characters_regex(s, choices=['(A)', '(B)', '(C)', '(D)', '(E)']):
    """Extract a single option letter (A-E) from a free-form answer string.

    Strips common answer prefixes first so capital letters inside them
    (e.g. the 'B' in 'Best answer:') cannot be mistaken for the choice.
    Returns '' when no letter can be recovered.
    """
    if type(s) is dict:
        s = ''
    s = s.strip()
    answer_prefixes = [
        'The best answer is',
        'The correct answer is',
        'The answer is',
        'The answer',
        # Missing commas previously fused the next two pairs into
        # 'The best option isThe correct option is' and
        # 'Best answer:Best option:', so none of these four was ever stripped.
        'The best option is',
        'The correct option is',
        'Best answer:',
        'Best option:',
    ]
    for answer_prefix in answer_prefixes:
        s = s.replace(answer_prefix, '')

    if len(s.split()) > 10 and not re.search(r'[ABCDE]', s):
        return ''
    matches = re.search(r'[ABCDE]', s)
    if matches is None:
        # Fall back: the whole (lower-cased) answer may literally be an option.
        # Guard against the empty string, which is a substring of every choice
        # and previously made empty answers return 'A'.
        for choice in choices:
            if s.lower() and s.lower() in choice.lower():
                return choice[1]
        return ''
    return matches[0]
+
+
def get_dimension_rating(data_path):
    """Aggregate per-question scores into Task -> Subtask -> Category accuracies.

    Returns a nested dict: each category maps to its accuracy, each subtask
    and task carries an 'Avg' entry, and 'Overall' is the global accuracy.
    """
    TASKS = [
        'Reasoning',
        'Perception',
    ]

    SUBTASKS = [
        'Monitoring',
        'Autonomous_Driving',
        'OCR with Complex Context',
        'Diagram and Table',
        'Remote Sensing',
    ]
    data = load(data_path)
    results = {}
    results['Overall'] = {}
    for task in TASKS:
        results[f'{task}'] = {}
        for subtask in SUBTASKS:
            results[f'{task}'][f'{subtask}'] = {}

    # First pass: accumulate score-weighted true/false counts per category.
    for i in range(len(data)):
        question = data.iloc[i]
        Task = question['category'].split('/')[0]
        Subtask = question['category'].split('/')[1]
        Category = question['l2-category'].lower()
        if 'attribute' in Category.lower():
            Category = Category.split('/')[0] + '/attribute'
        # Negative scores mark unscored questions and are skipped.
        if question['score'] >= 0:
            cnt = question['score']
            if Category not in results[Task][Subtask].keys():
                results[Task][Subtask][f'{Category}'] = {'true': cnt, 'false': 1 - cnt}
            else:
                results[Task][Subtask][f'{Category}']['true'] += cnt
                results[Task][Subtask][f'{Category}']['false'] += 1 - cnt

    # Second pass: collapse counts into accuracies, adding per-level averages.
    sum_all, succ_all = 0, 0
    for task, tasks_values in results.items():
        cnt_task, sum_task = 0, 0
        for substask, subtask_value in tasks_values.items():
            cnt_subtask, sum_subtask = 0, 0
            for category, category_dict in subtask_value.items():
                cnt_subtask += category_dict['true']
                sum_subtask += category_dict['false'] + category_dict['true']
                acc = category_dict['true'] / (category_dict['false'] + category_dict['true'])
                results[task][substask][category] = acc
            if sum_subtask == 0:
                acc_subtasks = 0
            else:
                acc_subtasks = cnt_subtask / sum_subtask
            cnt_task += cnt_subtask
            sum_task += sum_subtask
            results[task][substask]['Avg'] = acc_subtasks
        if sum_task == 0:
            acc_task = 0
        else:
            acc_task = cnt_task / sum_task
        succ_all += cnt_task
        sum_all += sum_task
        results[task]['Avg'] = acc_task
    results['Overall'] = succ_all / sum_all
    return results
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/naturalbench.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/naturalbench.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed9a9576d2a61c61424de3f6cbbbd1456d627291
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/naturalbench.py
@@ -0,0 +1,145 @@
+import re
+
+
def extract_answer(output_string, task_type="yes_no"):
    """
    Extracts the answer from the output string based on the task type.

    Parameters:
    output_string (str): The output string.
    task_type (str): The type of task. Must be either "yes_no" or "multiple_choice".

    Returns:
    int:
        1 if "yes" or "A"
        0 if "no" or "B"
        -1 if no relevant answer is found.
    Raises a ValueError if an unsupported task_type is provided.
    """
    if task_type not in ["yes_no", "multiple_choice"]:
        raise ValueError(f"Task type {task_type} not supported. Must be 'yes_no' or 'multiple_choice'.")

    def _first_occurrence(text, word):
        # Whole-word, case-insensitive search; -1 when absent.
        found = re.search(r'\b' + re.escape(word) + r'\b', text, re.IGNORECASE)
        return found.start() if found else -1

    positive_word, negative_word = ("yes", "no") if task_type == "yes_no" else ("A", "B")
    pos_positive = _first_occurrence(output_string, positive_word)
    pos_negative = _first_occurrence(output_string, negative_word)

    if pos_positive == -1 and pos_negative == -1:
        print(f"No answer found in the output string: {output_string}.")
        return -1
    if pos_positive != -1 and pos_negative != -1:
        # Both present: whichever appears first wins.
        return 1 if pos_positive < pos_negative else 0
    return 1 if pos_positive != -1 else 0
+
+
def get_scores(scores):
    """
    Calculate various scores based on the given results.

    Args:
        scores (dict or list): A dictionary or list containing results where each result can be:
            - dict: {id: {"q0_i0": 1 or 0, "q0_i1": 1 or 0, "q1_i0": 1 or 0, "q1_i1": 1 or 0}, ...}
            - list: [[q0_i0 (1 or 0), q0_i1 (1 or 0), q1_i0 (1 or 0), q1_i1 (1 or 0)], ...]

        "q<j>_i<k>" is question j evaluated on image k.

    Returns:
        dict: A dictionary containing the calculated scores:
            - 'Q_Acc': Average question score
            - 'I_Acc': Average image score
            - 'Acc': Average binary VQA score
            - 'G_Acc': Average group score
    """

    def _as_tuple(result):
        # Normalize a result to the canonical order (q0_i0, q0_i1, q1_i0, q1_i1).
        if isinstance(result, dict):
            return (result["q0_i0"], result["q0_i1"], result["q1_i0"], result["q1_i1"])
        return (result[0], result[1], result[2], result[3])

    def _question_score(r):
        # A question is correct when it is right on its image and wrong on the other.
        q0_i0, q0_i1, q1_i0, q1_i1 = r
        correct = 0
        if q0_i0 == 1.0 and q0_i1 == 0.0:
            correct += 1
        if q1_i1 == 1.0 and q1_i0 == 0.0:
            correct += 1
        return correct

    def _image_score(r):
        # An image is correct when its matching question wins over the other question.
        q0_i0, q0_i1, q1_i0, q1_i1 = r
        correct = 0
        if q0_i0 == 1.0 and q1_i0 == 0.0:
            correct += 1
        if q1_i1 == 1.0 and q0_i1 == 0.0:
            correct += 1
        return correct

    def _binary_score(r):
        # Each of the four question/image pairings judged independently.
        q0_i0, q0_i1, q1_i0, q1_i1 = r
        return (
            (1 if q0_i0 == 1.0 else 0)
            + (1 if q0_i1 == 0.0 else 0)
            + (1 if q1_i0 == 0.0 else 0)
            + (1 if q1_i1 == 1.0 else 0)
        )

    def _group_score(r):
        # The whole group counts only when questions and images are all correct.
        return 1 if _question_score(r) == 2 and _image_score(r) == 2 else 0

    num_samples = len(scores)
    results_iter = scores.values() if isinstance(scores, dict) else scores

    Q_Acc = I_Acc = Acc = G_Acc = 0.0
    for raw in results_iter:
        r = _as_tuple(raw)
        Q_Acc += _question_score(r)
        I_Acc += _image_score(r)
        Acc += _binary_score(r)
        G_Acc += _group_score(r)

    return {
        'Q_Acc': Q_Acc / float(num_samples * 2),
        'I_Acc': I_Acc / float(num_samples * 2),
        'Acc': Acc / float(num_samples * 4),
        'G_Acc': G_Acc / num_samples
    }
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/ocrbrnch_v2_eval.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/ocrbrnch_v2_eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..abceeaf5b6095e451401076e3fbf604f49ac83a8
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/ocrbrnch_v2_eval.py
@@ -0,0 +1,441 @@
+# flake8: noqa
+import os
+import json
+import ipdb
+import argparse
+
+import os
+import re
+import ast
+import json
+import argparse
+import numpy as np
+from tqdm import tqdm
+from .Ocrbench_v2.vqa_metric import vqa_evaluation, cn_vqa_evaluation, math_expression_evaluation, vqa_evaluation_case_sensitive, counting_evaluation, cn_math_expression_evaluation
+from .Ocrbench_v2.IoUscore_metric import vqa_with_position_evaluation, calculate_iou, extract_coordinates
+from .Ocrbench_v2.TEDS_metric import TEDS, convert_markdown_table_to_html, convert_str_to_dict, convert_str_to_multi_dict, generate_combinations, dict_to_html, compute_f1_score, doc_parsing_evaluation, wrap_html_table
+from .Ocrbench_v2.page_ocr_metric import cal_per_metrics
+from .Ocrbench_v2.spotting_metric import extract_bounding_boxes_robust, spotting_evaluation
+
+
+ def is_nan_value(value):
+ # Return True when `value` represents a missing answer: None, the literal
+ # string "nan"/"NaN", or anything pandas considers NA (e.g. float('nan')).
+ if value is None:
+ return True
+ if isinstance(value, str) and value.lower() == 'nan':
+ return True
+ try:
+ # Local import keeps pandas optional for this check.
+ import pandas as pd
+ if pd.isna(value):
+ return True
+ except:
+ # NOTE(review): bare except swallows everything (including
+ # KeyboardInterrupt); should be `except Exception:` at minimum.
+ # Presumably guarding against pd.isna raising on array-likes --
+ # confirm before narrowing.
+ pass
+ return False
+
+
+ def get_value_or_zero(value):
+ # Treat a missing (None) metric as 0.0 so averaged OCR metrics never
+ # propagate None into arithmetic; any non-None value passes through as-is.
+ return 0.0 if value is None else value
+
+
+ def calculate_average(scores_dict):
+ # Mean of each non-empty score list, keyed by category; categories with
+ # no samples are dropped (avoids ZeroDivisionError).
+ averages = {key: sum(values) / len(values) for key, values in scores_dict.items() if len(values) > 0}
+ return averages
+
+
+ def process_predictions(predict_file):
+ # Score every OCRBench-v2 prediction record in place and return the list.
+ # Each data_item carries "type" (task name), "question", "predict" (model
+ # output) and "answers" (ground-truth list); a numeric "score" in [0, 1]
+ # is written back onto the item. Items that cannot be scored fairly are
+ # additionally tagged with data_item["ignore"] = "True".
+ # NOTE(review): `teds` and the loop index `index` are unused in the code
+ # visible here -- the table-parsing branch below appears truncated; verify
+ # against the upstream OCRBench-v2 evaluation script.
+ teds = TEDS(n_jobs=32)
+
+ res_data_list = []
+
+ for index, data_item in enumerate(tqdm(predict_file)):
+ # --- English recognition / QA tasks: multiple choice, case-sensitive,
+ # or default fuzzy VQA matching. ---
+ if data_item["type"] == "APP agent en" or data_item["type"] == "ASCII art classification en" or data_item["type"] == "math QA en" \
+ or data_item["type"] == "reasoning VQA en" or data_item["type"] == "science QA en" \
+ or data_item["type"] == "text recognition en" or data_item["type"] == "document classification en" \
+ or data_item["type"] == "cognition VQA en" or data_item["type"] == "diagram QA en":
+ if "eval" in data_item.keys():
+ if data_item["eval"] == "multiple choice":
+ if not isinstance(data_item["answers"], list):
+ data_item["answers"] = [data_item["answers"]]
+ assert len(data_item["answers"]) == 1
+
+ if not isinstance(data_item["predict"], str):
+ data_item["score"] = 0
+ else:
+ # Keep only letters so "(A)" / "A." both reduce to "A".
+ predict = ''.join(c for c in data_item["predict"] if c.isalpha())
+
+ if predict == data_item["answers"][0]:
+ data_item["score"] = 1
+ else:
+ data_item["score"] = 0
+ elif data_item["eval"] == "case sensitive":
+ data_item["score"] = vqa_evaluation_case_sensitive(data_item["predict"], data_item["answers"])
+ else:
+ raise ValueError("No such evaluation method")
+ else:
+ data_item["score"] = vqa_evaluation(data_item["predict"], data_item["answers"])
+
+ # --- Chinese VQA tasks: same eval modes, Chinese fuzzy matcher. ---
+ elif data_item["type"] == "cognition VQA cn" or data_item["type"] == "reasoning VQA cn":
+ if "eval" in data_item.keys():
+ if data_item["eval"] == "multiple choice":
+ assert len(data_item["answers"]) == 1
+ predict = ''.join(c for c in data_item["predict"] if c.isalpha())
+
+ if predict == data_item["answers"][0]:
+ data_item["score"] = 1
+ else:
+ data_item["score"] = 0
+ elif data_item["eval"] == "case sensitive":
+ data_item["score"] = vqa_evaluation_case_sensitive(data_item["predict"], data_item["answers"])
+ else:
+ raise ValueError("No such evaluation method")
+ else:
+ data_item["score"] = cn_vqa_evaluation(data_item["predict"], data_item["answers"])
+
+ # --- Handwritten answer extraction (cn): free-form answers use averaged
+ # OCR metrics; short answers use containment with separator variants. ---
+ elif data_item["type"] == "handwritten answer extraction cn":
+ if "简答" in data_item["question"]:
+ ocr_metric = cal_per_metrics(data_item["predict"], data_item["answers"][0])
+ # Average of BLEU, METEOR, F-measure and (1 - edit distance).
+ data_item["score"] = (
+ get_value_or_zero(ocr_metric["bleu"]) +
+ get_value_or_zero(ocr_metric["meteor"]) +
+ get_value_or_zero(ocr_metric["f_measure"]) +
+ (1 - get_value_or_zero(ocr_metric["edit_dist"]))
+ ) / 4
+ else:
+ assert len(data_item["answers"]) == 1
+ answer = data_item["answers"][0]
+ chars = list(answer)
+ if len(answer) > 1:
+
+ # Accept the multi-character answer joined by any common
+ # separator (e.g. "AB", "A.B", "A、B", "A和B").
+ answer_list = [
+ "".join(chars),
+ ".".join(chars),
+ ". ".join(chars),
+ ",".join(chars),
+ ", ".join(chars),
+ "、".join(chars),
+ ";".join(chars),
+ "; ".join(chars),
+ " ".join(chars),
+ "和".join(chars)
+ ]
+ max_score = 0
+ for answer in answer_list:
+ if answer in data_item["predict"]:
+ temp_score = 1
+ else:
+ temp_score = 0
+ if temp_score > max_score:
+ max_score = temp_score
+ data_item["score"] = max_score
+
+ else:
+ if data_item["answers"][0] in data_item["predict"]:
+ data_item["score"] = 1
+ else:
+ data_item["score"] = 0
+
+ elif data_item["type"] == "formula recognition cn":
+ if is_nan_value(data_item["predict"]):
+ data_item["score"] = 0
+ else:
+ data_item["score"] = cn_math_expression_evaluation(data_item["predict"], data_item["answers"])
+
+ elif data_item["type"] == "text counting en":
+ data_item["score"] = counting_evaluation(data_item["predict"], data_item["answers"], data_item["eval"])
+
+ elif data_item["type"] == "formula recognition en":
+ data_item["score"] = math_expression_evaluation(data_item["predict"], data_item["answers"])
+
+ elif data_item["type"] == "table parsing en":
+ if type(data_item["answers"])==list and len(data_item["answers"]) == 1:
+ if not isinstance(data_item["predict"], str):
+ data_item["score"] = 0
+ elif not isinstance(data_item["question"], str):
+ data_item["ignore"] = "True"
+ data_item["score"] = 0
+
+ elif "html" in data_item["question"].lower():
+ no_find = False
+ predict_table = data_item["predict"].replace('\n','')
+ # NOTE(review): the next line is syntactically truncated ("if "
+ # followed by a string fragment); the HTML-table TEDS scoring and
+ # the multi-answer branch appear to have been lost here. Restore
+ # from the upstream OCRBench-v2 evaluation script.
+ if " max_score:
+ max_score = data_item["score"]
+ data_item["score"] = max_score
+
+ # --- Key information extraction (cn): best F1 over every answer
+ # combination generated from the ground-truth dict. ---
+ elif data_item["type"] == "key information extraction cn":
+ assert len(data_item["answers"]) == 1
+ answers = ast.literal_eval(data_item["answers"][0])
+ answers = {k: v if isinstance(v, list) else [v] for k, v in answers.items()}
+ answers = generate_combinations(answers)
+ if type(answers)==list and len(answers) == 1:
+ if not isinstance(data_item["predict"], str):
+ data_item["score"] = 0
+ else:
+ pred_kie_dict = convert_str_to_dict(data_item["predict"])
+ data_item["score"] = compute_f1_score(pred_kie_dict, answers[0])
+ else:
+ max_score = 0
+ for answer in answers:
+ pred_kie_dict = convert_str_to_dict(data_item["predict"])
+ data_item["score"] = compute_f1_score(pred_kie_dict, answer)
+
+ if data_item["score"] > max_score:
+ max_score = data_item["score"]
+ data_item["score"] = max_score
+
+ elif data_item["type"] == "VQA with position en":
+ if not isinstance(data_item["predict"], str):
+ data_item["score"] = 0
+ else:
+ pred_dict = convert_str_to_dict(data_item["predict"])
+ data_item["score"] = vqa_with_position_evaluation(pred_dict, data_item)
+
+ elif data_item["type"] == "text translation cn":
+ if len(data_item["predict"]) == 0:
+ data_item["score"] = 0
+ elif len(data_item["answers"][0]) == 0:
+ # Empty ground truth cannot be scored fairly -> exclude later.
+ data_item["score"] = 0
+ data_item["ignore"] = "True"
+ else:
+ ocr_metric = cal_per_metrics(data_item["predict"], data_item["answers"][0])
+ data_item["score"] = (ocr_metric["bleu"] + ocr_metric["meteor"] + ocr_metric["f_measure"] + (1 - ocr_metric["edit_dist"])) / 4
+
+ elif data_item["type"] == "fine-grained text recognition en":
+ if not isinstance(data_item["predict"], str):
+ data_item["score"] = 0
+ elif len(data_item["predict"]) == 0:
+ data_item["score"] = 0
+ else:
+ ocr_metric = cal_per_metrics(data_item["predict"], data_item["answers"][0])
+ data_item["score"] = (
+ get_value_or_zero(ocr_metric["bleu"]) +
+ get_value_or_zero(ocr_metric["meteor"]) +
+ get_value_or_zero(ocr_metric["f_measure"]) +
+ (1 - get_value_or_zero(ocr_metric["edit_dist"]))
+ ) / 4
+ elif data_item["type"] == "full-page OCR en":
+ if not data_item["predict"]:
+ data_item["score"] = 0
+ else:
+ ocr_metric = cal_per_metrics(data_item["predict"], data_item["answers"][0])
+ data_item["score"] = (
+ get_value_or_zero(ocr_metric["bleu"]) +
+ get_value_or_zero(ocr_metric["meteor"]) +
+ get_value_or_zero(ocr_metric["f_measure"]) +
+ (1 - get_value_or_zero(ocr_metric["edit_dist"]))
+ ) / 4
+
+ elif data_item["type"] == "full-page OCR cn":
+ if not isinstance(data_item["predict"], str):
+ data_item["score"] = 0
+ else:
+ if len(data_item["predict"]) == 0:
+ data_item["score"] = 0
+ else:
+ ocr_metric = cal_per_metrics(data_item["predict"], data_item["answers"][0])
+ data_item["score"] = (ocr_metric["bleu"] + ocr_metric["meteor"] + ocr_metric["f_measure"] + (1 - ocr_metric["edit_dist"])) / 4
+
+ elif data_item["type"] == "text grounding en":
+ if not isinstance(data_item["predict"], str):
+ data_item["score"] = 0
+ else:
+ predict_bbox = extract_coordinates(data_item["predict"])
+ if not predict_bbox:
+ data_item["score"] = 0
+ else:
+ # IoU between the predicted box and the ground-truth box.
+ data_item["score"] = calculate_iou(predict_bbox, data_item["answers"])
+
+ elif data_item["type"] == "text spotting en":
+ if not isinstance(data_item["predict"], str):
+ data_item["score"] = 0
+ else:
+ predict_bbox = extract_bounding_boxes_robust(data_item["predict"])
+ if not predict_bbox:
+ data_item["score"] = 0
+ else:
+ data_item["score"] = spotting_evaluation(predict_bbox, data_item)
+
+ else:
+ raise ValueError("Unknown task type!")
+
+ res_data_list.append(data_item)
+
+
+ return res_data_list
+
+
+
+ def ocrbench_v2_aggregate_accuracy(data_list):
+ # Bucket per-item scores (from process_predictions) into the OCRBench-v2
+ # English / Chinese track categories and return the per-category means as
+ # two dicts: (en_averages, cn_averages). Items tagged "ignore" are skipped.
+ en_text_recognition_list, en_text_detection_list, en_text_spotting_list, en_relationship_extraction_list = [], [], [], []
+ en_element_parsing_list, en_mathematical_calculation_list, en_visual_text_understanding_list = [], [], []
+ en_knowledge_reasoning_list = []
+
+ cn_text_recognition_list, cn_relationship_extraction_list = [], []
+ cn_element_parsing_list, cn_visual_text_understanding_list = [], []
+ cn_knowledge_reasoning_list = []
+
+ # NOTE(review): res_list is never used below -- dead variable.
+ res_list = []
+ for item in data_list:
+ if "ignore" in item.keys():
+ # Items marked unscorable upstream are excluded from every bucket.
+ assert item["ignore"] == "True"
+
+ elif item["type"] == "text recognition en" or item["type"] == "fine-grained text recognition en" or item["type"] == "full-page OCR en":
+ en_text_recognition_list.append(item["score"])
+
+ elif item["type"] == "text grounding en" or item["type"] == "VQA with position en":
+ en_text_detection_list.append(item["score"])
+
+ elif item["type"] == "text spotting en":
+ en_text_spotting_list.append(item["score"])
+
+ elif item["type"] == "key information extraction en" or item["type"] == "key information mapping en":
+ en_relationship_extraction_list.append(item["score"])
+
+ elif item["type"] == "document parsing en" or item["type"] == "chart parsing en" \
+ or item["type"] == "table parsing en" or item["type"] == "formula recognition en":
+ en_element_parsing_list.append(item["score"])
+
+ elif item["type"] == "math QA en" or item["type"] == "text counting en":
+ en_mathematical_calculation_list.append(item["score"])
+
+ elif item["type"] == "document classification en" \
+ or item["type"] == "cognition VQA en" or item["type"] == "diagram QA en":
+ en_visual_text_understanding_list.append(item["score"])
+
+ elif item["type"] == "reasoning VQA en" or item["type"] == "science QA en" \
+ or item["type"] == "APP agent en" or item["type"] == "ASCII art classification en":
+ en_knowledge_reasoning_list.append(item["score"])
+
+ elif item["type"] == "full-page OCR cn":
+ cn_text_recognition_list.append(item["score"])
+
+ elif item["type"] == "key information extraction cn" or item["type"] == "handwritten answer extraction cn":
+ cn_relationship_extraction_list.append(item["score"])
+
+ elif item["type"] == "document parsing cn" or item["type"] == "table parsing cn" or item["type"] == "formula recognition cn":
+ cn_element_parsing_list.append(item["score"])
+
+ elif item["type"] == "cognition VQA cn":
+ cn_visual_text_understanding_list.append(item["score"])
+
+ elif item["type"] == "reasoning VQA cn" or item["type"] == "text translation cn":
+ cn_knowledge_reasoning_list.append(item["score"])
+
+ else:
+ raise ValueError("Unknown task type!")
+
+ en_scores = {
+ "en_text_recognition": en_text_recognition_list,
+ "en_text_detection": en_text_detection_list,
+ "en_text_spotting": en_text_spotting_list,
+ "en_relationship_extraction": en_relationship_extraction_list,
+ "en_element_parsing": en_element_parsing_list,
+ "en_mathematical_calculation": en_mathematical_calculation_list,
+ "en_visual_text_understanding": en_visual_text_understanding_list,
+ "en_knowledge_reasoning": en_knowledge_reasoning_list
+ }
+
+ cn_scores = {
+ "cn_text_recognition": cn_text_recognition_list,
+ "cn_relationship_extraction": cn_relationship_extraction_list,
+ "cn_element_parsing": cn_element_parsing_list,
+ "cn_visual_text_understanding": cn_visual_text_understanding_list,
+ "cn_knowledge_reasoning": cn_knowledge_reasoning_list
+ }
+
+ # calculate_average drops empty categories, so missing tasks simply do
+ # not appear in the returned dicts.
+ en_averages = calculate_average(en_scores)
+ cn_averages = calculate_average(cn_scores)
+
+ return en_averages,cn_averages
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/shapegrid.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/shapegrid.py
new file mode 100644
index 0000000000000000000000000000000000000000..badf5e1526b4621128bdb3c2969f5aae8a5ddc1a
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/shapegrid.py
@@ -0,0 +1,28 @@
+from ...smp import *
+import re
+
+ def ShapeGrid_acc(result_file):
+ # Compute overall ShapeGrid accuracy from a saved results file and return
+ # a one-row DataFrame with columns tot / acc / category.
+ data = load(result_file)
+ tot = defaultdict(lambda: 0)
+ score = defaultdict(lambda: 0)
+ lt = len(data)
+ # Strip a single trailing period from every prediction before matching.
+ data['prediction'] = data['prediction'].astype(str).str.replace(r"\.$", "", regex=True)
+ for i in range(lt):
+ item = data.iloc[i]
+ if 'location' in item['category']:
+ # Location questions: substring containment is enough.
+ if item['answer'] in item['prediction']:
+ score['Overall'] += 1
+ elif item['category'] == 'count':
+ # Count questions: accept either string or raw-typed equality.
+ if item['prediction'] == str(item['answer']) or item['prediction'] == item['answer']:
+ score['Overall'] += 1
+ else:
+ # All other categories: exact match.
+ if item['prediction'] == item['answer']:
+ score['Overall'] += 1
+ tot['Overall'] += 1
+
+ res = defaultdict(list)
+ res['tot'].append(tot['Overall'])
+ # NOTE(review): raises ZeroDivisionError when the file is empty, and
+ # `item` below leaks from the last loop iteration, so 'category' records
+ # only the final row's category (NameError if the file is empty) --
+ # confirm whether a per-category breakdown was intended here.
+ res['acc'].append(score['Overall'] / tot['Overall'] * 100)
+ res['category'].append(item['category'])
+ res = pd.DataFrame(res)
+ return res
\ No newline at end of file
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/coloredsudoku/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/coloredsudoku/filter_prompt.json
new file mode 100644
index 0000000000000000000000000000000000000000..228fa6bfe1a083c77deb93632cb770eda7c4bbd2
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/coloredsudoku/filter_prompt.json
@@ -0,0 +1,66 @@
+[
+ {
+ "role": "system",
+ "content": "You are a text formatter; your goal is to extract two parts, perception and answer, from the answer and put them into a good format, according to the conversation history with the user feedback. The desired format is a 2d array, where there are only two types of values, 0 or numbers; if you see * or _ or - or e or #, you should convert them into 0. If you see other numbers such as 1, 2, or 3, you just put them there. If the perception / answer is not a valid solution or does not exist, please put a Wrong there. Your goal is only to format the user's input, not to check if it is correct or not"
+ },
+ {
+ "role": "user",
+ "content": "json\n{\n \"Initial State\": [[1, 2, \"*\", \"*\"], [2, \"*\", \"*\", \"*\"], [\"*\", \"*\", 1, \"*\"], [\"*\", \"*\", \"*\", \"*\"]],\n \"Thought\": \"To solve this Sudoku puzzle, I need to fill in the empty cells ensuring that each number from 1 to 4 appears exactly once in each row, column, and 2x2 block. I will start by identifying the possible numbers for each empty cell based on the existing ones.\",\n \"Solution\": [[1, 2, 4, 3], [2, 3, 4, 1], [3, 4, 1, 2], [4, 1, 2, 4]]\n}\n"
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\n[[1,2,0,0],[2,0,0,0],[0,0,1,0],[0,0,0,0]]\nAnswer\n[[1,2,4,3],[2,3,4,1],[3,4,1,2],[4,1,2,4]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. "
+ },
+ {
+ "role": "user",
+ "content": "json\n{\n \"Initial State\": [[4, 1, \"*\", \"*\"], [2, \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", 1]],\n \"Thought\": \"The grid consists of a 4x4 layout divided into 2x2 blocks. Considering existing numbers, I will analyze which numbers can fit into the empty cells while ensuring each row, column, and 2x2 block adhere to Sudoku rules. I will start filling from top left to bottom right.\",\n \"Solution\": [[4, 1, 3, 2], [2, 3, 1, 4], [1, 4, 2, 3], [3, 2, 4, 1]]\n}\n"
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\n[[4,1,0,0],[2,0,0,0],[0,0,0,0],[0,0,0,1]]\nAnswer\n[[4,1,3,2],[2,3,1,4],[1,4,2,3],[3,2,4,1]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. "
+ },
+ {
+ "role": "user",
+ "content": "```json\n{\n\"Initial State\": [\n[[\"*\"], [\"*\"], [\"*\"], [\"4\", \"1\"]], \n[[\"*\"], [\"*\"], [\"*\"], [\"1\"]], \n[[\"*\"], [\"*\"], [\"*\"], [\"*\"]], \n[[\"1\"], [\"*\"], [\"*\"], [\"3\"]]\n],\n\n\"Thought\": \"The grid is divided into 2x2 blocks. We need to ensure each block, row, and column contains unique numbers from 1 to 4. The current numbers are 1 and 3 in the top-right block and 4 and 1 in the top-left block. We need to fill the remaining cells with numbers 2 and 4 to make sure each block, row, and column has unique numbers. We can start with placing 2 in the bottom-left cell of the top-right block and 4 in the bottom-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"Considering the existing numbers and the rule that each block, row, and column must contain unique numbers, we can place the numbers in the remaining cells. The numbers 2 and 4 are available to fill the cells. We need to ensure that each of these numbers is placed in a way that does not violate the Sudoku rules. For example, we can place 2 in the bottom-left cell of the top-right block and 4 in the bottom-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"In the bottom-left cell of the top-right block, we can place the number 2. In the bottom-right cell of the top-left block, we can place the number 4. Now, we need to fill the remaining cells in the bottom-left block of the top-right block and the top-right cell of the top-left block with the remaining numbers 1 and 3.\",\n\"Solution\": {\n\"Thought\": \"We can place the number 1 in the bottom-left cell of the top-right block and the number 3 in the top-right cell of the top-left block. 
This will ensure that each block, row, and column contains unique numbers from 1 to 4.\",\n\"Solution\": {\n\"Thought\": \"The solution is to place the numbers as follows: 2 in the bottom-left cell of the top-right block, 4 in the bottom-right cell of the top-left block, 1 in the bottom-left cell of the top-right block, and 3 in the top-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"The solution to the Sudoku puzzle is as follows: 2 in the bottom-left cell of the top-right block, 4 in the bottom-right cell of the top-left block, 1 in the bottom-left cell of the top-right block, and 3 in the top-right cell of the top-left block. This will ensure that each block, row, and column contains unique numbers from 1 to 4.\",\n\"Solution\": {\n\"Thought\": \"In the bottom-left cell of the top-right block, we can\n``"
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\nWrong\nAnswer\nWrong"
+ },
+ {
+ "role": "user",
+ "content": "Yes. as perception is not a 2D array and answer is missing. For perception, its first row is 3 single item and 1 2-item array, not valid. Thus, putting Wrong for both perception and answer."
+ },
+ {
+ "role": "user",
+ "content": "``\n{\n \"Initial State\": [\n [3, \"*\", 2, \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", 2, 3, \"*\"],\n [\"*\", \"*\", \"*\", \"*\"]\n ],\n \"Thought\": \"The given Sudoku puzzle has some numbers already filled in. We need to fill in the missing numbers such that each row, column, and 2x2 sub-grid contains the numbers 1-4 without repetition.\",\n \"Solution\": [\n [3, 1, 2, 4],\n [4, 3, 1, 2],\n [1, 2, 3, 4],\n [2, 4, 4, 1]\n ]\n}\n```\n\nThe solution is obtained by applying the standard Sudoku solving techniques, such as focusing on the most constrained regions first and using logical deductions to eliminate possibilities."
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\n[[3,0,2,0],[0,0,0,0],[0,2,3,0],[0,0,0,0]]\nAnswer\n[[3,1,2,4],[4,3,1,2],[1,2,3,4],[2,4,4,1]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. ."
+ },
+ {
+ "role": "user",
+ "content": "```json\n{\"perception\": [[3, 1, \"*\", 4], [2, 4, 1, 3], [\"*\", \"*\", 4, \"*\"], [4, \"*\", 3, \"*\"]], \"think\": \"Looking at the first row, the only missing number is 2. \\nLooking at the last row, the missing numbers are 1 and 2. Since the cell at index (3,1) is of the same color as the cell at index (1,1) which contains the number 4, and the cell at index (3,0) contains the number 4, the cell at index (3,1) can only be 1 or 2. Since the cell at index (3,0) already contains 4, the cell at index (3,1) must be 1. Then the cell at index (3,3) must be 2. \\nLooking at the third row, the missing numbers are 1, 2, and 3. Since the cell at index (2,3) is of the same color as the cell at index (0,3) which contains 4, and the cell at index (1,3) contains 3, and the cell at index (2,2) contains 4, the cell at index (2,3) must be 1 or 2. Since the cell at index (3,3) contains 2, the cell at index (2,3) must be 1. Then the cell at index (2,0) must be 3, and the cell at index (2,1) must be 2. \\nTherefore, the solution is [[3, 1, 2, 4], [2, 4, 1, 3], [3, 2, 4, 1], [4, 1, 3, 2]].\", \"answer\": [[3, 1, 2, 4], [2, 4, 1, 3], [3, 2, 4, 1], [4, 1, 3, 2]]}\n```"
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\n[[3,1,0,4],[2,4,1,3],[0,0,4,0],[4,0,3,0]]\nAnswer\n[[3,1,2,4],[2,4,1,3],[3,2,4,1],[4,1,3,2]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. ."
+ }
+]
\ No newline at end of file
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/futoshiki/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/futoshiki/filter_prompt.json
new file mode 100644
index 0000000000000000000000000000000000000000..228fa6bfe1a083c77deb93632cb770eda7c4bbd2
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/futoshiki/filter_prompt.json
@@ -0,0 +1,66 @@
+[
+ {
+ "role": "system",
+ "content": "You are a text formatter; your goal is to extract two parts, perception and answer, from the answer and put them into a good format, according to the conversation history with the user feedback. The desired format is a 2d array, where there are only two types of values, 0 or numbers; if you see * or _ or - or e or #, you should convert them into 0. If you see other numbers such as 1, 2, or 3, you just put them there. If the perception / answer is not a valid solution or does not exist, please put a Wrong there. Your goal is only to format the user's input, not to check if it is correct or not"
+ },
+ {
+ "role": "user",
+ "content": "json\n{\n \"Initial State\": [[1, 2, \"*\", \"*\"], [2, \"*\", \"*\", \"*\"], [\"*\", \"*\", 1, \"*\"], [\"*\", \"*\", \"*\", \"*\"]],\n \"Thought\": \"To solve this Sudoku puzzle, I need to fill in the empty cells ensuring that each number from 1 to 4 appears exactly once in each row, column, and 2x2 block. I will start by identifying the possible numbers for each empty cell based on the existing ones.\",\n \"Solution\": [[1, 2, 4, 3], [2, 3, 4, 1], [3, 4, 1, 2], [4, 1, 2, 4]]\n}\n"
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\n[[1,2,0,0],[2,0,0,0],[0,0,1,0],[0,0,0,0]]\nAnswer\n[[1,2,4,3],[2,3,4,1],[3,4,1,2],[4,1,2,4]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. "
+ },
+ {
+ "role": "user",
+ "content": "json\n{\n \"Initial State\": [[4, 1, \"*\", \"*\"], [2, \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", 1]],\n \"Thought\": \"The grid consists of a 4x4 layout divided into 2x2 blocks. Considering existing numbers, I will analyze which numbers can fit into the empty cells while ensuring each row, column, and 2x2 block adhere to Sudoku rules. I will start filling from top left to bottom right.\",\n \"Solution\": [[4, 1, 3, 2], [2, 3, 1, 4], [1, 4, 2, 3], [3, 2, 4, 1]]\n}\n"
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\n[[4,1,0,0],[2,0,0,0],[0,0,0,0],[0,0,0,1]]\nAnswer\n[[4,1,3,2],[2,3,1,4],[1,4,2,3],[3,2,4,1]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. "
+ },
+ {
+ "role": "user",
+ "content": "```json\n{\n\"Initial State\": [\n[[\"*\"], [\"*\"], [\"*\"], [\"4\", \"1\"]], \n[[\"*\"], [\"*\"], [\"*\"], [\"1\"]], \n[[\"*\"], [\"*\"], [\"*\"], [\"*\"]], \n[[\"1\"], [\"*\"], [\"*\"], [\"3\"]]\n],\n\n\"Thought\": \"The grid is divided into 2x2 blocks. We need to ensure each block, row, and column contains unique numbers from 1 to 4. The current numbers are 1 and 3 in the top-right block and 4 and 1 in the top-left block. We need to fill the remaining cells with numbers 2 and 4 to make sure each block, row, and column has unique numbers. We can start with placing 2 in the bottom-left cell of the top-right block and 4 in the bottom-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"Considering the existing numbers and the rule that each block, row, and column must contain unique numbers, we can place the numbers in the remaining cells. The numbers 2 and 4 are available to fill the cells. We need to ensure that each of these numbers is placed in a way that does not violate the Sudoku rules. For example, we can place 2 in the bottom-left cell of the top-right block and 4 in the bottom-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"In the bottom-left cell of the top-right block, we can place the number 2. In the bottom-right cell of the top-left block, we can place the number 4. Now, we need to fill the remaining cells in the bottom-left block of the top-right block and the top-right cell of the top-left block with the remaining numbers 1 and 3.\",\n\"Solution\": {\n\"Thought\": \"We can place the number 1 in the bottom-left cell of the top-right block and the number 3 in the top-right cell of the top-left block. 
This will ensure that each block, row, and column contains unique numbers from 1 to 4.\",\n\"Solution\": {\n\"Thought\": \"The solution is to place the numbers as follows: 2 in the bottom-left cell of the top-right block, 4 in the bottom-right cell of the top-left block, 1 in the bottom-left cell of the top-right block, and 3 in the top-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"The solution to the Sudoku puzzle is as follows: 2 in the bottom-left cell of the top-right block, 4 in the bottom-right cell of the top-left block, 1 in the bottom-left cell of the top-right block, and 3 in the top-right cell of the top-left block. This will ensure that each block, row, and column contains unique numbers from 1 to 4.\",\n\"Solution\": {\n\"Thought\": \"In the bottom-left cell of the top-right block, we can\n``"
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\nWrong\nAnswer\nWrong"
+ },
+ {
+ "role": "user",
+ "content": "Yes. as perception is not a 2D array and answer is missing. For perception, its first row is 3 single item and 1 2-item array, not valid. Thus, putting Wrong for both perception and answer."
+ },
+ {
+ "role": "user",
+ "content": "``\n{\n \"Initial State\": [\n [3, \"*\", 2, \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", 2, 3, \"*\"],\n [\"*\", \"*\", \"*\", \"*\"]\n ],\n \"Thought\": \"The given Sudoku puzzle has some numbers already filled in. We need to fill in the missing numbers such that each row, column, and 2x2 sub-grid contains the numbers 1-4 without repetition.\",\n \"Solution\": [\n [3, 1, 2, 4],\n [4, 3, 1, 2],\n [1, 2, 3, 4],\n [2, 4, 4, 1]\n ]\n}\n```\n\nThe solution is obtained by applying the standard Sudoku solving techniques, such as focusing on the most constrained regions first and using logical deductions to eliminate possibilities."
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\n[[3,0,2,0],[0,0,0,0],[0,2,3,0],[0,0,0,0]]\nAnswer\n[[3,1,2,4],[4,3,1,2],[1,2,3,4],[2,4,4,1]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. ."
+ },
+ {
+ "role": "user",
+ "content": "```json\n{\"perception\": [[3, 1, \"*\", 4], [2, 4, 1, 3], [\"*\", \"*\", 4, \"*\"], [4, \"*\", 3, \"*\"]], \"think\": \"Looking at the first row, the only missing number is 2. \\nLooking at the last row, the missing numbers are 1 and 2. Since the cell at index (3,1) is of the same color as the cell at index (1,1) which contains the number 4, and the cell at index (3,0) contains the number 4, the cell at index (3,1) can only be 1 or 2. Since the cell at index (3,0) already contains 4, the cell at index (3,1) must be 1. Then the cell at index (3,3) must be 2. \\nLooking at the third row, the missing numbers are 1, 2, and 3. Since the cell at index (2,3) is of the same color as the cell at index (0,3) which contains 4, and the cell at index (1,3) contains 3, and the cell at index (2,2) contains 4, the cell at index (2,3) must be 1 or 2. Since the cell at index (3,3) contains 2, the cell at index (2,3) must be 1. Then the cell at index (2,0) must be 3, and the cell at index (2,1) must be 2. \\nTherefore, the solution is [[3, 1, 2, 4], [2, 4, 1, 3], [3, 2, 4, 1], [4, 1, 3, 2]].\", \"answer\": [[3, 1, 2, 4], [2, 4, 1, 3], [3, 2, 4, 1], [4, 1, 3, 2]]}\n```"
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\n[[3,1,0,4],[2,4,1,3],[0,0,4,0],[4,0,3,0]]\nAnswer\n[[3,1,2,4],[2,4,1,3],[3,2,4,1],[4,1,3,2]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. ."
+ }
+]
\ No newline at end of file
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/hitori/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/hitori/filter_prompt.json
new file mode 100644
index 0000000000000000000000000000000000000000..4ba6b1976e694a26b0d85125fc2f438dbf73a9b8
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/hitori/filter_prompt.json
@@ -0,0 +1,79 @@
+[
+ {
+ "role": "system",
+ "content": "You are a text formater, your goal is to extract two parts, perception and answer from the answer and put them into a good format. Please according to the conversation history with the user feedback. The desired format is a 2d array, where there is only three types values e or s or numbers; if you see 0, you should convert them into e. If you see other numbers such as 1, 2, or 3, you just put it there.If the perception / answer is not a valid solution or not existe, please put a Wrong there. Your goal is only to format the user's input, not to check if it is correct or not"
+ },
+ {
+ "role": "user",
+ "content": "```json\n{\n\"perception\": [\n [3, 2, 2, 1],\n [4, 1, 3, 1],\n [2, 2, 1, 4],\n [1, 2, 1, 3]\n],\n\"think\": \"To solve this Hitori puzzle, I'll look for cells that must be shaded to avoid duplicates in rows and columns. The 1 in the bottom left corner must be shaded since there are two other 1s in its row and column. This leaves the 3 in the top left unshaded. Similarly, the 4 in the bottom right must be shaded to avoid duplicating the 4 in the second row. The 3 in the second row can then remain unshaded. For the 2s in the second and third rows, I'll shade the one in the top right corner to leave the other two 2s unshaded. This ensures all unshaded cells are connected.\",\n\"answer\": [\n [\"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"s\"],\n [\"e\", \"e\", \"e\", \"s\"],\n [\"s\", \"e\", \"e\", \"e\"]\n]\n}\n```"
+ },
+ {
+ "role": "assistant",
+ "content": "Initial State\n[[3,2,2,1],[4,1,3,1],[2,2,1,4],[1,2,1,3]]\nSolution\n[[0,0,0,0],[0,0,0,s],[0,0,0,s],[s,0,0,0]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. "
+ },
+ {
+ "role": "user",
+ "content": "```json\n{\n\"perception\": [[1, 3, 4, 3], \n [3, 4, 2, 1],\n [1, 3, 1, 4], \n [4, 1, 3, 3]],\n\"answer\": [[\"e\", \"s\", \"e\", \"s\"],\n [\"s\", \"e\", \"s\", \"e\"], \n [\"e\", \"s\", \"e\", \"s\"],\n [\"s\", \"e\", \"s\", \"e\"]]\n}\n```"
+ },
+ {
+ "role": "assistant",
+ "content": "Initial State\n[[1,3,4,3],[3,4,2,1],[1,3,1,4],[4,1,3,3]]\nSolution\n[[0,s,0,s],[s,0,s,0],[0,s,0,s],[s,0,s,0]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. "
+ },
+ {
+ "role": "user",
+ "content":"**Game State:**\n\n{\"perception\": [[1, 3, 4, 3], [3, 4, 2, 1], [1, 3, 1, 4], [4, 1, 3, 3]]}\n\n**Thinking Process:**\n\nTo solve this Hitori puzzle, we need to identify the numbers that appear more than once in each row and column among unshaded cells. We will start by looking for these numbers in the first row. The numbers 1, 3, and 4 appear more than once in the first row, so we can shade the cell with number 1.\n\nNext, we will look at the second row. The numbers 2 and 4 appear more than once in the second row, so we can shade the cell with number 2.\n\nThen, we will examine the third row. The numbers 1 and 3 appear more than once in the third row, so we can shade the cell with number 1.\n\nFinally, we will analyze the fourth row. The numbers 3 and 4 appear more than once in the fourth row, so we can shade the cell with number 3.\n\n**Solution:**\n\n{\"answer\": [[s, s, 4, s], [s, s, s, s], [s, 3, s, s], [4, s, s, s]]}\n\nNote: The \"s\" indicates a shaded cell, and the \"e\" indicates an empty cell."
+ },
+ {
+ "role": "assistant",
+ "content": "Initial State\n[[1,3,4,3],[3,4,2,1],[1,3,1,4],[4,1,3,3]]\nSolution\n[[s,s,4,s],[s,s,s,s],[s,3,s,s],[4,s,s,s]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes."
+ },
+ {
+ "role": "user",
+ "content": "```json\n{\n\"perception\": [\n[3, 2, 2, 1],\n[4, 1, 3, 1],\n[2, 2, 1, 4],\n[1, 2, 1, 3]\n],\n\"answer\": [\n[s, s, s, s],\n[s, s, s, s],\n[s, s, s, s],\n[s, s, s, s]\n]\n}\n```"
+ },
+ {
+ "role": "assistant",
+ "content": "Initial State\n[[3,2,2,1],[4,1,3,1],[2,2,1,4],[1,2,1,3]]\nSolution\n[[s,s,s,s],[s,s,s,s],[s,s,s,s],[s,s,s,s]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. "
+ },
+ {
+ "role": "user",
+ "content": "```json\n{\n \"perception\": [[1, 3, 4, 3], [3, 4, 2, 1], [1, 3, 1, 4], [4, 1, 3, 3]],\n \"think\": \"I need to shade cells such that no number appears more than once in each row and column among unshaded cells, while ensuring shaded cells are not adjacent and that all unshaded cells are connected. I will start by analyzing each row and column for duplicate numbers and possible shading without breaking the adjacency rule. I aim to shade strategically to meet all conditions.\",\n \"answer\": [[\"e\", \"s\", \"e\", \"s\"], [\"s\", \"e\", \"e\", \"e\"], [\"e\", \"s\", \"e\", \"s\"], [\"s\", \"e\", \"s\", \"e\"]]\n}\n```"
+ },
+ {
+ "role": "assistant",
+ "content": "Initial State\n[[1,3,4,3],[3,4,2,1],[1,3,1,4],[4,1,3,3]]\nSolution\n[[0,s,0,s],[s,0,0,0],[0,s,0,s],[s,0,s,0]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes."
+ },
+ {
+ "role": "user",
+ "content": "**Step 1: Identify the Problem**\n\nThe problem is a Hitori game, where we need to shade some cells in the grid such that no number appears more than once in each row and column among unshaded cells.\n\n**Step 2: Analyze the Game State**\n\nThe current game state is represented by a 4x4 grid with some numbers already filled in. The goal is to determine which cells need to be shaded to satisfy the rules of the game.\n\n**Step 3: Determine the Solution**\n\nTo solve this problem, we need to analyze the game state and determine which cells can be shaded without violating the rules of the game. We will use the following steps:\n\n* Identify the numbers that appear more than once in each row and column.\n* Determine which cells need to be shaded to eliminate duplicate numbers.\n* Ensure that shaded cells are not adjacent and all unshaded cells are connected.\n\n**Step 4: Implement the Solution**\n\nBased on the analysis, we can determine the solution as follows:\n\n```\n\"perception\": [[1, 3, 1, 4], [4, 2, 1, 3], [4, 1, 4, 2], [2, 4, 3, 3]],\n\"think\": \"We need to shade the cells that contain duplicate numbers in each row and column. We also need to ensure that shaded cells are not adjacent and all unshaded cells are connected.\",\n\"answer\": [[\"s\", \"e\", \"s\", \"e\"], [\"e\", \"s\", \"s\", \"e\"], [\"e\", \"s\", \"e\", \"s\"], [\"s\", \"e\", \"s\", \"e\"]]\n```"
+ },
+ {
+ "role": "assistant",
+ "content": "Initial State\n[[1,3,1,4],[4,2,1,3],[4,1,4,2],[2,4,3,3]]\nSolution\n[[s,0,s,0],[0,s,s,0],[0,s,0,s],[s,0,s,0]]"
+
+ },
+ {
+ "role": "user",
+ "content": "Yes."
+ }
+]
\ No newline at end of file
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/kakurasu/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/kakurasu/filter_prompt.json
new file mode 100644
index 0000000000000000000000000000000000000000..af93d0b16cc3e4e78e2b3024dbde627daf35fd3e
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/kakurasu/filter_prompt.json
@@ -0,0 +1,80 @@
+[
+ {
+ "role": "system",
+ "content": "You are a text formater, your goal is to extract two parts, perception and answer from the answer and put them into a good format. Please according to the conversation history with the user feedback. The desired format is a 2d array, where there is only 0 or s; *, or 0, or _, or -, or e might indicates empty that you should convert to 0. If the perception / answer is not a valid solution or not existe, please put a Wrong there. Your goal is only to format the user's input, not to check if it is correct or not."
+ },
+ {
+ "role": "user",
+ "content": "```json\n{\n\"perception\": [\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"]\n],\n\"think\": \"To solve this Kakurasu puzzle, I need to shade some cells in the grid such that the sum of the weights of the shaded cells in each row and column matches the given clues. The weights increase from left to right and top to bottom, starting from 1. I'll analyze the clues row by row and column by column to determine which cells need to be shaded.\",\n\"answer\": [\n [\"e\", \"s\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"s\"],\n [\"s\", \"e\", \"s\", \"e\"],\n [\"e\", \"s\", \"e\", \"e\"]\n]\n}\n```"
+ },
+ {
+ "role": "assistant",
+ "content": "Initial State\n[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]\nSolution\n[[0,s,0,0],[0,0,0,s],[s,0,s,0],[0,s,0,0]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. "
+ },
+ {
+ "role": "user",
+ "content": "```json\n{\n\"perception\": [\n [\"s\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"], \n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"]\n],\n\"think\": \"The first row has a clue of 4, which means the sum of the weights of the shaded cells in that row must be 4. Since the weights increase from left to right, starting with 1, the only way to get a sum of 4 in the first row is to shade the leftmost cell with a weight of 1. For the other rows and columns, there is not enough information yet to determine which cells to shade.\",\n\"answer\": [\n [\"s\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\"], \n [\"e\", \"e\", \"e\", \"e\"]\n]\n}\n```"
+ },
+ {
+ "role": "assistant",
+ "content": "Initial State\n[[s,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]\nSolution\n[[s,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. "
+ },
+ {
+ "role": "user",
+ "content": "```json\n{\n \"perception\": [\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"]\n ],\n \"think\": \"The Kakurasu puzzle requires shading cells such that the sum of the weights of shaded cells in each row and column equals the given clue. The weights are the cell's coordinates (starting from 1). Let's analyze the clues:\\n\\nRow 0: 3. Possible combinations are 1+2, 3. \\nRow 1: 0. No cells are shaded.\\nRow 2: 9. Only combination is 1+2+3+4.\\nRow 3: 2. Possible combinations are 2, 1+1 (but this is not possible).\\n\\nColumn 0: 0. No cells are shaded.\\nColumn 1: 7. Possible combinations are 1+2+4, 3+4.\\nColumn 2: 4. Possible combinations are 4, 1+3, 2+2.\\nColumn 3: 3. Possible combinations are 3, 1+2.\\n\\nLet's start with Row 2 (sum 9). This means all cells in Row 2 must be shaded. This also helps us deduce Column 1 and Column 2. Since Row 2 is fully shaded, Column 1 must have 7 (2+3+4) and Column 2 must have 4 (3+1). This leaves only 3 for Column 3. Row 0 must be 1+2, and Row 3 must be 2. This gives us the solution.\",\n \"answer\": [\n [\"e\", \"s\", \"s\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\"],\n [\"s\", \"s\", \"s\", \"s\"],\n [\"e\", \"e\", \"e\", \"s\"]\n ]\n}\n```"
+ },
+ {
+ "role": "assistant",
+ "content": "Initial State\n[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]\nSolution\n[[0,s,s,0],[0,0,0,0],[s,s,s,s],[0,0,0,s]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. "
+ },
+ {
+ "role": "user",
+ "content": "```json\n{\n \"perception\": [\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"]\n ],\n \"think\": \"Given the row clues are [3, 7, 4, 3] and the column clues are [3, 0, 9, 2], we need to fill the cells with 's' only if the sum of the weights (indexed from 1) from left to right and top to bottom matches the clue for that particular row or column. The second row must sum up to 7, where the only combination is [2, 3] in columns 0 and 2. The third row sums up to 4 and the only option is choosing column 0, the fourth row clue is 3 which can be satisfied only by choosing column 0. For columns, the third column needs to sum up to 9, achieved by choosing cells in rows 1 and 3. Each row and column must match the required sums without overfilling or misplacing any shaded cells.\",\n \"answer\": [\n [\"e\", \"e\", \"s\", \"e\"],\n [\"s\", \"e\", \"s\", \"e\"],\n [\"s\", \"e\", \"e\", \"e\"],\n [\"s\", \"e\", \"e\", \"e\"]\n ]\n}\n```"
+ },
+ {
+ "role": "assistant",
+ "content": "Initial State\n[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]\nSolution\n[[0,0,s,0],[s,0,s,0],[s,0,0,0],[s,0,0,0]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. "
+ },
+ {
+ "role": "user",
+ "content": "**Problem Statement**\n\nThe problem is a Kakurasu puzzle, where the goal is to shade some cells in a grid such that the sum of the weights of selected cells in each row and column matches the given clues. The weights increase from left to right (for rows) and top to bottom (for columns), starting from 1. Indexing starts at 0.\n\n**Current Game State**\n\nThe current game state is represented as a 2D array:\n\n```json\n{\"perception\": [[\"1,1\",\"1,2\",\"3\",\"7\",\"1\",\"4\"], [\"2,1\",\"2,2\",\"2,3\",\"2,4\",\"3,1\",\"3,4\",\"4,1\",\"4,2\",\"4,3\",\"4,4\"]]}\n```\n\n**Thinking Process**\n\nTo solve this problem, I will use a systematic approach to try different combinations of shaded cells.\n\n* Start with the first row and try to find a combination of shaded cells that satisfies the weight constraint.\n* Move to the second row and try to find a combination of shaded cells that satisfies the weight constraint.\n* Repeat the process for all rows.\n* Once all rows are satisfied, move to the columns and repeat the process.\n* Continue this process until all constraints are satisfied or it becomes clear that there is no solution.\n\n**Solution**\n\nAfter trying different combinations, I found a solution that satisfies all constraints:\n\n```json\n{\"answer\": [[\"s\",\"e\",\"e\",\"e\",\"e\",\"e\"],[\"e\",\"e\",\"s\",\"e\",\"e\",\"e\"],[\"e\",\"e\",\"e\",\"s\",\"e\",\"e\"],[\"e\",\"e\",\"e\",\"e\",\"s\",\"e\"],[\"e\",\"e\",\"e\",\"e\",\"e\",\"s\"],[\"e\",\"e\",\"e\",\"e\",\"e\",\"e\"]]}\n```\n\nThis solution satisfies all constraints and is a valid solution to the Kakurasu puzzle.\n\n**Conclusion**\n\nThe solution to the Kakurasu puzzle is a 2D array where each cell is either shaded (s) or unshaded (e). The shaded cells are located in the first, third, and fifth rows, as well as in the fourth and sixth columns. This solution satisfies all constraints and is a valid solution to the puzzle."
+ },
+ {
+ "role": "assistant",
+ "content": "Initial State\nWrong\nSolution\nWrong"
+ },
+ {
+ "role": "user",
+ "content": "Yes. Because other numbers like 1 and 2 is undefined, so put a Wrong there"
+ },
+ {
+ "role": "user",
+ "content": "```json\n{\n\"perception\": [\n[0, 0, 0, 0, 0, 0],\n[0, 0, 0, 0, 0, 0],\n[0, 0, 0, 0, 0, 0],\n[0, 0, 0, 0, 0, 0],\n[0, 0, 0, 0, 0, 0]\n],\n\"think\": \"The puzzle has not been started yet, so there are no shaded cells.\",\n\"answer\": [\n[0, 0, 0, 0, 0, 0],\n[0, 0, 0, 0, 0, 0],\n[0, 0, 0, 0, 0, 0],\n[0, 0, 0, 0, 0, 0],\n[0, 0, 0, 0, 0, 0]\n]\n}\n```"
+ },
+ {
+ "role": "assistant",
+ "content": "Initial State\n[[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0]]\nSolution\n[[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0]]"
+
+ },
+ {
+ "role": "user",
+ "content": "Yes."
+ }
+
+]
\ No newline at end of file
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/oddevensudoku/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/oddevensudoku/filter_prompt.json
new file mode 100644
index 0000000000000000000000000000000000000000..228fa6bfe1a083c77deb93632cb770eda7c4bbd2
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/oddevensudoku/filter_prompt.json
@@ -0,0 +1,66 @@
+[
+ {
+ "role": "system",
+ "content": "You are a text formater, your goal is to extract two parts, perception and answer from the answer and put them into a good format. Please according to the conversation history with the user feedback. The desired format is a 2d array, where there is only two types values 0 or numbers; if you see * or _ or - or e or #, you should convert them into 0. If you see other numbers such as 1, 2, or 3, you just put it there.If the perception / answer is not a valid solution or not existe, please put a Wrong there. Your goal is only to format the user's input, not to check if it is correct or not"
+ },
+ {
+ "role": "user",
+ "content": "json\n{\n \"Initial State\": [[1, 2, \"*\", \"*\"], [2, \"*\", \"*\", \"*\"], [\"*\", \"*\", 1, \"*\"], [\"*\", \"*\", \"*\", \"*\"]],\n \"Thought\": \"To solve this Sudoku puzzle, I need to fill in the empty cells ensuring that each number from 1 to 4 appears exactly once in each row, column, and 2x2 block. I will start by identifying the possible numbers for each empty cell based on the existing ones.\",\n \"Solution\": [[1, 2, 4, 3], [2, 3, 4, 1], [3, 4, 1, 2], [4, 1, 2, 4]]\n}\n"
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\n[[1,2,0,0],[2,0,0,0],[0,0,1,0],[0,0,0,0]]\nAnswer\n[[1,2,4,3],[2,3,4,1],[3,4,1,2],[4,1,2,4]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. "
+ },
+ {
+ "role": "user",
+ "content": "json\n{\n \"Initial State\": [[4, 1, \"*\", \"*\"], [2, \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", 1]],\n \"Thought\": \"The grid consists of a 4x4 layout divided into 2x2 blocks. Considering existing numbers, I will analyze which numbers can fit into the empty cells while ensuring each row, column, and 2x2 block adhere to Sudoku rules. I will start filling from top left to bottom right.\",\n \"Solution\": [[4, 1, 3, 2], [2, 3, 1, 4], [1, 4, 2, 3], [3, 2, 4, 1]]\n}\n"
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\n[[4,1,0,0],[2,0,0,0],[0,0,0,0],[0,0,0,1]]\nAnswer\n[[4,1,3,2],[2,3,1,4],[1,4,2,3],[3,2,4,1]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. "
+ },
+ {
+ "role": "user",
+ "content": "```json\n{\n\"Initial State\": [\n[[\"*\"], [\"*\"], [\"*\"], [\"4\", \"1\"]], \n[[\"*\"], [\"*\"], [\"*\"], [\"1\"]], \n[[\"*\"], [\"*\"], [\"*\"], [\"*\"]], \n[[\"1\"], [\"*\"], [\"*\"], [\"3\"]]\n],\n\n\"Thought\": \"The grid is divided into 2x2 blocks. We need to ensure each block, row, and column contains unique numbers from 1 to 4. The current numbers are 1 and 3 in the top-right block and 4 and 1 in the top-left block. We need to fill the remaining cells with numbers 2 and 4 to make sure each block, row, and column has unique numbers. We can start with placing 2 in the bottom-left cell of the top-right block and 4 in the bottom-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"Considering the existing numbers and the rule that each block, row, and column must contain unique numbers, we can place the numbers in the remaining cells. The numbers 2 and 4 are available to fill the cells. We need to ensure that each of these numbers is placed in a way that does not violate the Sudoku rules. For example, we can place 2 in the bottom-left cell of the top-right block and 4 in the bottom-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"In the bottom-left cell of the top-right block, we can place the number 2. In the bottom-right cell of the top-left block, we can place the number 4. Now, we need to fill the remaining cells in the bottom-left block of the top-right block and the top-right cell of the top-left block with the remaining numbers 1 and 3.\",\n\"Solution\": {\n\"Thought\": \"We can place the number 1 in the bottom-left cell of the top-right block and the number 3 in the top-right cell of the top-left block. 
This will ensure that each block, row, and column contains unique numbers from 1 to 4.\",\n\"Solution\": {\n\"Thought\": \"The solution is to place the numbers as follows: 2 in the bottom-left cell of the top-right block, 4 in the bottom-right cell of the top-left block, 1 in the bottom-left cell of the top-right block, and 3 in the top-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"The solution to the Sudoku puzzle is as follows: 2 in the bottom-left cell of the top-right block, 4 in the bottom-right cell of the top-left block, 1 in the bottom-left cell of the top-right block, and 3 in the top-right cell of the top-left block. This will ensure that each block, row, and column contains unique numbers from 1 to 4.\",\n\"Solution\": {\n\"Thought\": \"In the bottom-left cell of the top-right block, we can\n``"
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\nWrong\nAnswer\nWrong"
+ },
+ {
+ "role": "user",
+ "content": "Yes. as perception is not a 2D array and answer is missing. For perception, its first row is 3 single item and 1 2-item array, not valid. Thus, putting Wrong for both perception and answer."
+ },
+ {
+ "role": "user",
+ "content": "``\n{\n \"Initial State\": [\n [3, \"*\", 2, \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", 2, 3, \"*\"],\n [\"*\", \"*\", \"*\", \"*\"]\n ],\n \"Thought\": \"The given Sudoku puzzle has some numbers already filled in. We need to fill in the missing numbers such that each row, column, and 2x2 sub-grid contains the numbers 1-4 without repetition.\",\n \"Solution\": [\n [3, 1, 2, 4],\n [4, 3, 1, 2],\n [1, 2, 3, 4],\n [2, 4, 4, 1]\n ]\n}\n```\n\nThe solution is obtained by applying the standard Sudoku solving techniques, such as focusing on the most constrained regions first and using logical deductions to eliminate possibilities."
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\n[[3,0,2,0],[0,0,0,0],[0,2,3,0],[0,0,0,0]]\nAnswer\n[[3,1,2,4],[4,3,1,2],[1,2,3,4],[2,4,4,1]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. ."
+ },
+ {
+ "role": "user",
+ "content": "```json\n{\"perception\": [[3, 1, \"*\", 4], [2, 4, 1, 3], [\"*\", \"*\", 4, \"*\"], [4, \"*\", 3, \"*\"]], \"think\": \"Looking at the first row, the only missing number is 2. \\nLooking at the last row, the missing numbers are 1 and 2. Since the cell at index (3,1) is of the same color as the cell at index (1,1) which contains the number 4, and the cell at index (3,0) contains the number 4, the cell at index (3,1) can only be 1 or 2. Since the cell at index (3,0) already contains 4, the cell at index (3,1) must be 1. Then the cell at index (3,3) must be 2. \\nLooking at the third row, the missing numbers are 1, 2, and 3. Since the cell at index (2,3) is of the same color as the cell at index (0,3) which contains 4, and the cell at index (1,3) contains 3, and the cell at index (2,2) contains 4, the cell at index (2,3) must be 1 or 2. Since the cell at index (3,3) contains 2, the cell at index (2,3) must be 1. Then the cell at index (2,0) must be 3, and the cell at index (2,1) must be 2. \\nTherefore, the solution is [[3, 1, 2, 4], [2, 4, 1, 3], [3, 2, 4, 1], [4, 1, 3, 2]].\", \"answer\": [[3, 1, 2, 4], [2, 4, 1, 3], [3, 2, 4, 1], [4, 1, 3, 2]]}\n```"
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\n[[3,1,0,4],[2,4,1,3],[0,0,4,0],[4,0,3,0]]\nAnswer\n[[3,1,2,4],[2,4,1,3],[3,2,4,1],[4,1,3,2]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. ."
+ }
+]
\ No newline at end of file
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/skyscraper/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/skyscraper/filter_prompt.json
new file mode 100644
index 0000000000000000000000000000000000000000..228fa6bfe1a083c77deb93632cb770eda7c4bbd2
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/skyscraper/filter_prompt.json
@@ -0,0 +1,66 @@
+[
+ {
+ "role": "system",
+ "content": "You are a text formater, your goal is to extract two parts, perception and answer from the answer and put them into a good format. Please according to the conversation history with the user feedback. The desired format is a 2d array, where there is only two types values 0 or numbers; if you see * or _ or - or e or #, you should convert them into 0. If you see other numbers such as 1, 2, or 3, you just put it there.If the perception / answer is not a valid solution or not existe, please put a Wrong there. Your goal is only to format the user's input, not to check if it is correct or not"
+ },
+ {
+ "role": "user",
+ "content": "json\n{\n \"Initial State\": [[1, 2, \"*\", \"*\"], [2, \"*\", \"*\", \"*\"], [\"*\", \"*\", 1, \"*\"], [\"*\", \"*\", \"*\", \"*\"]],\n \"Thought\": \"To solve this Sudoku puzzle, I need to fill in the empty cells ensuring that each number from 1 to 4 appears exactly once in each row, column, and 2x2 block. I will start by identifying the possible numbers for each empty cell based on the existing ones.\",\n \"Solution\": [[1, 2, 4, 3], [2, 3, 4, 1], [3, 4, 1, 2], [4, 1, 2, 4]]\n}\n"
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\n[[1,2,0,0],[2,0,0,0],[0,0,1,0],[0,0,0,0]]\nAnswer\n[[1,2,4,3],[2,3,4,1],[3,4,1,2],[4,1,2,4]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. "
+ },
+ {
+ "role": "user",
+ "content": "json\n{\n \"Initial State\": [[4, 1, \"*\", \"*\"], [2, \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", 1]],\n \"Thought\": \"The grid consists of a 4x4 layout divided into 2x2 blocks. Considering existing numbers, I will analyze which numbers can fit into the empty cells while ensuring each row, column, and 2x2 block adhere to Sudoku rules. I will start filling from top left to bottom right.\",\n \"Solution\": [[4, 1, 3, 2], [2, 3, 1, 4], [1, 4, 2, 3], [3, 2, 4, 1]]\n}\n"
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\n[[4,1,0,0],[2,0,0,0],[0,0,0,0],[0,0,0,1]]\nAnswer\n[[4,1,3,2],[2,3,1,4],[1,4,2,3],[3,2,4,1]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. "
+ },
+ {
+ "role": "user",
+ "content": "```json\n{\n\"Initial State\": [\n[[\"*\"], [\"*\"], [\"*\"], [\"4\", \"1\"]], \n[[\"*\"], [\"*\"], [\"*\"], [\"1\"]], \n[[\"*\"], [\"*\"], [\"*\"], [\"*\"]], \n[[\"1\"], [\"*\"], [\"*\"], [\"3\"]]\n],\n\n\"Thought\": \"The grid is divided into 2x2 blocks. We need to ensure each block, row, and column contains unique numbers from 1 to 4. The current numbers are 1 and 3 in the top-right block and 4 and 1 in the top-left block. We need to fill the remaining cells with numbers 2 and 4 to make sure each block, row, and column has unique numbers. We can start with placing 2 in the bottom-left cell of the top-right block and 4 in the bottom-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"Considering the existing numbers and the rule that each block, row, and column must contain unique numbers, we can place the numbers in the remaining cells. The numbers 2 and 4 are available to fill the cells. We need to ensure that each of these numbers is placed in a way that does not violate the Sudoku rules. For example, we can place 2 in the bottom-left cell of the top-right block and 4 in the bottom-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"In the bottom-left cell of the top-right block, we can place the number 2. In the bottom-right cell of the top-left block, we can place the number 4. Now, we need to fill the remaining cells in the bottom-left block of the top-right block and the top-right cell of the top-left block with the remaining numbers 1 and 3.\",\n\"Solution\": {\n\"Thought\": \"We can place the number 1 in the bottom-left cell of the top-right block and the number 3 in the top-right cell of the top-left block. 
This will ensure that each block, row, and column contains unique numbers from 1 to 4.\",\n\"Solution\": {\n\"Thought\": \"The solution is to place the numbers as follows: 2 in the bottom-left cell of the top-right block, 4 in the bottom-right cell of the top-left block, 1 in the bottom-left cell of the top-right block, and 3 in the top-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"The solution to the Sudoku puzzle is as follows: 2 in the bottom-left cell of the top-right block, 4 in the bottom-right cell of the top-left block, 1 in the bottom-left cell of the top-right block, and 3 in the top-right cell of the top-left block. This will ensure that each block, row, and column contains unique numbers from 1 to 4.\",\n\"Solution\": {\n\"Thought\": \"In the bottom-left cell of the top-right block, we can\n``"
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\nWrong\nAnswer\nWrong"
+ },
+ {
+ "role": "user",
+ "content": "Yes. as perception is not a 2D array and answer is missing. For perception, its first row is 3 single item and 1 2-item array, not valid. Thus, putting Wrong for both perception and answer."
+ },
+ {
+ "role": "user",
+ "content": "``\n{\n \"Initial State\": [\n [3, \"*\", 2, \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", 2, 3, \"*\"],\n [\"*\", \"*\", \"*\", \"*\"]\n ],\n \"Thought\": \"The given Sudoku puzzle has some numbers already filled in. We need to fill in the missing numbers such that each row, column, and 2x2 sub-grid contains the numbers 1-4 without repetition.\",\n \"Solution\": [\n [3, 1, 2, 4],\n [4, 3, 1, 2],\n [1, 2, 3, 4],\n [2, 4, 4, 1]\n ]\n}\n```\n\nThe solution is obtained by applying the standard Sudoku solving techniques, such as focusing on the most constrained regions first and using logical deductions to eliminate possibilities."
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\n[[3,0,2,0],[0,0,0,0],[0,2,3,0],[0,0,0,0]]\nAnswer\n[[3,1,2,4],[4,3,1,2],[1,2,3,4],[2,4,4,1]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. ."
+ },
+ {
+ "role": "user",
+ "content": "```json\n{\"perception\": [[3, 1, \"*\", 4], [2, 4, 1, 3], [\"*\", \"*\", 4, \"*\"], [4, \"*\", 3, \"*\"]], \"think\": \"Looking at the first row, the only missing number is 2. \\nLooking at the last row, the missing numbers are 1 and 2. Since the cell at index (3,1) is of the same color as the cell at index (1,1) which contains the number 4, and the cell at index (3,0) contains the number 4, the cell at index (3,1) can only be 1 or 2. Since the cell at index (3,0) already contains 4, the cell at index (3,1) must be 1. Then the cell at index (3,3) must be 2. \\nLooking at the third row, the missing numbers are 1, 2, and 3. Since the cell at index (2,3) is of the same color as the cell at index (0,3) which contains 4, and the cell at index (1,3) contains 3, and the cell at index (2,2) contains 4, the cell at index (2,3) must be 1 or 2. Since the cell at index (3,3) contains 2, the cell at index (2,3) must be 1. Then the cell at index (2,0) must be 3, and the cell at index (2,1) must be 2. \\nTherefore, the solution is [[3, 1, 2, 4], [2, 4, 1, 3], [3, 2, 4, 1], [4, 1, 3, 2]].\", \"answer\": [[3, 1, 2, 4], [2, 4, 1, 3], [3, 2, 4, 1], [4, 1, 3, 2]]}\n```"
+ },
+ {
+ "role": "assistant",
+ "content": "Perception\n[[3,1,0,4],[2,4,1,3],[0,0,4,0],[4,0,3,0]]\nAnswer\n[[3,1,2,4],[2,4,1,3],[3,2,4,1],[4,1,3,2]]"
+ },
+ {
+ "role": "user",
+ "content": "Yes. ."
+ }
+]
\ No newline at end of file
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/evaluation.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..f14ae0c765700213693a9f174f04ccbf0cb85029
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/evaluation.py
@@ -0,0 +1,135 @@
+import ast
+import os
+import json
+import numpy as np
+import pandas as pd
+from collections import defaultdict
+from .score import evaluate_single_puzzle
+
def get_eval(judge, content):
    """Run the judge model on *content* and return its generated output.

    Args:
        judge: Evaluation model exposing a ``generate`` method.
        content: Input passed straight through to the judge.

    Returns:
        Whatever ``judge.generate`` produces.
    """
    result = judge.generate(content)
    return result
+
def VGRPBench_atomeval(model, prompt, line):
    """Run one atomic VGRPBench evaluation.

    The judge model first reformats the raw prediction, then the formatted
    text is scored against the puzzle's ground-truth state.

    Args:
        model: The judge/formatting model.
        prompt: Raw prediction text to be formatted.
        line: Record describing the puzzle (category, states, ...).

    Returns:
        dict: Scores produced by ``parse_score``.
    """
    print("raw output", prompt)
    formatted = get_eval(model, prompt)
    print("formatted output", formatted)
    return parse_score(line, formatted)
+
def parse_score(line, output):
    """Score one formatted model output against its VGRPBench puzzle.

    Args:
        line: Record with at least ``category`` (e.g. "thermometers_4x4")
            and ``states`` (puzzle state stored as a Python-literal string).
        output: The formatted model output string.

    Returns:
        dict: ``perception_correct`` / ``answer_correct`` results from
        ``evaluate_single_puzzle``.
    """
    # "thermometers_4x4" -> "thermometers"
    puzzle_type = line['category'].split('_')[0]

    # The states column holds a stringified literal; decode it safely.
    puzzle_data = ast.literal_eval(line['states'])

    return evaluate_single_puzzle(output, puzzle_data, puzzle_type)
+
def VGRPBench_score(data):
    """Aggregate VGRPBench evaluation results into per-category scores.

    Args:
        data: DataFrame with ``category``, ``perception_correct`` and
            ``answer_correct`` columns.

    Returns:
        pandas.DataFrame: One row per category with percentage scores
        rounded to two decimals.
    """
    rows = defaultdict(list)

    for category in set(data['category']):
        subset = data[data['category'] == category]
        rows['category'].append(category)
        # Mean of the boolean columns, expressed as a percentage.
        rows['Perception Score'].append(
            round(np.mean(subset['perception_correct']) * 100, 2))
        rows['Answer Score'].append(
            round(np.mean(subset['answer_correct']) * 100, 2))

    return pd.DataFrame(rows)
+
def build_prompt(line):
    """Return the raw prediction text to use as the judge prompt.

    Args:
        line: Record containing a ``prediction`` field.

    Returns:
        str: The prediction text.
    """
    prediction = line['prediction']
    return prediction
+
def VGRPBench_get_system_prompt(line):
    """Load the formatting system prompt for a puzzle's type.

    Args:
        line: Data row whose ``category`` encodes the puzzle type
            (e.g. "thermometers_4x4").

    Returns:
        str: Prompt text from the puzzle type's filter_prompt.json plus a
        fixed instruction to use the conversation history.
    """
    # "thermometers_4x4" -> "thermometers"
    puzzle_type = line['category'].split('_')[0]

    # configs/formating-prompt/<type>/filter_prompt.json next to this module.
    prompt_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "configs",
        "formating-prompt",
        puzzle_type,
        "filter_prompt.json",
    )

    with open(prompt_path, 'r') as fh:
        loaded = json.load(fh)

    return str(loaded) + "According to the conversation history with the user feedback do the formatting for the current answer."  # noqa: E501
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/battleships.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/battleships.py
new file mode 100644
index 0000000000000000000000000000000000000000..914311cf3543fcf472265f1ea146c06c8f854437
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/battleships.py
@@ -0,0 +1,160 @@
+import random
+import argparse
+import os
+from typing import List, Dict, Any, Tuple
+
+from .common_puzzle_factory import PuzzleFactory
+from .common_constriants import Constraint
+
class ConstraintBattleships(Constraint):
    """Adjacency constraint for Battleships boards.

    Board cells may be 0 (undecided), "s" (hidden ship segment), or a
    ``(cell, direction)`` tuple for a revealed segment whose direction
    character indicates the ship's axis.
    """

    def check(self, game_state: Dict[str, Any]) -> bool:
        """Return False when a ship segment illegally touches another ship.

        Directional (tuple) cells are checked only perpendicular to their
        axis; plain "s" cells are rejected only on diagonal contact, since
        orthogonal neighbours may belong to the same ship.
        """
        board = game_state["board"]
        size = len(board)
        # Check if ships touch diagonally or orthogonally
        for i in range(size):
            for j in range(size):
                if isinstance(board[i][j], tuple): # Check if it's a revealed ship with direction
                    ship_cell, direction = board[i][j]
                    # Add direction-specific checks here
                    if direction in "<>-": # Horizontal ship
                        # Check cells above and below
                        for di in [-1, 1]:
                            if 0 <= i + di < size and board[i + di][j] == "s":
                                return False
                    elif direction in "^V|": # Vertical ship
                        # Check cells left and right
                        for dj in [-1, 1]:
                            if 0 <= j + dj < size and board[i][j + dj] == "s":
                                return False
                elif board[i][j] == "s":
                    # Regular ship cell checks: scan the 8-neighbourhood, but
                    # only diagonal contacts (di != 0 and dj != 0) fail below.
                    for di in [-1, 0, 1]:
                        for dj in [-1, 0, 1]:
                            if di == 0 and dj == 0:
                                continue
                            ni, nj = i + di, j + dj
                            if (0 <= ni < size and 0 <= nj < size and
                                (board[ni][nj] == "s" or (isinstance(board[ni][nj], tuple) and board[ni][nj][0] == "s")) and
                                (di != 0 and dj != 0)): # Diagonal check
                                return False
        return True
+
class ConstraintBattleshipsHints(Constraint):
    """Hint-consistency constraint for Battleships.

    Validates the board against the row/column ship-cell counts and the
    fleet composition (``hints["ships"]`` maps ship length -> count).
    Cells: "s" = ship, 0 = undecided, anything else = decided water.
    """

    def check(self, game_state: Dict[str, Any]) -> bool:
        """Return True while the board can still satisfy all hints.

        Partial boards pass if every hint is still reachable; once no
        undecided cells remain, the connected ship shapes must exactly
        match the required fleet.
        """
        board = game_state["board"]
        hints = game_state["hints"]
        row_hints = hints["row_hints"]
        col_hints = hints["col_hints"]
        ships = hints["ships"]
        size = len(board)
        # Calculate total required ship cells from ships configuration
        total_ship_cells_required = sum(int(length) * int(count) for length, count in ships.items())
        total_ship_cells_selected = sum(1 for i in range(size) for j in range(size) if board[i][j] == "s")
        total_undefined_cells = sum(1 for i in range(size) for j in range(size) if board[i][j] == 0)

        # Check if we have enough cells (placed + potential) to fit all ships
        if total_ship_cells_selected + total_undefined_cells < total_ship_cells_required:
            return False

        # Check if we haven't exceeded the total required ship cells
        if total_ship_cells_selected > total_ship_cells_required:
            return False

        # Check row hints
        for i in range(size):
            row_selected = sum(1 for j in range(size) if board[i][j] == "s")
            row_undefined = sum(1 for j in range(size) if board[i][j] == 0)
            # Consider both undefined (0) and non-revealed water cells for potential ships
            if all(cell != 0 and cell != -1 for cell in board[i]): # if row is complete
                if row_selected != row_hints[i]:
                    return False
            else: # if row is incomplete
                if row_selected > row_hints[i]: # too many selected
                    return False
                if row_selected + row_undefined < row_hints[i]: # impossible to reach target
                    return False
        # Check column hints
        for j in range(size):
            col_selected = sum(1 for i in range(size) if board[i][j] == "s")
            col_undefined = sum(1 for i in range(size) if board[i][j] == 0)
            if all(board[i][j] != 0 and board[i][j] != -1 for i in range(size)): # if column is complete
                if col_selected != col_hints[j]:
                    return False
            else: # if column is incomplete
                if col_selected > col_hints[j]: # too many selected
                    return False
                if col_selected + col_undefined < col_hints[j]: # impossible to reach target
                    return False
        # When all cells are filled, check ship shapes
        if total_undefined_cells == 0:
            # Find all ships by finding connected components
            visited = [[False] * size for _ in range(size)]
            ship_lengths = []

            def get_ship_length(i: int, j: int) -> int:
                """Measure the ship starting at (i, j), marking its cells visited.

                Assumes well-formed ships are straight lines; a horizontal run
                is preferred when both directions have an adjacent "s".
                """
                if (i < 0 or i >= size or j < 0 or j >= size or
                    visited[i][j] or board[i][j] != "s"):
                    return 0

                visited[i][j] = True
                length = 1

                # Check if ship is horizontal
                if (j + 1 < size and board[i][j + 1] == "s"):
                    # Add all horizontal cells
                    for col in range(j + 1, size):
                        if board[i][col] != "s":
                            break
                        visited[i][col] = True
                        length += 1
                # Check if ship is vertical
                elif (i + 1 < size and board[i + 1][j] == "s"):
                    # Add all vertical cells
                    for row in range(i + 1, size):
                        if board[row][j] != "s":
                            break
                        visited[row][j] = True
                        length += 1

                return length

            # Find all ships
            for i in range(size):
                for j in range(size):
                    if not visited[i][j] and board[i][j] == "s":
                        ship_lengths.append(get_ship_length(i, j))
            # Count ships of each length
            ship_counts = {}
            for length in ship_lengths:
                ship_counts[length] = ship_counts.get(length, 0) + 1
            # Verify against required ships
            for length, count in ships.items():
                if ship_counts.get(int(length), 0) != int(count):
                    return False
        return True
+
class BattleshipsPuzzleFactory(PuzzleFactory):
    """Factory for Battleships puzzles of a fixed square size."""

    def __init__(self, size: int) -> None:
        super().__init__()
        self.game_name = "battleships"
        self.size = size
        # Placement constraints: no touching ships, plus hint consistency.
        self.constraints = [
            ConstraintBattleships(),
            ConstraintBattleshipsHints(),
        ]
        # "e" = empty water, "s" = ship segment.
        self.all_possible_values = ["e", "s"]

    def get_possible_values(self, game_state: Dict[str, Any], row: int, col: int) -> List[int]:
        """Return the values that can legally fill (row, col).

        A cell that is already decided (non-zero) admits no values.
        """
        board = game_state["board"]
        if board[row][col] != 0:
            return []

        saved = board[row][col]
        legal = []
        for candidate in self.all_possible_values:
            # Tentatively place the value, then test every constraint.
            board[row][col] = candidate
            if self.check(game_state):
                legal.append(candidate)
        board[row][col] = saved
        return legal
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/kakurasu.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/kakurasu.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ac8e9136969194120cab46c4fb2248ebe64f85a
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/kakurasu.py
@@ -0,0 +1,78 @@
+from typing import List, Dict, Any, Tuple
+import random
+import copy
+import os
+import json
+import argparse
+
+from .common_puzzle_factory import PuzzleFactory
+from .common_constriants import Constraint
+
class ConstraintKakurasuSum(Constraint):
    """Kakurasu constraint: weighted row and column sums must match the clues."""

    def __init__(self) -> None:
        super().__init__()
        # Identifier used to distinguish this constraint.
        self.name = "constraint_kakurasu_sum"

    def check(self, game_state: Dict[str, Any]) -> bool:
        """Return True while every row/column clue is still satisfiable.

        Cells hold "s" (selected), 0 (undecided) or "e" (empty); position
        k within a line contributes weight k + 1 when selected. A complete
        line must hit its clue exactly; an incomplete line must neither
        overshoot nor be unable to reach it.
        """
        board = game_state["board"]
        clues = game_state.get("clues", {"row_clues": [], "col_clues": []})
        size = len(board)
        weights = [k + 1 for k in range(size)]

        # Row clues.
        for r in range(size):
            selected_sum = sum(weights[c] for c in range(size) if board[r][c] == "s")
            target = clues["row_clues"][r]
            if 0 not in board[r]:
                # Row fully decided: exact match required.
                if selected_sum != target:
                    return False
            else:
                if selected_sum > target:
                    return False
                # Best case: every undecided cell gets selected.
                potential = selected_sum + sum(
                    weights[c] for c in range(size) if board[r][c] == 0)
                if potential < target:
                    return False

        # Column clues.
        for c in range(size):
            selected_sum = sum(weights[r] for r in range(size) if board[r][c] == "s")
            target = clues["col_clues"][c]
            if all(board[r][c] != 0 for r in range(size)):
                # Column fully decided: exact match required.
                if selected_sum != target:
                    return False
            else:
                if selected_sum > target:
                    return False
                potential = selected_sum + sum(
                    weights[r] for r in range(size) if board[r][c] == 0)
                if potential < target:
                    return False

        return True
+
class KakurasuPuzzleFactory(PuzzleFactory):
    """Factory for Kakurasu puzzles of a fixed square size (minimum 3)."""

    def __init__(self, size: int) -> None:
        super().__init__()
        if size < 3:
            raise ValueError("Grid size must be at least 3")
        self.game_name = "kakurasu"
        self.size = size
        self.constraints = [ConstraintKakurasuSum()]
        # "e" = empty, "s" = selected.
        self.all_possible_values = ["e", "s"]
        # Weight of row/column index k is k + 1.
        self.weights = [k + 1 for k in range(size)]

    def get_possible_values(self, game_state: Dict[str, Any], row: int, col: int) -> List[int]:
        """Return every value that keeps the board consistent at (row, col)."""
        board = game_state["board"]
        saved = board[row][col]
        legal = []
        for candidate in self.all_possible_values:
            # Tentatively place the candidate and re-run all constraints.
            board[row][col] = candidate
            if self.check(game_state):
                legal.append(candidate)
        board[row][col] = saved
        return legal
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/score.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/score.py
new file mode 100644
index 0000000000000000000000000000000000000000..9880f814a428df7008179c9228eebef780673a97
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/score.py
@@ -0,0 +1,438 @@
+"""
+VGRPBench scoring module for evaluating visual grid reasoning puzzle solutions.
+
+This module provides functions to evaluate puzzle solutions from language models,
+including parsing model outputs, checking perception accuracy, and verifying solutions.
+"""
+
+import json
+import argparse
+import os
+import re
+import sys
+from tqdm import tqdm
+import numpy as np
+from json_repair import repair_json
+from . import puzzles
+from .puzzles import common_get_game_factory as get_game_factory
+
+# Global variable to store the puzzle grid size
+GRID_SIZE = None
+
+
def extract_perception_and_answer(model_output):
    """
    Extract both perception and answer from model output.

    The output is expected to contain a perception section headed by
    "Initial State" or "Perception" and an answer section headed by
    "Answer" or "Solution". Grids are written as ``[[a,b],[c,d]]``; cells
    stay strings except that ``*`` (unknown) becomes the integer 0. Either
    section may instead contain the single word "Wrong".

    Args:
        model_output (str): The raw output from the model

    Returns:
        tuple: (initial_state, solution) — each a 2D list, or None for a
        section marked "Wrong"; (None, None) if parsing fails
    """
    def _parse_grid(text):
        # "[[a,b],[c,d]]" -> [['a', 'b'], ['c', 'd']] (cells stay strings)
        rows = text.strip()[2:-2].split('],[')
        return [row.split(',') for row in rows]

    try:
        # Handle plain text format
        if "Initial State" in model_output:
            parts = model_output.split('Initial State\n', 1)
        elif "Perception" in model_output:
            parts = model_output.split('Perception\n', 1)
        else:
            return None, None

        if len(parts) != 2:
            return None, None
        content = parts[1]

        if "Answer" in content:
            perception_answer = content.split('\nAnswer\n')
        elif "Solution" in content:
            perception_answer = content.split('\nSolution\n')
        else:
            return None, None

        if len(perception_answer) != 2:
            return None, None

        perception, answer = perception_answer

        initial_state = None if perception.strip() == "Wrong" else _parse_grid(perception)
        solution = None if answer.strip() == "Wrong" else _parse_grid(answer)

        # BUG FIX: the original iterated initial_state unconditionally here,
        # raising TypeError (and falling through to (None, None)) whenever
        # the perception section was "Wrong". Guard against None so the
        # parsed solution survives.
        if initial_state is not None:
            initial_state = [[cell if cell != '*' else 0 for cell in row]
                             for row in initial_state]

        return initial_state, solution
    except Exception as e:
        print(f"Error parsing output: {e}")
        return None, None
+
+
def check_perception(thoughts, init_board, game_type):
    """
    Compare the model's perceived initial state with the ground truth.

    Both grids are compared cell-by-cell on their string forms, after a
    few game-specific normalizations.

    Args:
        thoughts (list): 2D grid the model reported as the initial state
        init_board (list): Ground-truth initial state
        game_type (str): Puzzle type, selects the normalization
    Returns:
        bool: True when the grids match in shape and content
    """
    if game_type == "battleships":
        # Revealed water ("e") and undecided (0) are treated the same.
        init_board = [[0 if cell == 'e' else cell for cell in row] for row in init_board]
        thoughts = [[0 if cell == 'e' else cell for cell in row] for row in thoughts]
    if game_type == "lightup":
        # Numbered walls are all reported as plain walls ('w').
        for i in range(len(init_board)):
            for j in range(len(init_board[i])):
                cell = init_board[i][j]
                is_nonzero_number = isinstance(cell, (int, float)) and cell != 0
                is_nonalpha_string = isinstance(cell, str) and not cell.isalpha()
                if is_nonzero_number or is_nonalpha_string:
                    init_board[i][j] = 'w'
    if game_type == "fieldexplore":
        # Unexplored cells are stored as -1 but perceived as 0.
        init_board = [[0 if cell == -1 else cell for cell in row] for row in init_board]

    # Accept a newline-separated string form of the ground truth as well.
    if isinstance(init_board, str):
        init_grid = [list(row) for row in init_board.strip().split('\n')]
    else:
        init_grid = init_board

    # Shapes must agree before any cell comparison.
    if len(thoughts) != len(init_grid):
        return False
    if any(len(row) != len(init_grid[0]) for row in thoughts):
        return False

    # Cell-by-cell comparison on the string forms.
    return all(
        str(init_grid[i][j]) == str(thoughts[i][j])
        for i in range(len(init_grid))
        for j in range(len(init_grid[0]))
    )
+
+
def check_answer(answer, init_board, game_factory):
    """
    Verify if the model's answer is correct for the given puzzle.

    Performs game-specific validations and uses the game factory to check solution correctness.
    Relies on the module-level GRID_SIZE global, which is set by
    evaluate_single_puzzle() before this function runs. Mutates ``answer``
    in place for several game types (cell normalization).

    Args:
        answer (list): 2D array representing the model's solution
        init_board (list): 2D array representing the initial state
        game_factory (GameFactory): Factory object for the specific game type
    Returns:
        bool: True if the answer is correct, False otherwise
    """
    global GRID_SIZE
    # Game-specific preprocessing for answers
    if game_factory.game_name in ["treesandtents", "starbattle", "hitori", "aquarium", "kakurasu"]:
        # Normalize 0 / '0' cells to the canonical empty marker 'e'.
        for i in range(len(answer)):
            for j in range(len(answer[i])):
                if answer[i][j] in [0, '0']:
                    answer[i][j] = 'e'
    if game_factory.game_name == "oddevensudoku":
        # Every cell must be an integer; any non-numeric cell fails fast.
        for i in range(len(answer)):
            for j in range(len(answer[i])):
                try:
                    answer[i][j] = int(answer[i][j])
                except Exception as e:
                    return False
    if game_factory.game_name == "lightup":
        # Convert '0' to 'e'
        for i in range(len(answer)):
            for j in range(len(answer[i])):
                if answer[i][j] == '0':
                    answer[i][j] = 'e'
    # Convert string representation to 2D grid if needed
    if isinstance(init_board, str):
        init_grid = [[c for c in row] for row in init_board.strip().split('\n')]
    else:
        init_grid = init_board
    # Check dimensions
    if len(answer) != GRID_SIZE or any(len(row) != GRID_SIZE for row in answer):
        return False

    # Game-specific validation for initial values
    if game_factory.game_name == "hitori":
        # Compare with game_factory.additional_board
        for i in range(GRID_SIZE):
            for j in range(GRID_SIZE):
                if game_factory.additional_board[i][j] not in [0, '0'] and str(game_factory.additional_board[i][j]) != str(answer[i][j]):
                    return False
    elif game_factory.game_name == "nonogram":
        # Convert 0, '0', '*' in answer to 'e'
        for i in range(GRID_SIZE):
            for j in range(GRID_SIZE):
                if answer[i][j] in [0, '0', '*']:
                    answer[i][j] = 'e'
        # Givens from the initial grid must be preserved verbatim.
        for i in range(GRID_SIZE):
            for j in range(GRID_SIZE):
                if init_grid[i][j] not in [0, '0'] and str(init_grid[i][j]) != str(answer[i][j]):
                    return False
    elif game_factory.game_name == "fieldexplore":
        for i in range(GRID_SIZE):
            for j in range(GRID_SIZE):
                # 's' on the initial board must be kept
                if init_grid[i][j] == 's' and not answer[i][j] == 's':
                    return False
                try:
                    cell_value = int(init_grid[i][j])
                    if cell_value > 0 and str(answer[i][j]) == 's':
                        return False
                except (ValueError, TypeError):
                    # Cell is not a number, continue with other checks
                    pass
        # NOTE(review): fieldexplore returns here and never runs the factory
        # constraint check below — presumably intentional; confirm.
        return True
    else:
        # Default rule: every non-empty given must appear unchanged.
        for i in range(GRID_SIZE):
            for j in range(GRID_SIZE):
                if init_grid[i][j] not in [0, '0', 'e'] and str(init_grid[i][j]) != str(answer[i][j]):
                    return False
    # Prepare game state for validation
    game_state = {
        "board": answer,
        "size": GRID_SIZE,
    }

    # Add game-specific state information (attributes were attached to the
    # factory by evaluate_single_puzzle).
    if game_factory.game_name == "skyscraper":
        game_state["clues"] = game_factory.clues
    elif game_factory.game_name == "coloredsudoku":
        game_state["colors"] = game_factory.current_colors
    elif game_factory.game_name == "futoshiki":
        game_state["inequalities"] = game_factory.current_inequalities
    elif game_factory.game_name == "killersudoku":
        game_state["cages"] = game_factory.cages
    elif game_factory.game_name == "renzoku":
        game_state["hints"] = game_factory.hints
    elif game_factory.game_name == 'kakuro':
        game_state["sums"] = game_factory.current_sums
    elif game_factory.game_name == "thermometers":
        game_state["clues"] = game_factory.clues
    elif game_factory.game_name == "treesandtents":
        game_state["clues"] = game_factory.clues
    elif game_factory.game_name == "starbattle":
        game_state["regions"] = game_factory.regions
    elif game_factory.game_name == "hitori":
        game_state["numbers"] = game_factory.numbers
    elif game_factory.game_name == "aquarium":
        game_state["clues"] = game_factory.clues
    elif game_factory.game_name == "kakurasu":
        game_state["clues"] = game_factory.clues
    elif game_factory.game_name == "oddevensudoku":
        game_state["cell_types"] = game_factory.cell_types
    elif game_factory.game_name == "nonogram":
        game_state["hints"] = game_factory.hints
    elif game_factory.game_name == "lightup":
        game_state["wall_numbers"] = game_factory.wall_numbers
    elif game_factory.game_name == "battleships":
        game_state["hints"] = game_factory.hints
    # Validate the solution using the game factory
    try:
        return game_factory.check(game_state)
    except Exception as e:
        print(f"Error checking answer: {e}")
        return False
+
+
def calculate_group_statistics(outcomes, num_groups=5):
    """
    Split binary outcomes into consecutive groups and report per-group means.

    Used to estimate run-to-run variance: the outcomes are divided into
    ``num_groups`` chunks (the last chunk absorbs any remainder) and each
    chunk's mean success rate is computed as a percentage.

    Args:
        outcomes (list): Binary outcomes (0 or 1) for each puzzle
        num_groups (int): Number of groups to split the data into

    Returns:
        tuple: (group_means, group_std) where group_means is a list of
        percentages and group_std is the standard deviation between groups
    """
    if not outcomes:
        return [], 0.0

    values = np.array(outcomes)
    chunk = len(values) // num_groups

    group_means = []
    for g in range(num_groups):
        lo = g * chunk
        # The final group takes whatever is left over.
        hi = lo + chunk if g < num_groups - 1 else len(values)
        group_means.append(np.mean(values[lo:hi]) * 100)

    return group_means, np.std(group_means)
+
+
def evaluate_single_puzzle(model_output, puzzle_data, game_type):
    """
    Evaluate a single puzzle solution.

    Processes model output and puzzle data to determine if the model correctly
    understood the puzzle (perception) and provided a valid solution (answer).
    The answer is only checked when the perception is correct.

    Args:
        model_output (str): The raw output from the model
        puzzle_data (dict): Puzzle data including initialization
        game_type (str): Type of puzzle game (e.g., "thermometers", "sudoku")
    Returns:
        dict: Evaluation results with perception_correct, answer_correct and
        number_of_samples (always 1)
    """
    # Add puzzle directory to path if needed so puzzle modules can import
    # each other by bare name.
    curr_dir = os.path.dirname(os.path.abspath(__file__))
    puzzle_dir = os.path.join(curr_dir, "puzzles")
    if puzzle_dir not in sys.path:
        sys.path.append(puzzle_dir)

    # Initialize the appropriate game factory for the puzzle type
    GameFactory = get_game_factory.get_game_factory(game_type)

    init_board = puzzle_data['initialization']

    # NOTE(review): factory size is hard-coded to 4; GRID_SIZE below is
    # derived from the actual board — confirm only 4x4 puzzles are expected.
    game_factory = GameFactory(size=4)

    # Game-specific initialization handling: several games nest the board
    # and extra state (clues, hints, regions, ...) inside 'initialization',
    # and the extra state is attached to the factory for check_answer().
    if game_type == "coloredsudoku":
        colors = puzzle_data.get('colors', None)
        game_factory.current_colors = colors
    elif game_type == "binairo":
        init_board = puzzle_data.get('initialization', None)
    elif game_type == "futoshiki":
        row_inequalities = puzzle_data.get('row_inequalities', None)
        col_inequalities = puzzle_data.get('col_inequalities', None)
        game_factory.current_inequalities = {
            "row": row_inequalities,
            "col": col_inequalities
        }
    elif game_type == "killersudoku":
        cages = puzzle_data.get('cages', None)
        game_factory.cages = cages
    elif game_type == "renzoku":
        hints = puzzle_data.get('hints', None)
        game_factory.hints = hints
    elif game_type == "kakuro":
        sums = puzzle_data.get('sums', None)
        game_factory.current_sums = sums
    elif game_type == "skyscraper":
        clues = puzzle_data.get('initialization', None).get('clues')
        init_board = puzzle_data.get('initialization', None).get('board') # Special case
        game_factory.clues = clues
    elif game_type == "thermometers":
        clues = puzzle_data.get('initialization', None).get('clues')
        game_factory.clues = clues
        init_board = puzzle_data.get('initialization', None).get('board')
    elif game_type == "treesandtents":
        clues = puzzle_data.get('clues', None)
        game_factory.clues = clues
        init_board = puzzle_data.get('initialization', None)
    elif game_type == "starbattle":
        init_board = puzzle_data.get('initialization', None)
        game_factory.regions = puzzle_data.get('regions', None)
    elif game_type == "hitori":
        init_board = puzzle_data.get('initialization').get('numbers', None)
        game_factory.numbers = puzzle_data.get('initialization', None).get('numbers')
        game_factory.additional_board = puzzle_data.get('initialization', None).get('board')
    elif game_type == "aquarium":
        init_board = puzzle_data.get('initialization', None).get('board')
        game_factory.clues = puzzle_data.get('initialization', None).get('clues', None)
    elif game_type == "kakurasu":
        init_board = puzzle_data.get('initialization', None).get('board')
        game_factory.clues = puzzle_data.get('initialization', None).get('clues', None)
    elif game_type == "oddevensudoku":
        game_factory.cell_types = puzzle_data.get('cell_types')
        init_board = puzzle_data.get('initialization', None)
    elif game_type == "battleships":
        init_board = puzzle_data.get('initialization', None)
        game_factory.hints = puzzle_data.get('hints', None)
    elif game_type == "jigsawsudoku":
        init_board = puzzle_data.get('initialization', None)
    elif game_type == "nonogram":
        init_board = puzzle_data.get('initialization', None)
        game_factory.hints = puzzle_data.get('hints', None)
    elif game_type == "lightup":
        init_board = puzzle_data.get('initialization', None)
        game_factory.wall_numbers = puzzle_data.get('wall_numbers', None)
    # Set grid size. NOTE(review): GRID_SIZE is sticky across calls — only
    # the first evaluated puzzle sets it; confirm mixed sizes never occur
    # within one process.
    global GRID_SIZE
    GRID_SIZE = len(init_board) if GRID_SIZE is None else GRID_SIZE

    # Extract model's perception and answer from its output
    thoughts, answer = extract_perception_and_answer(model_output)
    # Early return if parsing failed
    if thoughts is None or answer is None:
        return {
            "perception_correct": False,
            "answer_correct": False,
            "number_of_samples": 1
        }

    # Game-specific preprocessing
    try:
        if game_type == "starbattle":
            for i in range(len(thoughts)):
                for j in range(len(thoughts[i])):
                    if thoughts[i][j] == "*":
                        thoughts[i][j] = "0"
    except Exception as e:
        print(f"starbattle: Error converting thoughts to 0: {e}")
    try:
        if game_type == "killersudoku":
            answer = [[int(cell) for cell in row] for row in answer]
    except Exception as e:
        # NOTE(review): on conversion failure answer becomes None and is
        # later passed to check_answer when perception is correct — verify
        # that path cannot crash on len(None).
        answer = None

    # Special handling for trees and tents
    if game_type == "treesandtents":
        # Convert shorthand symbols to standard format
        for i in range(len(thoughts)):
            for j in range(len(thoughts[i])):
                if thoughts[i][j] == 't':
                    thoughts[i][j] = 'tt'
                elif thoughts[i][j] == 'r':
                    thoughts[i][j] = 'tr'
        for i in range(len(answer)):
            for j in range(len(answer[i])):
                if answer[i][j] == 't':
                    answer[i][j] = 'tt'
                elif answer[i][j] == 'r':
                    answer[i][j] = 'tr'

    # Check perception and answer; the answer is only validated when the
    # perception was correct.
    perception_correct = check_perception(thoughts, init_board, game_type)
    answer_correct = check_answer(answer, init_board, game_factory) if perception_correct else False

    return {
        "perception_correct": perception_correct,
        "answer_correct": answer_correct,
        "number_of_samples": 1
    }
+
+
if __name__ == "__main__":
    # BUG FIX: the original called main(), which is not defined anywhere in
    # this module, so running the file directly raised NameError. This is a
    # library module; exit with a clear message instead.
    raise SystemExit(
        "score.py is a library module with no CLI; "
        "use evaluate_single_puzzle() from Python instead."
    )
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vlm2bench.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vlm2bench.py
new file mode 100644
index 0000000000000000000000000000000000000000..6cc1cc20e61b333a561a0b9cbb8b9073b62ea740
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vlm2bench.py
@@ -0,0 +1,243 @@
+"""
+vlm2bench utils for eval
+
+Input sample format: contains the following fields:
+ - index (original q_id)
+ - question
+ - answer (correct answer, i.e., gt_answer)
+ - category
+ - prediction (model output, i.e., model answer)
+
+The categories of each sub-task include:
+ gc-mat, gc-trk, oc-cpr, pc-cpr --> tf pair task (the last character of the same index distinguishes positive or negative with _p or _n)
+ oc-cnt, pc-cnt --> cnt type
+ oc-grp, pc-grp --> grp (MCQ) type
+""" # noqa: E501
+
+import os
+import re
+import json
+from collections import defaultdict
+from PIL import Image
+
+##########################################
+# 1. General Functions
+##########################################
+
+
def common_doc_to_text(sample, **kwargs):
    """Return the sample's "question" text (empty string when absent)."""
    return sample["question"] if "question" in sample else ""
+
+
def common_doc_to_target(sample, **kwargs):
    """Return the sample's ground-truth "answer" (empty string when absent)."""
    return sample["answer"] if "answer" in sample else ""
+
+
def common_process_results(results):
    """Identity pass-through.

    The input records already use the final field names (index, question,
    answer, category, prediction), so no remapping is required and the
    original object is returned unchanged.
    """
    return results
+
+##########################################
+# 2. TF Pair Task Evaluation (suitable for gc-mat, gc-trk, oc-cpr, pc-cpr)
+##########################################
+
+
def parse_tf_answer(model_answer):
    """Pull a single True/False verdict out of free-form text.

    Accepts 'T', 'F', 'True', 'False' (any case, whole words only).
    Returns ('T' or 'F', None) on a unique match, otherwise (None, reason)
    with reason 'multiple_answers_found' or 'no_answer_found'.
    """
    hits = [m.upper()[0]
            for m in re.findall(r'\b(t|f|true|false)\b', model_answer, flags=re.IGNORECASE)]
    if not hits:
        return None, 'no_answer_found'
    if len(hits) > 1:
        return None, 'multiple_answers_found'
    return hits[0], None
+
+
def tf_pair_aggregate_accuracy(results):
    """Pair-level accuracy (in %) for the TF tasks.

    Records are grouped by index prefix: an index like "pc-cpr_1_p" /
    "pc-cpr_1_n" drops its trailing _p/_n segment to form the group key.
    A group counts as correct only when it holds at least two records and
    every record's parsed prediction equals its answer ("T" or "F").
    Returns correct-groups / total-groups * 100 (0 when there are no groups).
    """
    paired = defaultdict(list)
    for record in results:
        key = record.get("index", "")
        if "_" not in key:
            continue  # malformed index: cannot derive a pair id
        paired["_".join(key.split("_")[:-1])].append(record)

    n_groups = len(paired)
    n_correct = 0
    for members in paired.values():
        # An incomplete pair (fewer than two records) can never score.
        if len(members) < 2:
            continue
        all_match = True
        for record in members:
            expected = record.get("answer", "").strip().upper()
            parsed, _err = parse_tf_answer(record.get("prediction", "").strip().upper())
            if parsed != expected:
                all_match = False
                break
        if all_match:
            n_correct += 1
    return (n_correct / n_groups) * 100 if n_groups > 0 else 0
+
+##########################################
+# 3. CNT Task Evaluation (suitable for oc-cnt, pc-cnt)
+##########################################
+
+
# Word -> value map for English number phrases; 100 and 1000 are multipliers.
NUM_WORDS = {
    "zero": 0, "one": 1, "two": 2, "three": 3, "four": 4, "five": 5,
    "six": 6, "seven": 7, "eight": 8, "nine": 9, "ten": 10,
    "eleven": 11, "twelve": 12, "thirteen": 13, "fourteen": 14,
    "fifteen": 15, "sixteen": 16, "seventeen": 17, "eighteen": 18,
    "nineteen": 19, "twenty": 20, "thirty": 30, "forty": 40, "fifty": 50,
    "sixty": 60, "seventy": 70, "eighty": 80, "ninety": 90, "hundred": 100, "thousand": 1000,
}
# Penalty-shaping constants for the CNT metric (see cnt_aggregate_metric).
PENALTY_FACTOR = 10
L_MAX = 4


def words_to_num(s):
    """Convert an English number phrase (e.g. "one hundred and five") to an int.

    Returns None when any token is not a known number word or the phrase
    evaluates to zero (the original treats 0 as "no number found").
    """
    s = s.lower().replace('-', ' ')
    # Drop only standalone "and" tokens ("one hundred and five").
    # BUGFIX: the previous code removed the *substring* "and", which corrupted
    # words containing it — e.g. "thousand" became "thous ", so phrases like
    # "two thousand" failed to parse.
    tokens = [tok for tok in s.split() if tok != 'and']
    total = 0
    current = 0
    for token in tokens:
        if token not in NUM_WORDS:
            return None
        scale = NUM_WORDS[token]
        if scale in (100, 1000):
            # Multiplier word: scale the running group (an implicit leading
            # "one" is assumed, so bare "hundred" means 100) and bank it.
            if current == 0:
                current = 1
            current *= scale
            total += current
            current = 0
        else:
            current += scale
    total += current
    return total if total != 0 else None
+
+
def extract_numbers(text):
    """Collect numeric mentions from text.

    Returns all digit runs as ints, followed by (at most) one value parsed
    from the English number words found anywhere in the text — all word
    matches are joined into a single phrase and fed to words_to_num.
    """
    text = text.lower()
    digits = [int(tok) for tok in re.findall(r'\d+', text)]
    word_re = re.compile(
        r'\b(zero|one|two|three|four|five|six|seven|eight|nine|ten|'
        r'eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|'
        r'eighteen|nineteen|twenty|thirty|forty|fifty|sixty|seventy|'
        r'eighty|ninety|hundred|thousand)\b', re.IGNORECASE)
    word_hits = word_re.findall(text)
    from_words = []
    if word_hits:
        value = words_to_num(' '.join(word_hits))
        if value is not None:
            from_words.append(value)
    return digits + from_words
+
+
def parse_model_answer(model_answer):
    """Return the single number mentioned in model_answer, or None when
    zero or several numbers are found (the count is then ambiguous)."""
    found = extract_numbers(model_answer)
    return found[0] if len(found) == 1 else None
+
+
def cnt_aggregate_metric(results):
    """Average normalized score (0-100) for the CNT (counting) task.

    For each sample the prediction is parsed into a single number and compared
    with the integer ground truth; the absolute error is normalized by the
    worst possible error for the frame count and softened through
    PENALTY_FACTOR / L_MAX. Samples whose answer cannot be read as an int are
    skipped entirely. Returns the mean score over scored samples (0 if none).
    """
    total_count = 0
    total_norm_score = 0.0
    for item in results:
        try:
            gt = int(item.get("answer", None))
        # BUGFIX: was a bare `except:`, which swallowed everything including
        # KeyboardInterrupt/SystemExit; only conversion failures belong here.
        except (TypeError, ValueError):
            gt = None
        if gt is None:
            continue
        total_count += 1
        model_ans_str = str(item.get("prediction", "")).strip()
        # Use the record's image_seq_len when present; default to 2 otherwise.
        image_seq_len = item.get("image_seq_len", 2)
        try:
            image_seq_len = int(image_seq_len)
        except (TypeError, ValueError):  # was bare except; see above
            image_seq_len = 2

        parsed = parse_model_answer(model_ans_str)
        if parsed is None:
            # No unambiguous number in the prediction -> zero score.
            norm_score = 0.0
        else:
            raw_diff = abs(parsed - gt)
            if raw_diff == 0:
                norm_score = 100.0
            else:
                # Worst-case absolute error given the ground truth and frames.
                max_error = max(gt - 1, image_seq_len - gt)
                if max_error <= 0:
                    max_error = 1
                relative_error = raw_diff / max_error
                weight = L_MAX / image_seq_len
                penalty = weight * (relative_error ** (1.0 / PENALTY_FACTOR))
                norm_score = 100 * (1 - penalty) if penalty < 1 else 0.0
        total_norm_score += norm_score
    return total_norm_score / total_count if total_count > 0 else 0
+
+
+##########################################
+# 4. GRP Task Evaluation (suitable for oc-grp, pc-grp)
+##########################################
+
+
def grp_clean_answer(answer):
    """Strip an MCQ answer down to its option letter: keep only the text
    before the first ')' when one exists, then trim whitespace."""
    head, sep, _tail = answer.partition(")")
    return head.strip() if sep else answer.strip()
+
+
def grp_count_options(answer):
    """Count option markers of the form "(A)" ... "(Z)" appearing in answer."""
    return sum(1 for _ in re.finditer(r'\([A-Z]\)', answer))
+
+
def grp_aggregate_accuracy(results):
    """Accuracy (in %) for the GRP multiple-choice task.

    A prediction naming more than one option marker counts as wrong; otherwise
    the cleaned option letters of prediction and answer are compared.
    """ # noqa: E501
    total = len(results)
    correct = 0
    for item in results:
        pred_text = item.get("prediction", "")
        if grp_count_options(pred_text) > 1:
            continue  # ambiguous: several options named
        if grp_clean_answer(pred_text) == grp_clean_answer(item.get("answer", "")):
            correct += 1
    return (correct / total * 100) if total > 0 else 0
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vmcbench.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vmcbench.py
new file mode 100644
index 0000000000000000000000000000000000000000..74f6ec304a7756f82c0204f16b50148e30e642ba
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vmcbench.py
@@ -0,0 +1,96 @@
+import pandas as pd
+import numpy as np
+import random
+
+
def parse_multi_choice_response(response, all_choices, index2ans):
    """
    Parse the prediction from the generated response.
    Return the predicted index e.g., A, B, C, D.

    Matching is attempted in order: "(A)" / "A. " markers, then bare " A "
    tokens, then (for responses longer than 5 tokens) full answer text from
    index2ans. Ties are broken by the match occurring last in the response.
    NOTE: when nothing matches at all, a choice is picked at random, so this
    function is not deterministic on unparseable input.
    """
    response = str(response)
    # Strip leading/trailing punctuation so boundary matches work.
    for char in [',', '.', '!', '?', ';', ':', "'"]:
        response = response.strip(char)
    response = " " + response + " " # add space to avoid partial match

    index_ans = True  # True while we matched a letter, False for content match
    ans_with_brack = False
    candidates = []
    for choice in all_choices: # e.g., (A) (B) (C) (D)
        # NOTE: ans_with_brack is also set by the "A. " form, which later
        # makes the tie-break search for "(A)" — preserved as-is.
        if f'({choice})' in response or f'{choice}. ' in response:
            candidates.append(choice)
            ans_with_brack = True

    if len(candidates) == 0:
        for choice in all_choices: # e.g., A B C D
            if f' {choice} ' in response:
                candidates.append(choice)

    # if all above doesn't get candidates, check if the content is larger than 5 tokens and try to parse the example
    if len(candidates) == 0 and len(response.split()) > 5:
        for index, ans in index2ans.items():
            if ans.lower() in response.lower():
                candidates.append(index)
                index_ans = False # it's content ans.

    if len(candidates) == 0: # still not get answer, randomly choose one.
        pred_index = random.choice(all_choices)
    elif len(candidates) > 1:
        # Several candidates: keep the one whose (last) occurrence is furthest
        # right in the response, i.e. the final answer the model settled on.
        start_indexes = []
        if index_ans:
            if ans_with_brack:
                for can in candidates:
                    index = response.rfind(f'({can})')
                    start_indexes.append(index) # -1 will be ignored anyway
                # start_indexes = [generated_response.index(f'({can})') for can in candidates]
            else:
                for can in candidates:
                    index = response.rfind(f" {can} ")
                    start_indexes.append(index)
        else:
            for can in candidates:
                index = response.lower().rfind(index2ans[can].lower())
                start_indexes.append(index)
        # get the last one
        pred_index = candidates[np.argmax(start_indexes)]
    else: # if only one candidate, use it.
        pred_index = candidates[0]

    return pred_index
+
+
def get_mc_score(row, use_parse=True):
    """Score one multiple-choice row: 1 when the (parsed) prediction equals
    the answer letter, 0 otherwise. With use_parse=False the precomputed
    "output" column is compared directly. Returns False when option A is NaN
    (row has no choices to parse)."""
    if not use_parse:
        return int(row["output"] == row["answer"])
    if pd.isna(row["A"]):
        return False
    # Collect the available option letters A..I and their answer texts.
    letters = [chr(65 + offset) for offset in range(9)]
    all_choices = [c for c in letters if c in row and not pd.isna(row[c])]
    index2ans = {c: row[c] for c in all_choices}
    pred_index = parse_multi_choice_response(row["prediction"], all_choices, index2ans)
    return int(pred_index == row["answer"])
+
+
def report_vmc_acc(data):
    """Build the VMCBench accuracy report as a one-row DataFrame.

    Computes the mean hit rate per dataset category, the overall mean, and
    four group aggregates (General / Reasoning / OCR / Doc & Chart); all
    values are scaled to percentages rounded to 2 decimals.
    """
    general_datasets = ["SEEDBench", "MMStar", "A-OKVQA", "VizWiz", "MMVet", "VQAv2", "OKVQA"]
    reason_datasets = ["MMMU", "MathVista", "ScienceQA", "RealWorldQA", "GQA", "MathVision"]
    ocr_datasets = ["TextVQA", "OCRVQA"]
    doc_datasets = ["AI2D", "ChartQA", "DocVQA", "InfoVQA", "TableVQABench"]
    # Per-category mean hit rate.
    per_category = {
        cat: data.loc[data['category'] == cat, 'hit'].mean()
        for cat in data['category'].unique()
    }
    results = pd.DataFrame(per_category, index=[0])
    results["Overall"] = data['hit'].mean()
    # Group aggregates are means over the raw (not yet scaled) values.
    results['General'] = results[general_datasets].mean(axis=1)
    results['Reasoning'] = results[reason_datasets].mean(axis=1)
    results['OCR'] = results[ocr_datasets].mean(axis=1)
    results['Doc & Chart'] = results[doc_datasets].mean(axis=1)
    for key in results:
        results[key] = round(results[key] * 100, 2)
    return results[['Overall', 'General', 'Reasoning', 'OCR', 'Doc & Chart']
                   + general_datasets + reason_datasets + ocr_datasets + doc_datasets]
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/worldsense.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/worldsense.py
new file mode 100644
index 0000000000000000000000000000000000000000..14c1bafd5a665c9b7d105d01fc117cde3d06db23
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/worldsense.py
@@ -0,0 +1,238 @@
+from ...smp import *
+from .multiple_choice import extract_answer_from_item
+import numpy as np
+import re
+
# Sentinel emitted by the judge pipeline when the API call failed.
FAIL_MSG = 'Failed to obtain answer via API.'

# Video duration buckets used as the top-level reporting axis.
DURATIONS = [
    "<1min",
    "1-2min",
    "2-4min",
    "4-6min",
    "6-8min",
    ">8min"
]

# Coarse content domains of the videos.
DOMAINS = [
    'Tech & Science',
    'Culture & Politics',
    'Daily Life',
    'Film & TV',
    'Performance',
    'Games',
    'Sports',
    'Music',
]

# Fine-grained video sub-categories (values must match the dataset's
# `sub_category` column exactly, including casing such as "swimming").
SUB_CATEGORIES = [
    "Academic Lectures",
    "Auto",
    "Software",
    "Physics",
    "Climate Change",
    "Space Missions",
    "Chemistry",
    "Engineering Projects",
    "Biology",
    "Science Explainers",
    "Artificial Intelligence",
    "Astronomy",
    "Tech Reviews",
    "Editorials",
    "Politics",
    "Historical Analysis",
    "Social Commentary",
    "Book Reviews",
    "Cultural Explainers",
    "Drawing Tutorials",
    "Celebrity Interviews",
    "Art Exhibitions",
    "Fashion",
    "Travel",
    "Daily Vlogs",
    "Cooking",
    "Pranks",
    "Camping",
    "Nutrition & Health",
    "Home Improvement",
    "Painting & Photography",
    "Unboxing Videos",
    "Family Vlogs",
    "DIY & Crafts",
    "Skincare & Makeup",
    "Documentaries",
    "Film Trailers",
    "Event Livestreams",
    "Short Films",
    "Documentary Profiles",
    "Movie Reviews",
    "World News",
    "Talks",
    "Parodies",
    "Storytime",
    "Stand-up",
    "Sketches",
    "FPS Game",
    "Casual Game",
    "Role Playing Game",
    "Sports Game",
    "Basketball",
    "Racing",
    "Football",
    "Bowling Ball",
    "Soccer",
    "Motorsport",
    "swimming",
    "Boxing",
    "Other Sports",
    "Fitness",
    "Fishing",
    "Hiking",
    "Covers",
    "Music Videos",
    "Remixes",
    "Walkthroughs"
]

# High-level cognitive task axis.
TASK_DOMAINS = [
    'Recognition',
    'Understanding',
    'Reasoning'
]

# Specific task types (dataset's `task_type` column).
TASK_CATEGORIES = [
    "Anomaly Recognition",
    "Event Recognition",
    "Attribute Recognition",
    "Human Interaction",
    "Temporal Localization",
    "Video Emotions",
    "Event Sorting",
    "Hallucination",
    "Text and Diagram Understanding",
    "Attribute Reasoning",
    "Causal Reasoning",
    "Object Counting",
    "Action Counting",
    "Temporal Prediction",
    "Emotion Change",
    "Audio Counting",
    "Scene Recognition",
    "Human-object Interaction",
    "Human Emotions",
    "Object State Change",
    "Relation Reasoning",
    "Spatial Relation",
    "Audio Source Localization",
    "Audio Recognition",
    "Object Existence Recognition",
    "Audio Change"
]

# Audio-track classes; a sample can carry several (see `audio_class` column).
AUDIO_CLASSES = [
    "Speech",
    "Event",
    "Music",
]
+
+
def get_dimension_rating(data_path):
    """Aggregate per-sample scores into mean ratings along every axis.

    Loads the scored results file and returns a nested dict keyed first by
    duration bucket (plus 'overall'), then by axis (domain / sub_category /
    task_domain / task_type / audio_class), with each leaf formatted as a
    3-decimal string mean over non-negative scores.
    """
    data = load(data_path)

    # One accumulator per duration bucket plus an 'overall' bucket.
    duration_rating = {k: {} for k in DURATIONS}
    for duration in DURATIONS + ['overall']:
        duration_rating[duration] = {
            'overall': '',
            'domain': {k: [] for k in DOMAINS},
            'sub_category': {k: [] for k in SUB_CATEGORIES},
            'task_domain': {k: [] for k in TASK_DOMAINS},
            'task_type': {k: [] for k in TASK_CATEGORIES},
            'audio_class': {k: [] for k in AUDIO_CLASSES},
        }

    # Accumulation pass: append each sample's score into every axis bucket.
    for i in range(len(data)):

        domain = data.iloc[i]['domain']
        sub_ctg = data.iloc[i]['sub_category']
        task_domain_ctg = data.iloc[i]['task_domain']
        task_ctg = data.iloc[i]['task_type']
        # NOTE(review): eval() on a data column — assumes the results file is
        # trusted; ast.literal_eval would be the safer parser here.
        audio_ctg = eval(data.iloc[i]['audio_class'])

        duration = data.iloc[i]['duration']
        score = float(data.iloc[i]['score'])

        duration_rating['overall']['domain'][domain].append(score)
        duration_rating['overall']['sub_category'][sub_ctg].append(score)
        duration_rating['overall']['task_domain'][task_domain_ctg].append(score)
        duration_rating['overall']['task_type'][task_ctg].append(score)

        duration_rating[duration]['domain'][domain].append(score)
        duration_rating[duration]['sub_category'][sub_ctg].append(score)
        duration_rating[duration]['task_domain'][task_domain_ctg].append(score)
        duration_rating[duration]['task_type'][task_ctg].append(score)

        # audio_class holds a list — a sample can contribute to several classes.
        for _audio_ctg in audio_ctg:
            duration_rating['overall']['audio_class'][_audio_ctg].append(score)
            duration_rating[duration]['audio_class'][_audio_ctg].append(score)

    # Reduction pass: replace each score list with its formatted mean.
    # Negative scores are treated as invalid and excluded; np.mean of an
    # empty selection yields 'nan' in the formatted string.
    for duration in ['overall'] + DURATIONS:

        overall_res_dur = f'{np.mean([x for x in sum(duration_rating[duration]["domain"].values(), []) if x >= 0]):.3f}'
        duration_rating[duration]['overall'] = overall_res_dur

        for domain in DOMAINS:
            domain_res_dur = f'{np.mean([x for x in duration_rating[duration]["domain"][domain] if x >= 0]):.3f}'
            duration_rating[duration]['domain'][domain] = domain_res_dur

        for sub_ctg in SUB_CATEGORIES:
            sub_res_dur = f'{np.mean([x for x in duration_rating[duration]["sub_category"][sub_ctg] if x >= 0]):.3f}'
            duration_rating[duration]['sub_category'][sub_ctg] = sub_res_dur

        for task_ctg in TASK_DOMAINS:
            task_res_dur = f'{np.mean([x for x in duration_rating[duration]["task_domain"][task_ctg] if x >= 0]):.3f}'
            duration_rating[duration]['task_domain'][task_ctg] = task_res_dur

        for task_ctg in TASK_CATEGORIES:
            task_res_dur = f'{np.mean([x for x in duration_rating[duration]["task_type"][task_ctg] if x >= 0]):.3f}'
            duration_rating[duration]['task_type'][task_ctg] = task_res_dur

        for audio_ctg in AUDIO_CLASSES:
            audio_res_dur = f'{np.mean([x for x in duration_rating[duration]["audio_class"][audio_ctg] if x >= 0]):.3f}'
            duration_rating[duration]['audio_class'][audio_ctg] = audio_res_dur

    return duration_rating
+
+
def extract_option(model, input_item, dataset_name):
    """Split lettered options ("A.", "B.", ...) out of the question text into
    per-letter fields on input_item, then delegate answer extraction to the
    judge via extract_answer_from_item and return the chosen option."""
    option_lines = input_item['question'].split('\n')[1:]
    for pos, text in enumerate(option_lines):
        letter = chr(ord('A') + pos)
        marker = letter + '.'
        at = text.find(marker)
        if at >= 0:
            # Store the option body, trimming punctuation and whitespace.
            input_item[letter] = text[at + len(marker):].strip('. \n')
    return extract_answer_from_item(model, input_item, dataset_name)['opt']
+
+
def extract_characters_regex(s):
    """Extract a single choice letter (A-D) from a model response.

    Known answer prefixes are stripped first; returns '' when the response is
    long free-form text without any A-D letter, otherwise the first A-D found.
    """
    s = s.strip()
    # BUGFIX: the list was missing commas after 'The best option is' and
    # 'Best answer:', so Python concatenated adjacent string literals
    # ('The best option isThe correct option is', 'Best answer:Best option:')
    # and those four prefixes were never stripped.
    answer_prefixes = [
        'The best answer is',
        'The correct answer is',
        'The answer is',
        'The answer',
        'The best option is',
        'The correct option is',
        'Best answer:',
        'Best option:',
        'Answer:',
        'Option:',
    ]
    for answer_prefix in answer_prefixes:
        s = s.replace(answer_prefix, '')

    # Long responses with no explicit letter are treated as unanswered.
    if len(s.split()) > 10 and not re.search('[ABCD]', s):
        return ''
    matches = re.search(r'[ABCD]', s)
    if matches is None:
        return ''
    return matches[0]
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/vcr.py b/VLMEvalKit-sudoku/vlmeval/dataset/vcr.py
new file mode 100644
index 0000000000000000000000000000000000000000..e63fab6fc4008a6031ccba1d3b26f26fa3dc90d1
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/vcr.py
@@ -0,0 +1,335 @@
+import uuid
+from functools import partial
+from .image_base import ImageBaseDataset
+from ..smp import *
+from ..smp.file import get_intermediate_file_path
+
+
# Lazily-initialized NLP resources, populated by initialize(): the HF
# `evaluate` ROUGE metric, the spaCy pipelines for English/Chinese, and the
# language-code -> pipeline map used by tokenize().
rouge = None
nlp_en = None
nlp_zh = None
nlp = None
+
+
def initialize():
    """Load the heavyweight NLP resources used by the VCR evaluator.

    Populates the module-level globals: the `evaluate` ROUGE metric (with a
    unique experiment id so parallel runs do not clash) and the spaCy
    pipelines for English/Chinese — downloading the models on first use —
    plus the `nlp` language->pipeline map.
    """
    import evaluate
    import spacy

    global rouge, nlp_en, nlp_zh, nlp

    try:
        rouge = evaluate.load('rouge', experiment_id=str(uuid.uuid4()))
    except Exception as e:
        # rouge stays None; find_best_match will fail later with a clear hint.
        logging.critical(f'{type(e)}: {e}')
        logging.critical('Please first `pip install rouge_score`.')

    try:
        nlp_en = spacy.load('en_core_web_sm')
    except Exception as e:
        logging.warning(f'{type(e)}: {e}')
        logging.warning('Will automatically download en_core_web_sm via spacy.')
        spacy.cli.download('en_core_web_sm')
        nlp_en = spacy.load('en_core_web_sm')

    try:
        nlp_zh = spacy.load('zh_core_web_sm')
    except Exception as e:
        logging.warning(f'{type(e)}: {e}')
        logging.warning('Will automatically download zh_core_web_sm via spacy.')
        spacy.cli.download('zh_core_web_sm')
        nlp_zh = spacy.load('zh_core_web_sm')

    nlp = {'en': nlp_en, 'zh': nlp_zh}
+
+
def rough_filter(answer_text):
    """Heuristic refusal detector.

    Returns False when the answer looks like a refusal ("I can't",
    "I cannot", any-case "sorry", or the Chinese markers 无法/抱歉),
    True otherwise.
    """
    if "I can't" in answer_text or 'I cannot' in answer_text:
        return False
    if 'sorry' in answer_text.lower():
        return False
    # Chinese refusal markers.
    return '无法' not in answer_text and '抱歉' not in answer_text
+
+
def zero_template(crossed_text):
    """Build the all-zero metric record used when a blank cannot be matched
    (keys mirror find_best_match's successful-result dict)."""
    record = {'crossed_text': crossed_text, 'max_sim_val': 0, 'max_sim_string': ''}
    for metric in ('precision', 'recall', 'f1', 'jaccard', 'rouge1', 'exact_match'):
        record[metric] = 0
    return record
+
+
def tokenize(text, language):
    """Tokenize text with the module-level spaCy pipeline for language.

    Parameters:
        text (str): The text to tokenize.
        language (str): 'en' or 'zh' (pipelines loaded by initialize()).

    Returns:
        list: The token strings in order.
    """
    assert language in ['en', 'zh']
    pipeline = nlp[language]
    return [token.text for token in pipeline(text)]
+
+
def find_best_match(needle, hay, language, rouge):
    """
    Finds the best matching n-gram in the haystack for the given needle.

    Parameters:
        needle (str): The string to find.
        hay (str): The text to search within.
        language (str): 'en' or 'zh' (controls tokenizer and token joiner).
        rouge: the `evaluate` ROUGE metric object used for the rouge1 score.

    Returns:
        dict: crossed_text / max_sim_string / max_sim_val plus token-level
        precision, recall, f1, jaccard, rouge1 and exact_match; all zeros
        when no candidate n-gram shares a token with the needle.
    """
    assert language in ['en', 'zh']
    from nltk.util import ngrams
    from difflib import SequenceMatcher as SM

    tokens_hay = tokenize(hay, language)
    tokens_needle = tokenize(needle, language)

    # Chinese tokens are re-joined without spaces; English with spaces.
    splitter = '' if language == 'zh' else ' '
    # Candidate n-grams have exactly the needle's length (in tokens).
    ngrams_ = ngrams(tokens_hay, len(tokens_needle))
    max_sim_val = 0
    max_sim_string = ''
    max_sim_ngram = []
    tokens_needle_set = set(tokens_needle)
    # Prune: only n-grams sharing at least one token with the needle.
    ngrams_hasjoint = [
        ngram
        for ngram in ngrams_
        if not set(ngram).isdisjoint(tokens_needle_set)
    ]

    # Keep the candidate with the highest character-level similarity
    # (first occurrence wins on ties, since '>' is strict).
    for ngram in ngrams_hasjoint:
        hay_ngram = splitter.join(ngram)
        similarity = SM(None, hay_ngram, needle).ratio()
        if similarity > max_sim_val:
            max_sim_val = similarity
            max_sim_string = hay_ngram
            max_sim_ngram = ngram

    # Evaluate
    if len(max_sim_ngram) == 0:
        # No overlapping n-gram at all: return the all-zero record.
        return {
            'crossed_text': needle,
            'max_sim_val': 0,
            'max_sim_string': '',
            'precision': 0,
            'recall': 0,
            'f1': 0,
            'jaccard': 0,
            'rouge1': 0,
            'exact_match': 0,
        }
    # Token-set overlap metrics between best candidate and ground truth.
    pred_set = set(max_sim_ngram)
    ref_set = set(tokens_needle)
    correct_tokens = pred_set.intersection(ref_set)
    len_correct_tokens = len(correct_tokens)

    precision = len_correct_tokens / len(pred_set)
    recall = len_correct_tokens / len(ref_set)
    if (precision + recall) == 0:
        f1 = 0
    else:
        f1 = 2 * precision * recall / (precision + recall)
    union = pred_set.union(ref_set)
    jaccard = len_correct_tokens / len(union) if len(union) > 0 else 0
    # ROUGE-1 over the same tokenizer so scores are language-consistent.
    rouge_1 = rouge.compute(
        predictions=[max_sim_string],
        references=[needle],
        tokenizer=partial(tokenize, language=language),
        rouge_types=['rouge1'],
    )['rouge1']
    # Exact match compares token sequences (order-sensitive), not strings.
    exact_match = float(list(max_sim_ngram) == list(tokens_needle))
    out = {
        'crossed_text': needle,
        'max_sim_string': max_sim_string,
        'max_sim_val': max_sim_val,
        'precision': precision,
        'recall': recall,
        'f1': f1,
        'jaccard': jaccard,
        'rouge1': rouge_1,
        'exact_match': exact_match,
    }
    return out
+
+
def process_match_single_new(
    image_id, prediction, answer, language, progress
):
    """
    process the inference results for a single image and calculate the metrics

    Parameters:
        image_id (int): The image id (question id).
        prediction (str): The prediction text.
        answer (Union[str, List[str]]): The answer text, or a list of answer texts. The masked n-grams in the image.
        language (str): The language of the text. Can be "en" or "zh".
        rouge (rouge): The rouge metric object (read from the module global).
        progress (multiprocessing.Queue): The progress queue.

    Returns:
        tuple: The image id (question_id, int) and the result per id (dict of dict of dict).
    """
    result_per_id = {image_id: {}}
    if isinstance(answer, str):
        # NOTE(review): eval() on a data field — assumes the TSV is trusted;
        # ast.literal_eval would be the safer parser here.
        answer = eval(answer)
    assert isinstance(answer, list)
    # Keep only the text after the last chat-template marker, if present.
    result = prediction.split('Assistant: ')[-1]
    for i, crossed_text in enumerate(answer):
        if rough_filter(result):
            find_best_match_result = find_best_match(
                crossed_text, result, language, rouge
            )
            if i == 0:
                result_per_id[image_id] = {str(i): find_best_match_result}
            else:
                result_per_id[image_id][str(i)] = find_best_match_result
        else:
            # Refusal-looking output: every blank scores zero.
            if i == 0:
                result_per_id[image_id] = {str(i): zero_template(crossed_text)}
            else:
                result_per_id[image_id][str(i)] = zero_template(crossed_text)
    # Signal one completed sample to the progress-bar consumer.
    progress.put(1)
    return image_id, result_per_id
+
+
class VCRDataset(ImageBaseDataset):
    """VCR (Visual Caption Restoration) dataset.

    The model must restore text that has been covered in the image; scoring
    matches each ground-truth n-gram against the prediction and reports the
    mean Exact_Match and Jaccard over all blanks.
    """

    TYPE = 'VQA'

    URL_PREFIX = 'https://huggingface.co/datasets/vcr-org'

    # Variants: language (EN/ZH) x difficulty (EASY/HARD) x subset size.
    DATASET_URL = {
        'VCR_EN_EASY_500': f'{URL_PREFIX}/VCR-wiki-en-easy-test-500/resolve/main/VCR-wiki-en-easy-test-500.tsv',
        'VCR_EN_EASY_100': f'{URL_PREFIX}/VCR-wiki-en-easy-test-100/resolve/main/VCR-wiki-en-easy-test-100.tsv',
        'VCR_EN_EASY_ALL': f'{URL_PREFIX}/VCR-wiki-en-easy-test/resolve/main/VCR-wiki-en-easy-test.tsv',
        'VCR_EN_HARD_500': f'{URL_PREFIX}/VCR-wiki-en-hard-test-500/resolve/main/VCR-wiki-en-hard-test-500.tsv',
        'VCR_EN_HARD_100': f'{URL_PREFIX}/VCR-wiki-en-hard-test-100/resolve/main/VCR-wiki-en-hard-test-100.tsv',
        'VCR_EN_HARD_ALL': f'{URL_PREFIX}/VCR-wiki-en-hard-test/resolve/main/VCR-wiki-en-hard-test.tsv',
        'VCR_ZH_EASY_500': f'{URL_PREFIX}/VCR-wiki-zh-easy-test-500/resolve/main/VCR-wiki-zh-easy-test-500.tsv',
        'VCR_ZH_EASY_100': f'{URL_PREFIX}/VCR-wiki-zh-easy-test-100/resolve/main/VCR-wiki-zh-easy-test-100.tsv',
        'VCR_ZH_EASY_ALL': f'{URL_PREFIX}/VCR-wiki-zh-easy-test/resolve/main/VCR-wiki-zh-easy-test.tsv',
        'VCR_ZH_HARD_500': f'{URL_PREFIX}/VCR-wiki-zh-hard-test-500/resolve/main/VCR-wiki-zh-hard-test-500.tsv',
        'VCR_ZH_HARD_100': f'{URL_PREFIX}/VCR-wiki-zh-hard-test-100/resolve/main/VCR-wiki-zh-hard-test-100.tsv',
        'VCR_ZH_HARD_ALL': f'{URL_PREFIX}/VCR-wiki-zh-hard-test/resolve/main/VCR-wiki-zh-hard-test.tsv',
    }

    DATASET_MD5 = {
        'VCR_EN_EASY_500': 'fd9258db52f8685dc710619a0ea0a261',
        'VCR_EN_EASY_100': '9df5d7266683458621ecbe122beb72f0',
        'VCR_EN_EASY_ALL': '8a9b96885f251d1c85f42f84073327f1',
        'VCR_EN_HARD_500': '0a22a85080b6a1f52b1f95e302d43df4',
        'VCR_EN_HARD_100': '1b20f5cbcbeae0b0bec77f7a36143958',
        'VCR_EN_HARD_ALL': '2d8b8b1ee0eba0e0b618fd3aa7d9710e',
        'VCR_ZH_EASY_500': 'beca5fd54176adf44cf94bd9b50cf048',
        'VCR_ZH_EASY_100': '4a86a5678a79844d6d22ab0629c51cd5',
        'VCR_ZH_EASY_ALL': '5050fe7f0027ad2068fd4c7f220edaea',
        'VCR_ZH_HARD_500': '617e3360f75c54455625cb0a8da5c1e7',
        'VCR_ZH_HARD_100': 'b0e38c85f5d5e63894a3b881c372a62b',
        'VCR_ZH_HARD_ALL': '54bbfef448206518b03127ef8b61404c',
    }

    def __init__(self, dataset='VCR_EN_EASY_500', skip_noimg=True):
        super().__init__(dataset, skip_noimg)

        # Loads ROUGE + spaCy pipelines (may download models on first use).
        initialize()
        self.language = 'en' if 'EN' in dataset else 'zh'
        self.difficulty = 'easy' if 'EASY' in dataset else 'hard'

    # def build_prompt(self, line):
    #     msgs = super().build_prompt(line)
    #     assert msgs[-1]['type'] == 'text'
    #     if self.language == 'zh':
    #         msgs[-1]['value'] += '图像中被覆盖的文本是什么?请在不输出解释的情况下还原被覆盖的文本。'
    #     else:
    #         msgs[-1]['value'] += ('What is the covered texts in the image? '
    #                              'Please restore the covered texts without outputting the explanations.')
    #     return msgs

    def evaluate(self, eval_file, **judge_kwargs):
        """Score predictions in eval_file across a process pool.

        Fans out one process_match_single_new task per row, merges the
        per-blank metrics, and dumps the Exact_Match / Jaccard means plus all
        per-sample results to a JSON file next to eval_file.
        """
        import multiprocessing

        vcr_score_list = {'Exact_Match': [], 'Jaccard': []}
        vcr_score = {'Exact_Match': 0, 'Jaccard': 0}
        logger = get_logger('Evaluation')
        data = load(eval_file)

        lt = len(data)
        lines = [data.iloc[i] for i in range(lt)]

        pool = multiprocessing.Pool()
        manager = multiprocessing.Manager()
        progress_queue = manager.Queue()
        results = []

        overall_results = {str(image_id): {} for image_id in range(len(lines))}

        for instance_id, instance in enumerate(lines):
            results.append(
                pool.apply_async(
                    process_match_single_new,
                    args=(
                        str(instance_id),
                        instance['prediction'],
                        instance['answer'],
                        self.language,
                        progress_queue,
                    ),
                )
            )
        pool.close()

        # Display progress bar
        # Each worker puts one token on the queue when its sample is done.
        for _ in tqdm(range(len(results))):
            progress_queue.get()

        pool.join()

        # Merging results into overall_result
        for result in results:
            image_id, result_per_id = result.get()
            overall_results[str(image_id)].update(result_per_id[image_id])
            for blank_id_str in result_per_id[image_id].keys():
                vcr_score_list['Exact_Match'].append(
                    result_per_id[image_id][blank_id_str]['exact_match']
                )
                vcr_score_list['Jaccard'].append(
                    result_per_id[image_id][blank_id_str]['jaccard']
                )
        vcr_score['Exact_Match'] = np.mean(vcr_score_list['Exact_Match'])
        vcr_score['Jaccard'] = np.mean(vcr_score_list['Jaccard'])
        # NOTE(review): this re-calls result.get() for every task (cheap — the
        # AsyncResult caches its value) to rebuild the same per-id mapping.
        results_out = {
            k: v for i in range(len(results)) for k, v in results[i].get()[1].items()
        }
        results_with_metrics = {
            'Exact_Match': vcr_score['Exact_Match'],
            'Jaccard': vcr_score['Jaccard'],
            'Predictions': results_out,
        }
        score_pth = get_intermediate_file_path(eval_file, f'_{self.language}_{self.difficulty}_score', 'json')
        dump(results_with_metrics, score_pth)
        logger.info(
            f'VCR successfully finished evaluating {eval_file}, results saved in {score_pth}'
        )
        logger.info('Score: ')
        for key, value in vcr_score.items():
            logger.info('{}:{}'.format(key, value))
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/video_mmlu.py b/VLMEvalKit-sudoku/vlmeval/dataset/video_mmlu.py
new file mode 100644
index 0000000000000000000000000000000000000000..977cbff9eeef759bba791c1a17ab9cfc751a2a70
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/video_mmlu.py
@@ -0,0 +1,614 @@
+# flake8: noqa
+from huggingface_hub import snapshot_download
+from ..smp import *
+from ..smp.file import get_intermediate_file_path, get_file_extension
+from .video_base import VideoBaseDataset
+from .utils import build_judge, DEBUG_MESSAGE
+from ..utils import track_progress_rich
+import random
+import json
+import ast
+from glob import glob
+from tqdm import tqdm
+
# Marker string the API layer writes into a response when generation failed;
# used below to filter out failed judge outputs before re-trying them.
FAIL_MSG = 'Failed to obtain answer via API.'


# 25 paraphrased instructions that all ask the model to describe a video from
# its equally spaced frames. build_prompt picks one at random per sample so
# the benchmark is not tied to a single prompt phrasing.
detailed_caption_prompts = [
    "The images are given containing equally spaced video frames. Please imagine the video based on the sequence of frames, and provide a faithfully detailed description of this video in more than three sentences.",
    "You are given a sequence of equally spaced video frames. Based on these frames, imagine the full video and provide a detailed description of what is happening in more than three sentences.",
    "The following set contains equally spaced video frames. Imagine the video from which these frames were taken and describe it in detail in at least three sentences.",
    "Below are equally spaced frames from a video. Use these frames to visualize the entire video and provide a detailed description in more than three sentences.",
    "A sequence of equally spaced video frames is presented. Please imagine the full video and write a faithfully detailed description of the events in more than three sentences.",
    "The images provided include equally spaced frames from a video. Based on these frames, imagine the video and describe it comprehensively in at least three sentences.",
    "You are given equally spaced frames from a video. Use these frames to envision the entire video and provide a detailed description of the events in more than three sentences.",
    "The sequence includes equally spaced frames from a video. Imagine the full video based on these frames and provide a detailed description in more than three sentences.",
    "The provided images contain equally spaced frames from a video. Visualize the video from these frames and describe it in detail in more than three sentences.",
    "Here are equally spaced frames from a video. Based on these frames, imagine the video and provide a detailed, faithful description of it in more than three sentences.",
    "The set of images includes equally spaced video frames. Please imagine the video these frames come from and describe it comprehensively in at least three sentences.",
    "Describe the video based on these frames in a few sentences.",
    "What is happening in the video shown in these frames?",
    "Explain the video using these frames.",
    "Imagine the video from these frames and describe it in detail in a few sentences.",
    "Based on these frames, provide a narrative of the video in more than three sentences.",
    "Describe the events in the video shown by these frames in at least three sentences.",
    "Visualize the video from these frames and explain what is happening in more than three sentences.",
    "Describe the sequence of events in the video depicted by these frames in a detailed manner.",
    "Given these equally spaced frames, imagine the entire video and provide a detailed description of the events, including the setting, characters, and actions, in more than three sentences.",
    "Visualize the video based on these frames and write a comprehensive description of what happens, describing the beginning, middle, and end in at least three sentences.",
    "Using these frames as a reference, imagine the full video and provide a thorough description of the plot, including key details and actions, in more than three sentences.",
    "Based on the sequence of these frames, describe the entire video in detail, mentioning important aspects such as the context, movements, and transitions in more than three sentences.",
    "Imagine the video that corresponds to these frames and provide an elaborate description, covering the storyline, visual elements, and any notable features in at least three sentences."
]
+
+
class Video_MMLU_CAP(VideoBaseDataset):
    """Video-MMLU captioning track.

    Models are shown frames (or the raw video) and asked for a detailed
    description; a judge LLM then (1) answers the per-video questions from
    the generated caption and (2) scores those answers against ground truth.
    """

    # Integrity checksum of the dataset TSV. Empty here, so check_integrity
    # effectively always fails and the dataset is (re)downloaded unless
    # repo_id points at a local path — presumably intentional; confirm.
    MD5 = ''

    TYPE = 'Video-VQA'
    MODALITY = 'VIDEO'

    def __init__(self, dataset='Video_MMLU_CAP', pack=False, nframe=0, fps=-1, subset='all', limit=1.0):
        """Load the dataset, then optionally filter by discipline and truncate.

        limit: (0, 1] keeps that fraction of rows; an integer-like value in
        (1, len) keeps that many rows; anything else raises ValueError.
        NOTE(review): a float limit > 1 reaches `.iloc[:limit]`, which pandas
        rejects for non-integer values — callers should pass an int there.
        """
        super().__init__(dataset=dataset, pack=pack, nframe=nframe, fps=fps)

        # Subset filtering on the 'discipline' column. Note the casing is
        # taken verbatim from the TSV: 'Math' capitalized, others lowercase.
        if subset == 'all':
            pass
        elif subset == 'Math':
            self.data = self.data[self.data['discipline'] == 'Math']
        elif subset == 'physics':
            self.data = self.data[self.data['discipline'] == 'physics']
        elif subset == 'chemistry':
            self.data = self.data[self.data['discipline'] == 'chemistry']
        else:
            raise ValueError(f'Invalid subset: {subset}')

        if limit <= 1.0 and limit > 0:
            # Fractional limit: keep the leading fraction of rows.
            sample_num = int(limit * len(self.data))
            self.data = self.data.iloc[:sample_num]
        elif limit > 1.0 and limit < len(self.data):
            # Absolute limit: keep the first `limit` rows.
            self.data = self.data.iloc[:limit]
        else:
            raise ValueError(f'Invalid limit: {limit}')


    @classmethod
    def supported_datasets(cls):
        """Names this class can be instantiated for."""
        return ['Video_MMLU_CAP']

    def prepare_dataset(self, dataset_name='Video_MMLU_CAP', repo_id='Enxin/Video-MMLU'):
        """Ensure the TSV and videos exist locally, downloading/extracting from
        the HuggingFace hub if needed.

        Returns a dict with 'data_file' (TSV path) and 'root' (video dir).
        """
        def check_integrity(pth):
            # Valid only if the TSV matches self.MD5 AND every referenced
            # video file exists under youtube_videos/.
            data_file = osp.join(pth, f'{dataset_name}.tsv')
            if md5(data_file) != self.MD5:
                return False
            data = load(data_file)
            for video_pth in data['video']:
                if not osp.exists(osp.join(pth, 'youtube_videos', video_pth)):
                    return False
            return True

        def untar_video_data(archive_file, cache_dir):
            # Extract a tar archive into the cache directory.
            import tarfile
            with tarfile.open(archive_file, "r") as tar_ref:
                tar_ref.extractall(cache_dir)
            print(f"Extracted all files from {archive_file} to {cache_dir}")

        def unzip_video_data(archive_file, cache_dir):
            # Extract a zip archive into the cache directory.
            import zipfile
            with zipfile.ZipFile(archive_file, 'r') as zip_ref:
                zip_ref.extractall(cache_dir)
            print(f"Extracted all files from {archive_file} to {cache_dir}")

        def concat_archive_parts(parts, output_file):
            # Reassemble a split archive (e.g. foo.tar.part0, foo.tar.part1)
            # by concatenating the parts in sorted (lexicographic) order.
            with open(output_file, "wb") as out_file:
                from tqdm import tqdm
                for part in tqdm(sorted(parts)):
                    with open(part, "rb") as part_file:
                        out_file.write(part_file.read())
            print(f"Concatenated parts {parts} into {output_file}")

        if os.path.exists(repo_id):
            # repo_id is actually a local directory: use it directly.
            dataset_path = repo_id
        else:
            cache_path = get_cache_path(repo_id)
            if cache_path is not None and check_integrity(cache_path):
                dataset_path = cache_path
            else:
                cache_path = snapshot_download(repo_id=repo_id, repo_type="dataset")
                if not glob(osp.join(cache_path, "youtube_videos")):
                    # Find all archive files (possibly split into parts).
                    tar_files = glob(osp.join(cache_path, "**/*.tar*"), recursive=True)
                    zip_files = glob(osp.join(cache_path, "**/*.zip*"), recursive=True)

                    parts_dict = {}
                    # Group tar parts by their base name.
                    for f in tar_files:
                        base_name = f.split(".tar")[0]
                        if base_name not in parts_dict:
                            parts_dict[base_name] = {'type': 'tar', 'parts': []}
                        parts_dict[base_name]['parts'].append(f)

                    # Group zip parts by their base name.
                    for f in zip_files:
                        base_name = f.split(".zip")[0]
                        if base_name not in parts_dict:
                            parts_dict[base_name] = {'type': 'zip', 'parts': []}
                        parts_dict[base_name]['parts'].append(f)

                    for base_name, info in parts_dict.items():
                        print(f"Processing archive: {base_name}")
                        archive_type = info['type']
                        parts = info['parts']

                        output_file = base_name + (".tar" if archive_type == 'tar' else ".zip")

                        # Multi-part archives are concatenated first; a single
                        # file is used as-is.
                        if len(parts) > 1 and not osp.exists(output_file):
                            print('Start concatenating archive parts')
                            concat_archive_parts(parts, output_file)
                        elif len(parts) == 1:
                            output_file = parts[0]

                        # Skip extraction if the target directory already exists.
                        if not osp.exists(osp.join(cache_path, osp.basename(base_name))):
                            if archive_type == 'tar':
                                untar_video_data(output_file, cache_path)
                            else:
                                unzip_video_data(output_file, cache_path)

                dataset_path = cache_path

        self.video_path = osp.join(dataset_path, 'youtube_videos/')
        data_file = osp.join(dataset_path, f'{dataset_name}.tsv')

        return dict(data_file=data_file, root=osp.join(dataset_path, 'youtube_videos'))

    def build_prompt_pack(self, line):
        """Build a frame-based captioning prompt (packed mode).

        `line` may be a row index, a pandas row, or a video name directly.
        """
        if isinstance(line, int):
            assert line < len(self)
            video = self.videos[line]
        elif isinstance(line, pd.Series):
            video = line['video']
        elif isinstance(line, str):
            video = line

        frames = self.save_video_frames(video)
        message = []
        for im in frames:
            message.append(dict(type='image', value=im))

        # Randomly vary the caption instruction across samples.
        prompt = random.choice(detailed_caption_prompts)
        message.append(dict(type='text', value=prompt, role='user'))
        return message

    def build_prompt_nopack(self, line, video_llm):
        """Build prompt for a single line without packing.

        video_llm=True sends the raw video file; otherwise sampled frames.
        """
        if isinstance(line, int):
            assert line < len(self)
            line = self.data.iloc[line]

        prompt = random.choice(detailed_caption_prompts)

        if video_llm:
            video_path = os.path.join(self.video_path, line['video'])
            return [
                dict(type='video', value=video_path),
                dict(type='text', value=prompt)
            ]
        else:
            # save_video_frames expects the video name without extension here.
            frames = self.save_video_frames(os.path.splitext(line['video'])[0])
            message = []
            for im in frames:
                message.append(dict(type='image', value=im))
            message.append(dict(type='text', value=prompt))
            return message

    def build_prompt(self, line, video_llm):
        # Packed prompts are frame-based only, so fall through to nopack for
        # native video LLMs.
        if self.pack and not video_llm:
            return self.build_prompt_pack(line)
        else:
            return self.build_prompt_nopack(line, video_llm)

    @staticmethod
    def remove_side_quote(s, syms=[',', '"', "'"]):
        """Strip leading/trailing quote/comma characters from s; return '' if
        s consists only of such characters."""
        if np.all([x in syms for x in s]):
            return ''
        while s[0] in syms:
            s = s[1:]
        while s[-1] in syms:
            s = s[:-1]
        return s

    @staticmethod
    def robust_json_load(s):
        """Best-effort parse of a single JSON object out of model output s.

        Falls back to line-by-line 'key: value' scraping when s contains
        exactly one '{' but is not valid JSON. Returns a dict or None.
        """
        try:
            jsons = list(extract_json_objects(s))
            assert len(jsons) == 1
            return jsons[0]
        except:
            if '{' in s and s.find('{') == s.rfind('{'):
                sub_str = s[s.find('{') + 1:].strip()
                lines = sub_str.split('\n')
                res = {}
                for l in lines:
                    l = l.strip()
                    if ': ' in l:
                        key = Video_MMLU_CAP.remove_side_quote(l.split(': ')[0].strip())
                        val = Video_MMLU_CAP.remove_side_quote(l.split(': ')[1].strip())
                        if len(key) and len(val):
                            res[key] = val
                return res
            return None

    def load_pack_answers(self, data_raw):
        """Parse packed (per-video JSON) answers back into per-question rows.

        Returns (meta, vstats): meta is a copy of self.data with a
        'prediction' column; vstats counts parse successes/failures.
        """
        vstats = defaultdict(lambda: 0)
        data = defaultdict(lambda: {})

        for k in data_raw:
            ans = data_raw[k].strip()
            if FAIL_MSG in ans:
                vstats['GEN_FAIL'] += 1
                continue
            res = self.robust_json_load(ans)
            if res is not None:
                data[k] = res
                vstats['PARSE_OK'] += 1
            else:
                vstats['PARSE_FAIL'] += 1

        # return data
        meta = cp.deepcopy(self.data)
        lt = len(meta)
        prediction = []
        for i in range(lt):
            line = meta.iloc[i]
            vid = line['video']
            idx = str(line['index'])
            # Missing per-question answers become None (counted as INVALIDQ).
            prediction.append(data[vid][idx] if idx in data[vid] else None)
        meta['prediction'] = prediction
        vstats['VALIDQ'] = len([x for x in prediction if x is not None])
        vstats['INVALIDQ'] = len([x for x in prediction if x is None])
        return meta, vstats

    # It returns a dictionary
    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Two-stage judge evaluation of generated captions.

        Stage 1: the judge answers each video's questions using only the
        model's caption (results cached in response_file). Stage 2: the judge
        scores those answers against ground truth (cached in tmp_file).
        Intermediate pkl files make the evaluation resumable.
        """
        from .utils.video_mmlu import get_dimension_rating, prepare_response_prompt, prepare_score_prompt, SYSTEM_CAL_SCORE_PROMPT_CAP, SYSTEM_GENER_PRED_PROMPT

        assert get_file_extension(eval_file) in ['xlsx', 'json', 'tsv'], 'data file should be an supported format (xlsx/json/tsv) file'
        judge = judge_kwargs['model']
        nproc = judge_kwargs.pop('nproc', 4)
        _ = judge_kwargs.pop('verbose', None)
        _ = judge_kwargs.pop('retry', None)

        response_file = get_intermediate_file_path(eval_file, f'_{judge}_response', 'pkl')
        tmp_file = get_intermediate_file_path(eval_file, f'_{judge}_tmp', 'pkl')
        tgt_file = get_intermediate_file_path(eval_file, f'_{judge}_rating', 'json')
        score_file = get_intermediate_file_path(eval_file, f'_{judge}_score')

        # Deterministic judging.
        judge_kwargs['temperature'] = 0.0
        model = build_judge(**judge_kwargs)

        if not osp.exists(score_file):
            # Resume: keep only previously successful judge outputs.
            res = {} if not osp.exists(tmp_file) else load(tmp_file)
            res = {k: v for k, v in res.items() if FAIL_MSG not in v}

            data = load(eval_file)

            # Each row stores a list of QA dicts in 'question'; expand to one
            # row per question (rows from the same video share 'index').
            expanded_data = []
            for idx, row in data.iterrows():
                try:
                    questions = ast.literal_eval(row['question']) if isinstance(row['question'], str) else row['question']
                    for q_dict in questions:
                        new_row = row.copy()
                        new_row['question'] = q_dict['question']
                        new_row['answer'] = q_dict['answer']
                        expanded_data.append(new_row)
                except Exception as e:
                    # Unparseable question lists are skipped, not fatal.
                    print(f"Error parsing questions for row {idx}")
                    print(f"Error message: {str(e)}")
                    continue

            expanded_df = pd.DataFrame(expanded_data).reset_index(drop=True)

            data_un = expanded_df[~expanded_df['index'].isin(res)]
            data_un = data_un[~pd.isna(data_un['prediction'])]
            lt = len(data_un)

            response_prompts = [prepare_response_prompt(data_un.iloc[i]) for i in range(lt)]
            indices = [data_un.iloc[i]['index'] for i in range(lt)]

            # Stage 1: generate predicted answers from the caption.
            model.system_prompt = SYSTEM_GENER_PRED_PROMPT
            if len(response_prompts):
                print(f"Processing {len(response_prompts)} valid prompts out of {lt} total items")
                _ = track_progress_rich(
                    model.generate,
                    response_prompts,
                    keys=indices,
                    save=response_file,
                    nproc=nproc,
                    chunksize=nproc
                )

            pred_map = load(response_file)
            data_un['pred_response'] = [pred_map[idx] for idx in data_un['index']]
            score_prompts = [prepare_score_prompt(data_un.iloc[i]) for i in range(lt)]
            # Stage 2: score the predicted answers.
            model.system_prompt = SYSTEM_CAL_SCORE_PROMPT_CAP
            if len(score_prompts):
                _ = track_progress_rich(
                    model.generate,
                    score_prompts,
                    keys=indices,
                    save=tmp_file,
                    nproc=nproc,
                    chunksize=nproc
                )

            score_map = load(tmp_file)
            # NOTE(review): scores are re-joined onto the UN-expanded frame by
            # 'index'; rows sharing an index get the same (last-written) score
            # — confirm this is the intended aggregation.
            data['score'] = [score_map[idx] for idx in data['index']]

            dump(data, score_file)

        rating = get_dimension_rating(score_file)
        dump(rating, tgt_file)
        return rating
+
+
class Video_MMLU_QA(VideoBaseDataset):
    """Video-MMLU question-answering track.

    Models answer per-video questions directly (one short sentence); a judge
    LLM scores each answer against the reference answer.
    """

    # Integrity checksum of the dataset TSV. Empty here, so check_integrity
    # effectively always fails and the dataset is (re)downloaded unless
    # repo_id points at a local path — presumably intentional; confirm.
    MD5 = ''

    TYPE = 'Video-VQA'
    MODALITY = 'VIDEO'

    def __init__(self, dataset='Video_MMLU_QA', pack=False, nframe=0, fps=-1, subset='all', limit=1.0):
        """Load the dataset, then optionally filter by discipline and truncate.

        limit: (0, 1] keeps that fraction of rows; an integer-like value in
        (1, len) keeps that many rows; anything else raises ValueError.
        NOTE(review): a float limit > 1 reaches `.iloc[:limit]`, which pandas
        rejects for non-integer values — callers should pass an int there.
        """
        super().__init__(dataset=dataset, pack=pack, nframe=nframe, fps=fps)

        # Subset filtering on the 'discipline' column; casing matches the TSV.
        if subset == 'all':
            pass
        elif subset == 'Math':
            self.data = self.data[self.data['discipline'] == 'Math']
        elif subset == 'physics':
            self.data = self.data[self.data['discipline'] == 'physics']
        elif subset == 'chemistry':
            self.data = self.data[self.data['discipline'] == 'chemistry']
        else:
            raise ValueError(f'Invalid subset: {subset}')

        if limit <= 1.0 and limit > 0:
            # Fractional limit: keep the leading fraction of rows.
            sample_num = int(limit * len(self.data))
            self.data = self.data.iloc[:sample_num]
        elif limit > 1.0 and limit < len(self.data):
            # Absolute limit: keep the first `limit` rows.
            self.data = self.data.iloc[:limit]
        else:
            raise ValueError(f'Invalid limit: {limit}')


    @classmethod
    def supported_datasets(cls):
        """Names this class can be instantiated for."""
        return ['Video_MMLU_QA']

    def prepare_dataset(self, dataset_name='Video_MMLU_QA', repo_id='Enxin/Video-MMLU'):
        """Ensure the TSV and videos exist locally, downloading/extracting
        (tar archives only, unlike the CAP variant) from the HF hub if needed.

        Returns a dict with 'data_file' (TSV path) and 'root' (video dir).
        """
        def check_integrity(pth):
            # Valid only if the TSV matches self.MD5 AND every referenced
            # video file exists under youtube_videos/.
            data_file = osp.join(pth, f'{dataset_name}.tsv')
            if md5(data_file) != self.MD5:
                return False
            data = load(data_file)
            for video_pth in data['video']:
                if not osp.exists(osp.join(pth, 'youtube_videos', video_pth)):
                    return False
            return True

        if os.path.exists(repo_id):
            # repo_id is actually a local directory: use it directly.
            dataset_path = repo_id
        else:
            cache_path = get_cache_path(repo_id)
            if cache_path is not None and check_integrity(cache_path):
                dataset_path = cache_path
            else:
                cache_path = snapshot_download(repo_id=repo_id, repo_type="dataset")
                if not glob(osp.join(cache_path, "youtube_videos")):
                    tar_files = glob(osp.join(cache_path, "**/*.tar*"), recursive=True)

                    def untar_video_data(tar_file, cache_dir):
                        # Extract a tar archive into the cache directory.
                        import tarfile
                        with tarfile.open(tar_file, "r") as tar_ref:
                            tar_ref.extractall(cache_dir)
                        print(f"Extracted all files from {tar_file} to {cache_dir}")

                    def concat_tar_parts(tar_parts, output_tar):
                        # Reassemble a split tar by concatenating its parts in
                        # sorted (lexicographic) order.
                        with open(output_tar, "wb") as out_tar:
                            from tqdm import tqdm
                            for part in tqdm(sorted(tar_parts)):
                                with open(part, "rb") as part_file:
                                    out_tar.write(part_file.read())
                        print(f"Concatenated parts {tar_parts} into {output_tar}")

                    tar_parts_dict = {}

                    # Group tar parts together
                    for tar_file in tar_files:
                        base_name = tar_file.split(".tar")[0]
                        if base_name not in tar_parts_dict:
                            tar_parts_dict[base_name] = []
                        tar_parts_dict[base_name].append(tar_file)

                    # Concatenate and untar split parts
                    for base_name, parts in tar_parts_dict.items():
                        print(f"Extracting following tar files: {parts}")
                        output_tar = base_name + ".tar"
                        if not osp.exists(output_tar):
                            print('Start concatenating tar files')

                            concat_tar_parts(parts, output_tar)
                            print('Finish concatenating tar files')

                        # Skip extraction if the target dir already exists.
                        if not osp.exists(osp.join(cache_path, osp.basename(base_name))):
                            untar_video_data(output_tar, cache_path)
                dataset_path = cache_path
        self.video_path = osp.join(dataset_path, 'youtube_videos/')
        data_file = osp.join(dataset_path, f'{dataset_name}.tsv')

        return dict(data_file=data_file, root=osp.join(dataset_path, 'youtube_videos'))

    def build_prompt_pack(self, line):
        """Build a frame-based QA prompt (packed mode).

        NOTE(review): when `line` is an int or str, `line['question']` below
        will fail — this path appears to expect a pandas row; confirm.
        """
        if isinstance(line, int):
            assert line < len(self)
            video = self.videos[line]
        elif isinstance(line, pd.Series):
            video = line['video']
        elif isinstance(line, str):
            video = line

        frames = self.save_video_frames(video)
        message = []
        for im in frames:
            message.append(dict(type='image', value=im))

        prompt = line['question']+ '\nAnswer briefly and directly in one sentence.'
        message.append(dict(type='text', value=prompt, role='user'))
        return message

    def build_prompt_nopack(self, line, video_llm):
        """Build prompt for a single line without packing.

        video_llm=True sends the raw video file; otherwise sampled frames.
        """
        if isinstance(line, int):
            assert line < len(self)
            line = self.data.iloc[line]

        prompt = line['question'] + '\nAnswer briefly and directly in one sentence.'

        if video_llm:
            video_path = os.path.join(self.video_path, line['video'])
            return [
                dict(type='video', value=video_path),
                dict(type='text', value=prompt)
            ]
        else:
            # save_video_frames expects the video name without extension here.
            frames = self.save_video_frames(os.path.splitext(line['video'])[0])
            message = []
            for im in frames:
                message.append(dict(type='image', value=im))
            message.append(dict(type='text', value=prompt))
            return message

    def build_prompt(self, line, video_llm):
        # Packed prompts are frame-based only, so fall through to nopack for
        # native video LLMs.
        if self.pack and not video_llm:
            return self.build_prompt_pack(line)
        else:
            return self.build_prompt_nopack(line, video_llm)

    @staticmethod
    def remove_side_quote(s, syms=[',', '"', "'"]):
        """Strip leading/trailing quote/comma characters from s; return '' if
        s consists only of such characters."""
        if np.all([x in syms for x in s]):
            return ''
        while s[0] in syms:
            s = s[1:]
        while s[-1] in syms:
            s = s[:-1]
        return s

    @staticmethod
    def robust_json_load(s):
        """Best-effort parse of a single JSON object out of model output s.

        Falls back to line-by-line 'key: value' scraping when s contains
        exactly one '{' but is not valid JSON. Returns a dict or None.
        """
        try:
            jsons = list(extract_json_objects(s))
            assert len(jsons) == 1
            return jsons[0]
        except:
            if '{' in s and s.find('{') == s.rfind('{'):
                sub_str = s[s.find('{') + 1:].strip()
                lines = sub_str.split('\n')
                res = {}
                for l in lines:
                    l = l.strip()
                    if ': ' in l:
                        key = Video_MMLU_QA.remove_side_quote(l.split(': ')[0].strip())
                        val = Video_MMLU_QA.remove_side_quote(l.split(': ')[1].strip())
                        if len(key) and len(val):
                            res[key] = val
                return res
            return None

    def load_pack_answers(self, data_raw):
        """Parse packed (per-video JSON) answers back into per-question rows.

        Returns (meta, vstats): meta is a copy of self.data with a
        'prediction' column; vstats counts parse successes/failures.
        """
        vstats = defaultdict(lambda: 0)
        data = defaultdict(lambda: {})

        for k in data_raw:
            ans = data_raw[k].strip()
            if FAIL_MSG in ans:
                vstats['GEN_FAIL'] += 1
                continue
            res = self.robust_json_load(ans)
            if res is not None:
                data[k] = res
                vstats['PARSE_OK'] += 1
            else:
                vstats['PARSE_FAIL'] += 1

        # return data
        meta = cp.deepcopy(self.data)
        lt = len(meta)
        prediction = []
        for i in range(lt):
            line = meta.iloc[i]
            vid = line['video']
            idx = str(line['index'])
            # Missing per-question answers become None (counted as INVALIDQ).
            prediction.append(data[vid][idx] if idx in data[vid] else None)
        meta['prediction'] = prediction
        vstats['VALIDQ'] = len([x for x in prediction if x is not None])
        vstats['INVALIDQ'] = len([x for x in prediction if x is None])
        return meta, vstats

    # It returns a dictionary
    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Judge-based scoring of direct QA predictions.

        Unanswered/failed items get score -1; the tmp pkl makes the judging
        resumable across runs.
        """
        from .utils.video_mmlu import get_dimension_rating, prepare_score_prompt, SYSTEM_CAL_SCORE_PROMPT_QA

        assert get_file_extension(eval_file) in ['xlsx', 'json', 'tsv'], 'data file should be an supported format (xlsx/json/tsv) file'
        judge = judge_kwargs['model']
        nproc = judge_kwargs.pop('nproc', 4)
        _ = judge_kwargs.pop('verbose', None)
        _ = judge_kwargs.pop('retry', None)

        tmp_file = get_intermediate_file_path(eval_file, f'_{judge}_tmp', 'pkl')
        tgt_file = get_intermediate_file_path(eval_file, f'_{judge}_rating', 'json')
        score_file = get_intermediate_file_path(eval_file, f'_{judge}_score')

        # Deterministic judging.
        judge_kwargs['temperature'] = 0.0
        model = build_judge(**judge_kwargs)

        if not osp.exists(score_file):
            # Resume: keep only previously successful judge outputs.
            res = {} if not osp.exists(tmp_file) else load(tmp_file)
            res = {k: v for k, v in res.items() if model.fail_msg not in v}

            data = load(eval_file)
            data_un = data[~data['index'].isin(res)]
            data_un = data_un[~pd.isna(data_un['prediction'])]
            lt = len(data_un)
            prompts = [prepare_score_prompt(data_un.iloc[i]) for i in range(lt)]
            indices = [data_un.iloc[i]['index'] for i in range(lt)]
            model.system_prompt = SYSTEM_CAL_SCORE_PROMPT_QA
            if len(prompts):
                _ = track_progress_rich(
                    model.generate,
                    prompts,
                    keys=indices,
                    save=tmp_file,
                    nproc=nproc,
                    chunksize=nproc
                )
            score_map = load(tmp_file)
            # Items the judge never scored fall back to -1.
            data['score'] = [score_map[idx] if idx in score_map else -1 for idx in data['index']]
            rejected = [x for x in score_map.values() if FAIL_MSG in x]
            print(
                f'Among {len(data)} questions, failed to obtain prediction for {len(data) - len(score_map)} questions, '
                f'failed to obtain the score for another {len(rejected)} questions. '
                f'Those questions will be counted as 0 score in ALL rating, and will not be counted in VALID rating.'
            )

            dump(data, score_file)

        rating = get_dimension_rating(score_file)
        dump(rating, tgt_file)
        return rating
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/vlm2bench.py b/VLMEvalKit-sudoku/vlmeval/dataset/vlm2bench.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbe86d55407921d739fe0bf20881e296117d5e2a
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/vlm2bench.py
@@ -0,0 +1,114 @@
+# flake8: noqa
+import os
+import pandas as pd
+from .image_base import ImageBaseDataset
+from .utils.vlm2bench import (
+ common_process_results,
+ tf_pair_aggregate_accuracy,
+ cnt_aggregate_metric,
+ grp_aggregate_accuracy,
+)
+from ..smp import *
+from ..smp.file import get_intermediate_file_path
+
+
class VLM2Bench(ImageBaseDataset):
    """VLM2Bench: multi-image visual matching/tracking/counting benchmark.

    Fixes vs. original: removed the unused `tmp_file` local in evaluate() and
    the unused `as e` binding on the broad exception handler. Behavior is
    otherwise unchanged.
    """

    TYPE = "VQA"

    DATASET_URL = {
        "VLM2Bench": 'https://huggingface.co/datasets/Sterzhang/vlm2-bench/resolve/main/VLM2Bench_img.tsv'  # all 2860 image cases from VLM2Bench huggingface repo
    }
    # DATASET_MD5
    DATASET_MD5 = {'VLM2Bench': '16f474bfc4e269c583468bf89139da8f'}

    def build_prompt(self, line):
        """
        Build multimodal input:
        - If the record does not have "image_path", generate the image_path list based on the "image" field (stored as a regular list of image encodings),
          and update the "image" field to contain a list of multiple image paths.
        - Call dump_image to process the image and image_path fields to obtain all local paths of the images.
        - Construct the text prompt in the format "Question: {question}".
        - Encapsulate all image paths as image messages and append the text message, returning the final multimodal message list.
        """
        if isinstance(line, int):
            line = self.data.iloc[line]

        # If there is no image_path, generate the image_path list based on the image field
        if "image_path" not in line:
            img_field = line.get("image")
            # Assume the image field is already a regular list of image encodings, not a JSON-encoded string
            image_paths = [f"{line['index']}_{i}.jpg" for i in range(len(img_field))]
            line["image_path"] = image_paths
            # Also update the image field to the list of image encodings
            line["image"] = img_field

        # Call dump_image (implemented in the parent class) to process the image and image_path fields,
        # returning the list of local image paths
        img_paths = self.dump_image(line)
        if not isinstance(img_paths, list):
            img_paths = [img_paths]

        # Construct the text prompt (only containing the question)
        prompt = f"Question: {line['question']}\n"

        # Encapsulate all image paths as image messages and append the text message
        msgs = [{"type": "image", "value": p} for p in img_paths]
        msgs.append({"type": "text", "value": prompt})
        return msgs

    @classmethod
    def evaluate(cls, eval_file, **judge_kwargs):
        """
        Evaluation function:
        - Automatically read the model prediction result file (xlsx or TSV), which contains fields:
          index, question, answer, category, prediction
        - Directly use the original fields for evaluation without additional conversion;
        - For categories "oc-cnt" or "pc-cnt", calculate image_seq_len based on the "image" field
          (stored as a regular multi-image encoding) and write it into each record;
        - Group by category and use different evaluation functions to calculate metrics for each sub-task:
            * tf pair: gc-mat, gc-trk, oc-cpr, pc-cpr
            * cnt:     oc-cnt, pc-cnt
            * grp:     oc-grp, pc-grp
        - Write the scores of each sub-task to a CSV file and return a DataFrame.
        """
        model = judge_kwargs.get("model")
        if model:
            # Prefer the judge-processed intermediate file when it exists.
            storage = get_intermediate_file_path(eval_file, f'_{model}')
            score_file = get_intermediate_file_path(eval_file, f'_{model}_score', 'csv')
            if os.path.exists(storage):
                data = load(storage)
            else:
                data = load(eval_file)
        else:
            data = load(eval_file)

        results = data.to_dict(orient="records")
        processed = common_process_results(results)

        # For cnt categories, image_seq_len = number of images in the record;
        # fall back to 2 when the image field cannot be measured.
        for rec in processed:
            if rec.get("category", "").lower() in ["oc-cnt", "pc-cnt"]:
                try:
                    rec["image_seq_len"] = len(rec["image"])
                except Exception:
                    rec["image_seq_len"] = 2

        # Dispatch each category to its aggregate metric.
        eval_scores = {}
        for cat in sorted(set([r["category"] for r in processed])):
            sub_results = [r for r in processed if r["category"] == cat]
            if cat in ["gc-mat", "gc-trk", "oc-cpr", "pc-cpr"]:
                score = tf_pair_aggregate_accuracy(sub_results)
            elif cat in ["oc-cnt", "pc-cnt"]:
                score = cnt_aggregate_metric(sub_results)
            elif cat in ["oc-grp", "pc-grp"]:
                score = grp_aggregate_accuracy(sub_results)
            else:
                # Unknown category: record a null score rather than failing.
                score = None
            eval_scores[cat] = score

        # One-row DataFrame: one column per category.
        score_df = pd.DataFrame({k: [v] for k, v in eval_scores.items()})
        if model:
            final_score_file = score_file
        else:
            final_score_file = get_intermediate_file_path(eval_file, "_score", "csv")
        dump(score_df, final_score_file)
        return score_df
diff --git a/VLMEvalKit-sudoku/vlmeval/smp/__pycache__/__init__.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/smp/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..82950902e27ccb1e844cf2c0a871226b7103801b
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/smp/__pycache__/__init__.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/smp/__pycache__/file.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/smp/__pycache__/file.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..331e8f6f5a97e360e6894b83c013edd9e2658ae8
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/smp/__pycache__/file.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/smp/__pycache__/log.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/smp/__pycache__/log.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d507f83a64f58c49315df22a040b4c566b0aa242
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/smp/__pycache__/log.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/smp/__pycache__/vlm.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/smp/__pycache__/vlm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c408bfd70b10839e725624c43df1b1ac7dbe61a9
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/smp/__pycache__/vlm.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/utils/__pycache__/__init__.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/utils/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c0c3a7f6776e8428facf8584ed3d27f4b6098a3
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/utils/__pycache__/__init__.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/utils/__pycache__/mp_util.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/utils/__pycache__/mp_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..835123577bf8358709b52096ee2206e205d3fec5
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/utils/__pycache__/mp_util.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/utils/__pycache__/result_transfer.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/utils/__pycache__/result_transfer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d3fe1407759ba2a5f9ea019807722aab78971d0c
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/utils/__pycache__/result_transfer.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/base.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cacf99db8dec9f39222b2a641928a0fb98005cd2
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/base.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/bunnyllama3.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/bunnyllama3.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..93a8b4d4c02d42cf3a8379ba355cc13acb300a76
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/bunnyllama3.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/cambrian.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/cambrian.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eb0975904eaf5539a354f135c96fbd8ae6767563
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/cambrian.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/chameleon.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/chameleon.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6de235d9fc3d2b1c10e567fb45b824e13e46e783
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/chameleon.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/cogvlm.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/cogvlm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..121b55713727f10c3dc2a442b3b0635286b73fa0
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/cogvlm.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/emu.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/emu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e5c2474fa31beb2f6fee744241f6cbd725a7c150
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/emu.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/llama_vision.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/llama_vision.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f61ed41f71b20c654891dcfee264dbd9848a460e
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/llama_vision.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/logics.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/logics.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ebe3e7837ea05a8eac3cbba8629f58a10df1d410
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/logics.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/mgm.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/mgm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d39993d4a7b7cdf354f24aca52bae5117496607f
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/mgm.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/mplug_owl2.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/mplug_owl2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e7a10bea8471d8a7c151f187239067476be1181
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/mplug_owl2.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/mplug_owl3.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/mplug_owl3.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4b85686a2af2f38fb52d61a46862dc7a7a34aa46
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/mplug_owl3.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/nvlm.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/nvlm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d175e14f6f5b391fbcf4b3ada4612f7e1dd1fa93
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/nvlm.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/oryx.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/oryx.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..847c60b22a782d5eb64800480e5a552ed0da65b8
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/oryx.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/ristretto.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/ristretto.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..62af7e10a85d653a314c892782cf21842aa16941
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/ristretto.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/treevgr.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/treevgr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0a8b13560bc8b043123fc117fbca87c6e9ec3b5c
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/treevgr.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/vila.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/vila.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e377006bf235b0fd33cb9d3b7f9b1f142ca4e2c
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/vila.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/vita.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/vita.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c73a04d69969f50c2d2c300b7651a1b1a3ef460
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/vita.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/vlm_r1.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/vlm_r1.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f827bd5b540a6e49f928b2d9d9dc4d8a6b72ad08
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/vlm_r1.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/vxverse.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/vxverse.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c8d876108d659fa6487efd3ed45d8f41ca28ee9d
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/vxverse.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/xgen_mm.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/xgen_mm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..508e513ee8e312d9a1a31a9cde7b805a762b5a7b
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/xgen_mm.cpython-310.pyc differ