ZzzHelloWorld committed on
Commit
7aef0de
·
verified ·
1 Parent(s): fabf9c5

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. Shapegrid/ShapeGrid_dis.parquet +3 -0
  2. Shapegrid/ShapeGrid_loc.parquet +3 -0
  3. VLMEvalKit-sudoku/.github/scripts/assert_score.py +61 -0
  4. VLMEvalKit-sudoku/.github/workflows/lint.yml +23 -0
  5. VLMEvalKit-sudoku/.github/workflows/pr-run-test.yml +70 -0
  6. VLMEvalKit-sudoku/llava/serve/__init__.py +0 -0
  7. VLMEvalKit-sudoku/llava/serve/examples/extreme_ironing.jpg +3 -0
  8. VLMEvalKit-sudoku/llava/train/train_dpo.py +1782 -0
  9. VLMEvalKit-sudoku/requirements/docs.txt +11 -0
  10. VLMEvalKit-sudoku/vlmeval/api/__pycache__/claude.cpython-310.pyc +0 -0
  11. VLMEvalKit-sudoku/vlmeval/api/__pycache__/gemini.cpython-310.pyc +0 -0
  12. VLMEvalKit-sudoku/vlmeval/api/__pycache__/glm_vision.cpython-310.pyc +0 -0
  13. VLMEvalKit-sudoku/vlmeval/api/__pycache__/hf_chat_model.cpython-310.pyc +0 -0
  14. VLMEvalKit-sudoku/vlmeval/api/__pycache__/hunyuan.cpython-310.pyc +0 -0
  15. VLMEvalKit-sudoku/vlmeval/api/__pycache__/lmdeploy.cpython-310.pyc +0 -0
  16. VLMEvalKit-sudoku/vlmeval/api/__pycache__/qwen_vl_api.cpython-310.pyc +0 -0
  17. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/megabench.cpython-310.pyc +0 -0
  18. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/sfebench.cpython-310.pyc +0 -0
  19. VLMEvalKit-sudoku/vlmeval/dataset/cmmmu.py +354 -0
  20. VLMEvalKit-sudoku/vlmeval/dataset/creation.py +741 -0
  21. VLMEvalKit-sudoku/vlmeval/dataset/dude.py +211 -0
  22. VLMEvalKit-sudoku/vlmeval/dataset/image_mcq.py +0 -0
  23. VLMEvalKit-sudoku/vlmeval/dataset/m4bench.py +193 -0
  24. VLMEvalKit-sudoku/vlmeval/dataset/miabench.py +166 -0
  25. VLMEvalKit-sudoku/vlmeval/dataset/mmbench_video.py +257 -0
  26. VLMEvalKit-sudoku/vlmeval/dataset/mmifeval.py +483 -0
  27. VLMEvalKit-sudoku/vlmeval/dataset/qbench_video.py +354 -0
  28. VLMEvalKit-sudoku/vlmeval/dataset/text_base.py +88 -0
  29. VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/mlvu.cpython-310.pyc +0 -0
  30. VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/multiple_choice.cpython-310.pyc +0 -0
  31. VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/omni_verifier.cpython-310.pyc +0 -0
  32. VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/shortqa.cpython-310.pyc +0 -0
  33. VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/spatial457.cpython-310.pyc +0 -0
  34. VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/tamperbench.cpython-310.pyc +0 -0
  35. VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/tempcompass.cpython-310.pyc +0 -0
  36. VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/yorn.cpython-310.pyc +0 -0
  37. VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/README.md +51 -0
  38. VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__init__.py +5 -0
  39. VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/evaluator.cpython-310.pyc +0 -0
  40. VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/response_parse_type.cpython-310.pyc +0 -0
  41. VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/aggregation/min_agg.py +14 -0
  42. VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/aggregation_type.py +25 -0
  43. VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/evaluator.py +399 -0
  44. VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/metric_type.py +259 -0
  45. VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/answer_str_parse.py +137 -0
  46. VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/dummy_parse.py +6 -0
  47. VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/json_parse.py +17 -0
  48. VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/requirements.txt +15 -0
  49. VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/response_parse_type.py +54 -0
  50. VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/nli_entailment.py +20 -0
Shapegrid/ShapeGrid_dis.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c16d47667c9c5d2a97d70370610e54733861ec9043f2bb8aa6107c927de2367d
3
+ size 102012404
Shapegrid/ShapeGrid_loc.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8549444eaf072e051373241edca5e00a5d141c012c9a33fee6f353c3e203abc4
3
+ size 66166188
VLMEvalKit-sudoku/.github/scripts/assert_score.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import ast
import json
import os

import pandas as pd


def validate_scores(dataset_list, assert_score, model_name):
    """Compare each dataset's freshly computed score against a baseline.

    Args:
        dataset_list: Dataset names to validate.
        assert_score: Mapping of ``{dataset: {model: baseline_score}}``.
        model_name: Model whose output files under ``outputs/`` are checked.

    Raises:
        AssertionError: If any score deviates from its baseline by more than 0.01.
    """
    for dataset in dataset_list:
        base_score = assert_score[dataset][model_name]
        if dataset == "OCRBench_MINI":
            # OCRBench writes a JSON summary instead of a CSV accuracy table.
            score_file = os.path.join("outputs", f"{model_name}/{model_name}_{dataset}_score.json")
            with open(score_file, "r") as f:
                total_score = json.load(f)
            cur_score = total_score["Final Score Norm"]
        else:
            score_file = os.path.join("outputs", f"{model_name}/{model_name}_{dataset}_acc.csv")
            df = pd.read_csv(score_file)
            cur_score = df["Overall"].iloc[0]
            if dataset == "MMBench_V11_MINI":
                # BUG FIX: `.values` returned a whole ndarray, which made the
                # tolerance comparison rely on array truthiness and printed an
                # array in the failure message. Take the dev-split scalar.
                cur_score = df.loc[df["split"] == "dev", "Overall"].iloc[0]
        assert (
            abs(cur_score - float(base_score)) <= 0.01
        ), f"{dataset} on {model_name}: cur_score is {cur_score}, base_score is {base_score}"
        print(f"cur_score is {cur_score}, base_score is {base_score}")


def parse_arguments():
    """Build and parse the CLI arguments for score validation."""
    parser = argparse.ArgumentParser(description="Validate model scores against csv/json data")

    parser.add_argument("--dataset", type=str, required=True, help="Space-separated list of datasets")

    parser.add_argument(
        "--base_score", type=str, required=True, help="Dictionary string in format {dataset:{model:score}}"
    )

    parser.add_argument("--model-name", type=str, required=True, help="Name of the model to validate")

    return parser.parse_args()


def main():
    """Parse CLI args and run score validation; report (not raise) on malformed input."""
    args = parse_arguments()

    try:
        dataset_list = args.dataset.split()
        # literal_eval (not eval) safely parses the dict-literal string.
        base_score = ast.literal_eval(args.base_score)
    except Exception as e:
        print(f"Parameter parsing error: {str(e)}")
        return

    validate_scores(dataset_list, base_score, args.model_name)


if __name__ == "__main__":
    main()
VLMEvalKit-sudoku/.github/workflows/lint.yml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: lint
2
+
3
+ on: [push, pull_request]
4
+
5
+ concurrency:
6
+ group: ${{ github.workflow }}-${{ github.ref }}
7
+ cancel-in-progress: true
8
+
9
+ jobs:
10
+ lint:
11
+ runs-on: ubuntu-latest
12
+ steps:
13
+ - uses: actions/checkout@v2
14
+ - name: Set up Python 3.10
15
+ uses: actions/setup-python@v2
16
+ with:
17
+ python-version: 3.10.15
18
+ - name: Install pre-commit hook
19
+ run: |
20
+ pip install pre-commit
21
+ pre-commit install
22
+ - name: Linting
23
+ run: pre-commit run --all-files
VLMEvalKit-sudoku/.github/workflows/pr-run-test.yml ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: pr_run_test
2
+
3
+ on:
4
+ pull_request:
5
+ branches:
6
+ - "main"
7
+ paths-ignore:
8
+ - "docs/**"
9
+ - "**.md"
10
+ workflow_dispatch:
11
+ schedule:
12
+ - cron: '56 01 * * *'
13
+
14
+ concurrency:
15
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
16
+ cancel-in-progress: true
17
+
18
+ env:
19
+ BASE_SCORE: '{"MMBench_V11_MINI":{"Qwen2-VL-7B-Instruct":0.8727272727272727,"InternVL2_5-8B":0.89090909,"llava_onevision_qwen2_7b_si":0.8363636363636363},"MMStar_MINI":{"Qwen2-VL-7B-Instruct":0.6266666666666667,"InternVL2_5-8B":0.6333333333333333,"llava_onevision_qwen2_7b_si":0.49333333333333335},"AI2D_MINI":{"Qwen2-VL-7B-Instruct":0.7975708502024291,"InternVL2_5-8B":0.854251012145749,"llava_onevision_qwen2_7b_si":0.8178137651821862},"OCRBench_MINI":{"Qwen2-VL-7B-Instruct":16.6,"InternVL2_5-8B":16.7,"llava_onevision_qwen2_7b_si":13.0}}'
20
+ HF_HUB_CACHE: /fs-computility/llm/shared/llmeval/models/opencompass_hf_hub
21
+ HF_HUB_OFFLINE: 1
22
+ CONDA_PATH: /fs-computility/llm/qa-llm-cicd/miniconda3
23
+ CONDA_ENV: vlm_pr_test
24
+
25
+ jobs:
26
+ vlm_test:
27
+ if: ${{!cancelled()}}
28
+ runs-on: [volc_cu12_mllm]
29
+ strategy:
30
+ fail-fast: false
31
+ matrix:
32
+ model: [Qwen/Qwen2-VL-7B-Instruct,OpenGVLab/InternVL2_5-8B,lmms-lab/llava-onevision-qwen2-7b-si]
33
+ dataset: ["MMBench_V11_MINI MMStar_MINI AI2D_MINI","OCRBench_MINI"]
34
+ steps:
35
+ - name: clone_repo
36
+ uses: actions/checkout@v3
37
+ - name: evaluation_model
38
+ uses: nick-fields/retry@v3
39
+ with:
40
+ max_attempts: 3
41
+ timeout_minutes: 30
42
+ command: |
43
+ . ${{env.CONDA_PATH}}/bin/activate
44
+ conda activate ${{env.CONDA_ENV}}
45
+ pip uninstall vlmeval -y
46
+ pip install -e .
47
+ pre_model=$(echo ${{matrix.model}} | awk -F'/' '{print $1}')
48
+ if [ "${{matrix.model}}" = "lmms-lab/llava-onevision-qwen2-7b-si" ];then
49
+ model_name="llava_onevision_qwen2_7b_si"
50
+ else
51
+ model_name=$(echo ${{matrix.model}} | awk -F'/' '{print $2}')
52
+ fi
53
+ pip list
54
+ nvidia-smi
55
+ LOG=$(python run.py --data ${{matrix.dataset}} --model $model_name 2>&1)
56
+ echo "$LOG"
57
+ if echo "$LOG" | grep -q "CUDA out of memory"; then
58
+ sleep 300
59
+ exit 1 # retry because of oom
60
+ fi
61
+ - name: assert_result
62
+ run: |
63
+ . ${{env.CONDA_PATH}}/bin/activate
64
+ conda activate ${{env.CONDA_ENV}}
65
+ if [ "${{matrix.model}}" = "lmms-lab/llava-onevision-qwen2-7b-si" ];then
66
+ model_name="llava_onevision_qwen2_7b_si"
67
+ else
68
+ model_name=$(echo ${{matrix.model}} | awk -F'/' '{print $2}')
69
+ fi
70
+ python .github/scripts/assert_score.py --dataset "${{matrix.dataset}}" --base_score $BASE_SCORE --model-name $model_name
VLMEvalKit-sudoku/llava/serve/__init__.py ADDED
File without changes
VLMEvalKit-sudoku/llava/serve/examples/extreme_ironing.jpg ADDED

Git LFS Details

  • SHA256: a54caa21bc513ed25c8ca7f5747555c05dfd4e33f6a3cf5c08b3d9138a4da1d9
  • Pointer size: 130 Bytes
  • Size of remote file: 62.6 kB
VLMEvalKit-sudoku/llava/train/train_dpo.py ADDED
@@ -0,0 +1,1782 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright:
2
+ # Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright:
3
+ # Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import os
18
+ import copy
19
+ import deepspeed
20
+ from dataclasses import dataclass, field
21
+ import json
22
+ import logging
23
+ import pathlib
24
+ from typing import Dict, Optional, Sequence, List
25
+ import ast
26
+
27
+ import yaml
28
+ import time
29
+ import random
30
+ import yaml
31
+ import math
32
+ import re
33
+ import torch
34
+
35
+ import transformers
36
+ import tokenizers
37
+
38
+ from llava.constants import IGNORE_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, IMAGE_TOKEN_INDEX
39
+ from torch.utils.data import Dataset
40
+ from llava.train.llava_trainer import LLaVADPOTrainer
41
+ from data_processing.utils import load_jsonl, load_json
42
+ from llava import conversation as conversation_lib
43
+ from llava.model import *
44
+ from llava.model.language_model.llava_qwen import LlavaQwenConfig
45
+ from llava.model.language_model.llava_llama import LlavaConfig
46
+ from llava.model.language_model.llava_mistral import LlavaMistralConfig
47
+ from llava.mm_utils import process_highres_image, process_anyres_image, process_highres_image_crop_split, tokenizer_image_token
48
+ from llava.utils import rank0_print
49
+ from transformers import AutoConfig
50
+ import pickle
51
+
52
+ from trl.trainer.utils import DPODataCollatorWithPadding
53
+ from PIL import Image, ImageFile
54
+ from decord import VideoReader, cpu
55
+
56
+ ImageFile.LOAD_TRUNCATED_IMAGES = True
57
+ from packaging import version
58
+ from typing import Any
59
+
60
+ local_rank = None
61
+ import numpy as np
62
+
63
+ IS_TOKENIZER_GREATER_THAN_0_14 = version.parse(tokenizers.__version__) >= version.parse("0.14")
64
+
65
+
66
@dataclass
class ModelArguments:
    """Arguments selecting the base model and which multimodal parts to tune."""

    model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
    model_class_name: Optional[str] = field(default=None, metadata={"help": "Used to init model class, format is XXXXForCausalLM. e.g. currently XXXX is chosen from LlavaLlama, LlavaMixtral, LlavaMistral, Llama"})

    mm_tunable_parts: Optional[str] = field(
        default=None, metadata={"help": 'Could be "mm_mlp_adapter", "mm_vision_resampler", "mm_vision_tower,mm_mlp_adapter,mm_language_model", "mm_vision_tower,mm_mlp_adapter,mm_language_model", "mm_mlp_adapter,mm_language_model"'}
    )
    # deciding which part of the multimodal model to tune, will overwrite other previous settings

    version: Optional[str] = field(default="v0")
    freeze_backbone: bool = field(default=False)
    tune_mm_mlp_adapter: bool = field(default=False)
    tune_mm_vision_resampler: bool = field(default=False)
    vision_tower: Optional[str] = field(default=None)
    vision_tower_pretrained: Optional[str] = field(default=None)  # default to the last layer

    unfreeze_mm_vision_tower: bool = field(default=False)
    unfreeze_language_model: bool = field(default=False)
    mm_vision_select_layer: Optional[int] = field(default=-1)  # default to the last layer
    pretrain_mm_mlp_adapter: Optional[str] = field(default=None)
    mm_projector_type: Optional[str] = field(default="linear")
    mm_use_im_start_end: bool = field(default=False)
    mm_use_im_patch_token: bool = field(default=True)
    mm_patch_merge_type: Optional[str] = field(default="flat")
    mm_vision_select_feature: Optional[str] = field(default="patch")
    mm_resampler_type: Optional[str] = field(default=None)
    # Mask-drop settings (presumably for the vision resampler -- TODO confirm against resampler code).
    mm_mask_drop_mode: str = field(default="fixed")
    mm_mask_drop_skip_percentage: float = field(default=0.0)
    mm_mask_drop_ratio: float = field(default=0.25)
    mm_mask_drop_ratio_upper: Optional[float] = field(default=None)
    mm_mask_drop_ratio_lower: Optional[float] = field(default=None)
    # Spatial pooling of vision features.
    mm_spatial_pool_stride: Optional[int] = field(default=None)
    mm_spatial_pool_mode: str = field(default="average")
    mm_spatial_pool_out_channels: Optional[int] = field(default=None)
    # Perceiver / Q-Former resampler hyperparameters.
    mm_perceiver_depth: Optional[int] = field(default=3)
    mm_perceiver_latents: Optional[int] = field(default=32)
    mm_perceiver_ff_mult: Optional[float] = field(default=4)
    mm_perceiver_pretrained: Optional[str] = field(default=None)
    mm_qformer_depth: Optional[int] = field(default=3)
    mm_qformer_latents: Optional[int] = field(default=32)
    mm_qformer_pretrained: Optional[str] = field(default=None)

    # RoPE scaling (for extended context length).
    rope_scaling_factor: Optional[float] = field(default=None)
    rope_scaling_type: Optional[str] = field(default=None)

    # S2 multi-scale vision feature settings.
    s2: Optional[bool] = field(default=False)
    s2_scales: Optional[str] = field(default="336,672,1008")
114
+
115
+
116
@dataclass
class DataArguments:
    """Arguments describing the training data and image/video preprocessing."""

    data_path: str = field(default=None, metadata={"help": "Path to the training data, in llava's instruction.json format. Supporting multiple json files via /path/to/{a,b,c}.json"})
    lazy_preprocess: bool = False
    is_multimodal: bool = False
    image_folder: Optional[str] = field(default=None)
    video_folder: Optional[str] = field(default=None)
    video_fps: Optional[int] = field(default=1)
    image_aspect_ratio: str = "square"
    image_grid_pinpoints: Optional[str] = field(default=None)
    image_crop_resolution: int = 384
    image_split_resolution: int = 384
    input_prompt: Optional[str] = field(default=None)
    refine_prompt: Optional[bool] = field(default=False)
    # Upper bound on sampled video frames (0 apparently means no bound -- TODO confirm at use site).
    frames_upbound: Optional[int] = field(default=0)
    num_sample: Optional[int] = field(default=None)
132
+
133
+
134
@dataclass
class TrainingArguments(transformers.TrainingArguments):
    """HF TrainingArguments extended with LLaVA freezing, quantization/LoRA,
    modality-aware batching, and DPO-specific hyperparameters."""

    cache_dir: Optional[str] = field(default=None)
    optim: str = field(default="adamw_torch")
    remove_unused_columns: bool = field(default=False)
    freeze_mm_mlp_adapter: bool = field(default=False)
    freeze_mm_vision_resampler: bool = field(default=False)
    mpt_attn_impl: Optional[str] = field(default="triton")
    model_max_length: int = field(
        default=4096,
        metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."},
    )
    # QLoRA quantization settings.
    double_quant: bool = field(default=True, metadata={"help": "Compress the quantization statistics through double quantization."})
    quant_type: str = field(default="nf4", metadata={"help": "Quantization data type to use. Should be one of `fp4` or `nf4`."})
    bits: int = field(default=16, metadata={"help": "How many bits to use."})
    # LoRA adapter settings.
    lora_enable: bool = False
    lora_r: int = 64
    lora_alpha: int = 16
    lora_dropout: float = 0.05
    lora_weight_path: str = ""
    lora_bias: str = "none"
    # Separate learning rates for the projector / vision tower.
    mm_projector_lr: Optional[float] = None
    mm_vision_tower_lr: Optional[float] = None
    # Batch-grouping strategies.
    group_by_varlen: bool = field(default=False)
    group_by_modality_length: bool = field(default=False)
    group_by_modality_length_auto: bool = field(default=False)
    auto_find_batch_size: bool = field(default=False)
    gradient_checkpointing: bool = field(default=True)
    verbose_logging: bool = field(default=False)
    attn_implementation: str = field(default="flash_attention_2", metadata={"help": "Use transformers attention implementation."})
    # DPO hyperparameters.
    dpo_alpha: float = field(default=1.0)
    beta: float = field(default=0.1)
    gamma: float = field(default=1.0)
    generate_during_eval: bool = field(default=False)
    precompute_ref_log_probs: bool = field(default=False)
169
+
170
+
171
+ def maybe_zero_3(param, ignore_status=False, name=None):
172
+ from deepspeed import zero
173
+ from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
174
+
175
+ if hasattr(param, "ds_id"):
176
+ if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
177
+ if not ignore_status:
178
+ logging.warning(f"{name}: param.ds_status != ZeroParamStatus.NOT_AVAILABLE: {param.ds_status}")
179
+ with zero.GatheredParameters([param]):
180
+ param = param.data.detach().cpu().clone()
181
+ else:
182
+ param = param.detach().cpu().clone()
183
+ return param
184
+
185
+
186
# Borrowed from peft.utils.get_peft_model_state_dict
def get_peft_state_maybe_zero_3(named_params, bias):
    """Extract LoRA weights (and, depending on `bias`, bias terms) as CPU tensors.

    Args:
        named_params: Iterable of (name, parameter) pairs, e.g. model.named_parameters().
        bias: One of "none" (LoRA weights only), "all" (LoRA weights + every bias),
            or "lora_only" (LoRA weights + only the biases of LoRA-adapted layers).

    Raises:
        NotImplementedError: For any other `bias` value.
    """
    if bias == "none":
        to_return = {k: t for k, t in named_params if "lora_" in k}
    elif bias == "all":
        to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
    elif bias == "lora_only":
        to_return = {}
        maybe_lora_bias = {}
        lora_bias_names = set()
        for k, t in named_params:
            if "lora_" in k:
                to_return[k] = t
                bias_name = k.split("lora_")[0] + "bias"
                lora_bias_names.add(bias_name)
            elif "bias" in k:
                maybe_lora_bias[k] = t
        # BUG FIX: the original iterated `maybe_lora_bias` directly (dict keys,
        # so the 2-tuple unpack fails) and tested a stale `bias_name` left over
        # from the previous loop. Iterate items() and match each bias key itself,
        # mirroring peft.utils.get_peft_model_state_dict.
        for k, t in maybe_lora_bias.items():
            if k in lora_bias_names:
                to_return[k] = t
    else:
        raise NotImplementedError
    to_return = {k: maybe_zero_3(v, ignore_status=True) for k, v in to_return.items()}
    return to_return
210
+
211
+
212
def get_peft_state_non_lora_maybe_zero_3(named_params, require_grad_only=True):
    """Collect every non-LoRA parameter (optionally trainable only) as a CPU tensor."""
    non_lora = {name: p for name, p in named_params if "lora_" not in name}
    if require_grad_only:
        # Keep only parameters that receive gradients.
        non_lora = {name: p for name, p in non_lora.items() if p.requires_grad}
    return {name: maybe_zero_3(p, ignore_status=True).cpu() for name, p in non_lora.items()}
218
+
219
+
220
def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match):
    """Collect parameters whose names contain any of `keys_to_match`, as CPU tensors."""
    matched = {
        name: p
        for name, p in named_params
        if any(key in name for key in keys_to_match)
    }
    return {name: maybe_zero_3(p, ignore_status=True).cpu() for name, p in matched.items()}
224
+
225
+
226
def find_all_linear_names(model):
    """Return the deduplicated leaf names of every nn.Linear in `model`,
    skipping multimodal submodules; 'lm_head' is dropped (needed for 16-bit)."""
    skip_keywords = ("mm_projector", "vision_tower", "vision_resampler")
    linear_names = set()
    for full_name, module in model.named_modules():
        if any(word in full_name for word in skip_keywords):
            continue
        if not isinstance(module, torch.nn.Linear):
            continue
        parts = full_name.split(".")
        # Keep only the leaf component (e.g. "q_proj" from "layers.0.q_proj").
        linear_names.add(parts[-1] if len(parts) > 1 else parts[0])
    linear_names.discard("lm_head")  # needed for 16-bit
    return list(linear_names)
240
+
241
+
242
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str):
    """Collects the state dict and dump to disk.

    When only the multimodal adapter (mm_projector / vision_resampler) is
    tunable, saves just those weights; otherwise defers to DeepSpeed's save
    path or a plain CPU state-dict save on the rank that should save.
    """
    args = trainer.args
    if hasattr(args, "tune_mm_mlp_adapter") and args.tune_mm_mlp_adapter:
        adapter_only = True
    elif hasattr(args, "mm_tunable_parts") and (
        len(args.mm_tunable_parts.split(",")) == 1
        and ("mm_mlp_adapter" in args.mm_tunable_parts or "mm_vision_resampler" in args.mm_tunable_parts)
    ):
        # only has mm_mlp_adapter and mm_vision_resampler in the tuneable parts
        adapter_only = True
    else:
        adapter_only = False

    trainer.accelerator.wait_for_everyone()
    torch.cuda.synchronize()
    rank0_print(f"Only save projectors: {adapter_only}")

    if adapter_only:
        # Only save Adapter
        keys_to_match = ["mm_projector", "vision_resampler"]
        if getattr(args, "use_im_start_end", False):
            keys_to_match.extend(["embed_tokens", "embed_in"])

        weight_to_save = get_mm_adapter_state_maybe_zero_3(trainer.model.named_parameters(), keys_to_match)
        trainer.model.config.save_pretrained(output_dir)

        current_folder = output_dir.split("/")[-1]
        parent_folder = os.path.dirname(output_dir)
        if args.local_rank == 0 or args.local_rank == -1:
            if current_folder.startswith("checkpoint-"):
                # Mid-training checkpoint: collect projector weights per checkpoint
                # under a sibling "mm_projector" directory.
                mm_projector_folder = os.path.join(parent_folder, "mm_projector")
                os.makedirs(mm_projector_folder, exist_ok=True)
                torch.save(weight_to_save, os.path.join(mm_projector_folder, f"{current_folder}.bin"))
            else:
                torch.save(weight_to_save, os.path.join(output_dir, f"mm_projector.bin"))
        return

    if trainer.deepspeed:
        # DeepSpeed knows how to consolidate partitioned weights itself.
        trainer.save_model(output_dir)
        return

    state_dict = trainer.model.state_dict()
    if args.should_save:
        cpu_state_dict = {key: value.cpu() for key, value in state_dict.items()}
        del state_dict  # free GPU-side references before writing
        trainer._save(output_dir, state_dict=cpu_state_dict)  # noqa
284
+
285
+
286
def smart_tokenizer_and_embedding_resize(
    special_tokens_dict: Dict,
    tokenizer: transformers.PreTrainedTokenizer,
    model: transformers.PreTrainedModel,
):
    """Resize tokenizer and embedding.

    Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
    """
    num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
    model.resize_token_embeddings(len(tokenizer))

    if num_new_tokens <= 0:
        return

    # Initialize each newly added row with the mean of the pre-existing rows so
    # new tokens start from a sensible point in embedding space.
    in_embed = model.get_input_embeddings().weight.data
    out_embed = model.get_output_embeddings().weight.data
    in_embed[-num_new_tokens:] = in_embed[:-num_new_tokens].mean(dim=0, keepdim=True)
    out_embed[-num_new_tokens:] = out_embed[:-num_new_tokens].mean(dim=0, keepdim=True)
307
+
308
+
309
+ def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
310
+ """Tokenize a list of strings."""
311
+ tokenized_list = [
312
+ tokenizer(
313
+ text,
314
+ return_tensors="pt",
315
+ padding="longest",
316
+ max_length=tokenizer.model_max_length,
317
+ truncation=True,
318
+ )
319
+ for text in strings
320
+ ]
321
+ input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list]
322
+ input_ids_lens = labels_lens = [tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list]
323
+ return dict(
324
+ input_ids=input_ids,
325
+ labels=labels,
326
+ input_ids_lens=input_ids_lens,
327
+ labels_lens=labels_lens,
328
+ )
329
+
330
+
331
def _mask_targets(target, tokenized_lens, speakers):
    """Mask the header and every human turn in `target` with IGNORE_INDEX,
    so loss is not computed on them. Mutates `target` in place."""
    offset = tokenized_lens[0]  # first entry is the header length
    target[:offset] = IGNORE_INDEX
    for turn_len, speaker in zip(tokenized_lens[1:], speakers):
        if speaker == "human":
            # offset+2: skips two leading tokens of the turn (presumably the
            # speaker signal -- matches the original; verify against tokenizer).
            target[offset + 2 : offset + turn_len] = IGNORE_INDEX
        offset += turn_len
340
+
341
+
342
def _add_speaker_and_signal(header, source, get_conversation=True):
    """Add speaker and start/end signal on each round."""
    BEGIN_SIGNAL = "### "
    END_SIGNAL = "\n"
    conversation = header
    for sentence in source:
        speaker = sentence["from"].lower()
        if speaker == "human":
            role = conversation_lib.default_conversation.roles[0]
        elif speaker == "gpt":
            role = conversation_lib.default_conversation.roles[1]
        else:
            role = "unknown"
        # Rewrite the turn in place with its signal wrapper.
        sentence["value"] = BEGIN_SIGNAL + role + ": " + sentence["value"] + END_SIGNAL
        if get_conversation:
            conversation += sentence["value"]
    conversation += BEGIN_SIGNAL
    return conversation
360
+
361
+
362
def preprocess_multimodal(sources: Sequence[str], data_args: DataArguments) -> Dict:
    """Normalize image-token placement in each conversation turn.

    Hoists a mid-sentence image token to the start of the turn, optionally wraps
    it in mmtag <Image> markers, and expands it with start/end tokens when
    `mm_use_im_start_end` is set. Mutates `sources` in place and returns it.
    """
    if not data_args.is_multimodal:
        return sources

    for source in sources:
        for sentence in source:
            text = sentence["value"]
            if DEFAULT_IMAGE_TOKEN in text and not text.startswith(DEFAULT_IMAGE_TOKEN):
                # Move the image token to the front of the turn.
                body = text.replace(DEFAULT_IMAGE_TOKEN, "").strip()
                sentence["value"] = (DEFAULT_IMAGE_TOKEN + "\n" + body).strip()
                if "mmtag" in conversation_lib.default_conversation.version:
                    sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, "<Image>" + DEFAULT_IMAGE_TOKEN + "</Image>")
            replace_token = DEFAULT_IMAGE_TOKEN
            if data_args.mm_use_im_start_end:
                replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
            sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, replace_token)

    return sources
381
+
382
+
383
def preprocess_multimodal_movie(sources: Sequence[str], data_args: DataArguments, video_inputs: str) -> Dict:
    """Replace the image token in each turn with the pre-built video input string.

    Returns:
        Either `sources` alone (non-multimodal passthrough, as in the original),
        or `(sources, prompt)` where `prompt` is the last image-bearing turn's
        text with the image token stripped, or None when no turn contained one.
    """
    is_multimodal = data_args.is_multimodal
    if not is_multimodal:
        return sources

    prompt = None  # BUG FIX: was unbound (NameError on return) when no turn had an image token
    for source in sources:
        for sentence in source:
            if DEFAULT_IMAGE_TOKEN in sentence["value"]:
                prompt = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, "").strip()
                replace_token = video_inputs
                if data_args.mm_use_im_start_end:
                    replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
                sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, replace_token)

    return sources, prompt
398
+
399
+
400
def preprocess_llama_2(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False) -> Dict:
    """Tokenize conversations with the LLaMA-2 chat template and build labels.

    Applies the default conversation template, tokenizes each rendered prompt
    (via tokenizer_image_token when images are present), then masks every
    instruction span with IGNORE_INDEX so the loss is computed only on the
    assistant replies. Returns dict(input_ids=..., labels=...).
    """
    conv = conversation_lib.default_conversation.copy()
    roles = {"human": conv.roles[0], "gpt": conv.roles[1]}

    # Apply prompt templates
    conversations = []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != conv.roles[0]:
            # Skip the first one if it is not from human
            source = source[1:]

        conv.messages = []
        for j, sentence in enumerate(source):
            role = roles[sentence["from"]]
            # Turns must strictly alternate human/gpt.
            assert role == conv.roles[j % 2], f"{i}"
            conv.append_message(role, sentence["value"])
        conversations.append(conv.get_prompt())

    # Tokenize conversations

    if has_image:
        input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversations], dim=0)
    else:
        input_ids = tokenizer(
            conversations,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        ).input_ids

    targets = input_ids.clone()

    assert conv.sep_style == conversation_lib.SeparatorStyle.LLAMA_2

    # Mask targets: everything up to and including "[/INST] " in each round
    # is instruction and gets IGNORE_INDEX; only the reply remains supervised.
    sep = "[/INST] "
    for conversation, target in zip(conversations, targets):
        total_len = int(target.ne(tokenizer.pad_token_id).sum())

        rounds = conversation.split(conv.sep2)
        cur_len = 1  # skip the leading BOS token
        target[:cur_len] = IGNORE_INDEX
        for i, rou in enumerate(rounds):
            if rou == "":
                break

            parts = rou.split(sep)
            if len(parts) != 2:
                break
            parts[0] += sep  # re-append sep so len(parts[0]) covers the full instruction

            if has_image:
                round_len = len(tokenizer_image_token(rou, tokenizer))
                # -2: drop BOS and the token consumed by the sep boundary
                instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2
            else:
                round_len = len(tokenizer(rou).input_ids)
                instruction_len = len(tokenizer(parts[0]).input_ids) - 2

            target[cur_len : cur_len + instruction_len] = IGNORE_INDEX

            cur_len += round_len
        target[cur_len:] = IGNORE_INDEX

        if cur_len < tokenizer.model_max_length:
            if cur_len != total_len:
                # Token accounting drifted; mask the whole sample rather than
                # train on misaligned labels.
                target[:] = IGNORE_INDEX
                rank0_print(f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}." f" (ignored)")

    return dict(
        input_ids=input_ids,
        labels=targets,
    )
473
+
474
+
475
def make_conv(prompt, answer):
    """Wrap a (prompt, answer) pair as a two-turn conversation.

    Returns the list-of-dicts turn format used throughout this file: one
    "human" turn carrying the prompt followed by one "gpt" turn carrying
    the answer.
    """
    human_turn = {"from": "human", "value": prompt}
    gpt_turn = {"from": "gpt", "value": answer}
    return [human_turn, gpt_turn]
486
+
487
+
488
def preprocess_gemma(sources: List[List[Dict[str, str]]], tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False) -> Dict:
    """Tokenize conversations with the Gemma chat template and build labels.

    Renders each conversation through the default template, tokenizes it,
    then masks instruction spans with IGNORE_INDEX so only assistant replies
    are supervised. Returns dict(input_ids=..., labels=...).
    """
    conv: conversation_lib.Conversation = conversation_lib.default_conversation.copy()
    roles: Dict[str, str] = {"human": conv.roles[0], "gpt": conv.roles[1]}

    # Apply prompt templates
    conversations: List[str] = []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != conv.roles[0]:
            # Skip the first one if it is not from human
            source: List[Dict[str, str]] = source[1:]

        conv.messages = []
        for j, sentence in enumerate(source):
            role: str = roles[sentence["from"]]
            # Turns must strictly alternate human/gpt.
            assert role == conv.roles[j % 2], f"{i}"
            conv.append_message(role, sentence["value"])
        conversations.append(conv.get_prompt())

    # Tokenize conversations
    if has_image:
        input_ids: torch.Tensor = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversations], dim=0)
    else:
        input_ids: torch.Tensor = tokenizer(
            conversations,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        ).input_ids

    targets: torch.Tensor = input_ids.clone()
    assert conv.sep_style == conversation_lib.SeparatorStyle.GEMMA

    # Mask target
    sep: str = conv.sep + conv.roles[1]
    for conversation, target in zip(conversations, targets):
        total_len: int = int(target.ne(tokenizer.pad_token_id).sum())

        # Re-pair separator-split pieces into (user, assistant) rounds.
        rounds: List[str] = conversation.split(conv.sep)
        re_rounds = []
        for conv_idx in range(0, len(rounds), 2):
            re_rounds.append(conv.sep.join(rounds[conv_idx : conv_idx + 2]))

        cur_len = 1  # Ignore <bos>
        target[:cur_len] = IGNORE_INDEX
        for i, rou in enumerate(re_rounds):
            if rou == "":
                break

            parts = rou.split(sep)
            if len(parts) != 2:
                break
            parts[0] += sep  # Re-append sep because split on this
            # Now "".join(parts)==rou

            if has_image:
                round_len = len(tokenizer_image_token(rou, tokenizer)) - 1  # Ignore <bos>
                instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 1  # Ignore <bos>
            else:
                round_len = len(tokenizer(rou).input_ids) - 1  # Ignore <bos>
                instruction_len = len(tokenizer(parts[0]).input_ids) - 1  # Ignore <bos>

            round_len += 2  # sep: <end_of_turn>\n takes 2 tokens
            target[cur_len : cur_len + instruction_len] = IGNORE_INDEX
            cur_len += round_len

        target[cur_len:] = IGNORE_INDEX

        if cur_len < tokenizer.model_max_length:
            if cur_len != total_len:
                # Token accounting drifted; mask the whole sample.
                target[:] = IGNORE_INDEX
                rank0_print(f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}." f" (ignored)")

    return dict(
        input_ids=input_ids,
        labels=targets,
    )
565
+
566
+
567
def preprocess_qwen(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False, max_len=2048, system_message: str = "You are a helpful assistant.") -> Dict:
    """Tokenize conversations in Qwen ChatML format and build labels.

    Builds <|im_start|>role ... <|im_end|> blocks token-by-token (inserting
    IMAGE_TOKEN_INDEX for a leading <image> tag) and masks system/user spans
    with IGNORE_INDEX. Returns dict(input_ids=..., labels=...).
    NOTE(review): `max_len`, `_user` and `_assistant` are computed/accepted
    but unused in the current body (padding is commented out below).
    """
    roles = {"human": "<|im_start|>user", "gpt": "<|im_start|>assistant"}

    # Assumes the first two additional special tokens are im_start/im_end
    # (standard for Qwen tokenizers) — TODO confirm for other tokenizers.
    im_start, im_end = tokenizer.additional_special_tokens_ids
    nl_tokens = tokenizer("\n").input_ids
    _system = tokenizer("system").input_ids + nl_tokens
    _user = tokenizer("user").input_ids + nl_tokens
    _assistant = tokenizer("assistant").input_ids + nl_tokens

    # Apply prompt templates
    input_ids, targets = [], []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != roles["human"]:
            # Skip a leading non-human turn.
            source = source[1:]

        input_id, target = [], []
        system = [im_start] + _system + tokenizer(system_message).input_ids + [im_end] + nl_tokens
        input_id += system
        # len(system) - 3 masks everything between im_start and im_end.
        target += [im_start] + [IGNORE_INDEX] * (len(system) - 3) + [im_end] + nl_tokens
        assert len(input_id) == len(target)
        for j, sentence in enumerate(source):
            role = roles[sentence["from"]]
            if has_image and "<image>" in sentence["value"]:
                # The image tag must be at the very start of the turn.
                assert sentence["value"].startswith("<image>"), print(sentence["value"])

                _input_id = tokenizer(role).input_ids + nl_tokens + [IMAGE_TOKEN_INDEX] + nl_tokens + tokenizer(sentence["value"][len("<image>") :]).input_ids + [im_end] + nl_tokens
            else:
                _input_id = tokenizer(role).input_ids + nl_tokens + tokenizer(sentence["value"]).input_ids + [im_end] + nl_tokens
            input_id += _input_id
            if role == "<|im_start|>user":
                # User turn: fully masked between im_start and im_end.
                _target = [im_start] + [IGNORE_INDEX] * (len(_input_id) - 3) + [im_end] + nl_tokens
            elif role == "<|im_start|>assistant":
                # Assistant turn: mask only the role header, keep the reply.
                _target = [im_start] + [IGNORE_INDEX] * len(tokenizer(role).input_ids) + _input_id[len(tokenizer(role).input_ids) + 1 : -2] + [im_end] + nl_tokens
            else:
                raise NotImplementedError
            target += _target
        assert len(input_id) == len(target)
        # input_id += [tokenizer.pad_token_id] * (max_len - len(input_id))
        # target += [IGNORE_INDEX] * (max_len - len(target))
        input_ids.append(input_id)
        targets.append(target)
    input_ids = torch.tensor(input_ids, dtype=torch.long)
    targets = torch.tensor(targets, dtype=torch.long)

    return dict(
        input_ids=input_ids,  # tensor(bs x seq_len)
        labels=targets,  # tensor(bs x seq_len)
        # attention_mask=input_ids.ne(tokenizer.pad_token_id), # tensor(bs x seq_len)
    )
616
+
617
+
618
def preprocess_llama3(
    sources,
    tokenizer: transformers.PreTrainedTokenizer,
    has_image: bool = False,
    max_len=2048,
    system_message: str = "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.",
) -> Dict:
    """Tokenize conversations in the Llama-3 header-token format and build labels.

    Builds <|start_header_id|>role<|end_header_id|> ... <|eot_id|> blocks
    token-by-token (inserting IMAGE_TOKEN_INDEX for a leading <image> tag)
    and masks system/user spans with IGNORE_INDEX so only assistant replies
    are supervised. Returns dict(input_ids=..., labels=...).
    NOTE(review): `max_len` is accepted but unused in the current body.
    """
    roles = {"human": "<|start_header_id|>user<|end_header_id|>", "gpt": "<|start_header_id|>assistant<|end_header_id|>"}

    eot_id = tokenizer.convert_tokens_to_ids("<|eot_id|>")
    nl_tokens = tokenizer("\n").input_ids

    # Apply prompt templates
    input_ids, targets = [], []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != roles["human"]:
            # Skip a leading non-human turn.
            source = source[1:]

        input_id, target = [], []
        # System block: BOS + system header + "\n\n" + message + <|eot_id|>.
        system = tokenizer("<|begin_of_text|>").input_ids + tokenizer("<|start_header_id|>system<|end_header_id|>").input_ids + nl_tokens * 2 + tokenizer(system_message).input_ids + [eot_id]
        input_id += system
        target += [IGNORE_INDEX] * len(system)
        for j, sentence in enumerate(source):
            role = roles[sentence["from"]]
            if has_image and "<image>" in sentence["value"]:
                # The image tag must be at the very start of the turn.
                assert sentence["value"].startswith("<image>"), print(sentence["value"])
                _input_id = tokenizer(role).input_ids + nl_tokens * 2 + [IMAGE_TOKEN_INDEX] + tokenizer(sentence["value"][len("<image>") :]).input_ids + [eot_id]
            else:
                _input_id = tokenizer(role).input_ids + nl_tokens * 2 + tokenizer(sentence["value"]).input_ids + [eot_id]
            input_id += _input_id
            if role == "<|start_header_id|>user<|end_header_id|>":
                # User turns are fully masked.
                _target = [IGNORE_INDEX] * len(_input_id)
            elif role == "<|start_header_id|>assistant<|end_header_id|>":
                # Mask the role header (+2 newline tokens), keep the reply.
                _target = [IGNORE_INDEX] * (len(tokenizer(role).input_ids) + 2) + _input_id[len(tokenizer(role).input_ids) + 2 : -1] + [eot_id]
            else:
                raise NotImplementedError
            target += _target
        assert len(input_id) == len(target), f"{len(input_id)} != {len(target)}"
        input_ids.append(input_id)
        targets.append(target)
    input_ids = torch.tensor(input_ids, dtype=torch.long)
    targets = torch.tensor(targets, dtype=torch.long)

    return dict(
        input_ids=input_ids,  # tensor(bs x seq_len)
        labels=targets,  # tensor(bs x seq_len)
    )
665
+
666
+
667
def preprocess_v1(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False) -> Dict:
    """Tokenize conversations with the Vicuna-v1 (SeparatorStyle.TWO) template.

    Renders each conversation, tokenizes it, then masks instruction spans
    with IGNORE_INDEX so only assistant replies contribute to the loss.
    Returns dict(input_ids=..., labels=...).
    """
    conv = conversation_lib.default_conversation.copy()
    roles = {"human": conv.roles[0], "gpt": conv.roles[1]}

    # Apply prompt templates
    conversations = []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != conv.roles[0]:
            # Skip the first one if it is not from human
            source = source[1:]

        conv.messages = []
        for j, sentence in enumerate(source):
            role = roles[sentence["from"]]
            # Turns must strictly alternate human/gpt.
            assert role == conv.roles[j % 2], f"{i}"
            conv.append_message(role, sentence["value"])
        conversations.append(conv.get_prompt())

    # Tokenize conversations

    if has_image:
        input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversations], dim=0)
    else:
        input_ids = tokenizer(
            conversations,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        ).input_ids

    targets = input_ids.clone()

    assert conv.sep_style == conversation_lib.SeparatorStyle.TWO

    # Mask targets: everything up to "ASSISTANT: " in each round is masked.
    sep = conv.sep + conv.roles[1] + ": "
    for conversation, target in zip(conversations, targets):
        total_len = int(target.ne(tokenizer.pad_token_id).sum())

        rounds = conversation.split(conv.sep2)
        cur_len = 1  # skip the leading BOS token
        target[:cur_len] = IGNORE_INDEX
        for i, rou in enumerate(rounds):
            if rou == "":
                break

            parts = rou.split(sep)
            if len(parts) != 2:
                break
            parts[0] += sep  # re-append sep so parts[0] covers the full instruction

            if has_image:
                round_len = len(tokenizer_image_token(rou, tokenizer))
                instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2
            else:
                round_len = len(tokenizer(rou).input_ids)
                instruction_len = len(tokenizer(parts[0]).input_ids) - 2

            # Newer (non-legacy) tokenizers emit one fewer token at round
            # boundaries after the first round; compensate here.
            if i != 0 and not tokenizer.legacy and IS_TOKENIZER_GREATER_THAN_0_14:
                round_len -= 1
                instruction_len -= 1

            target[cur_len : cur_len + instruction_len] = IGNORE_INDEX

            cur_len += round_len
        target[cur_len:] = IGNORE_INDEX

        if cur_len < tokenizer.model_max_length:
            if cur_len != total_len:
                # Token accounting drifted; mask the whole sample.
                target[:] = IGNORE_INDEX
                print(f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}." f" (ignored)")

    return dict(
        input_ids=input_ids,
        labels=targets,
    )
744
+
745
+
746
def preprocess_mpt(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False) -> Dict:
    """Tokenize conversations with the MPT (SeparatorStyle.MPT) template.

    Renders each conversation, tokenizes it, then masks instruction spans
    with IGNORE_INDEX so only assistant replies are supervised.
    Returns dict(input_ids=..., labels=...).
    """
    conv = conversation_lib.default_conversation.copy()
    roles = {"human": conv.roles[0], "gpt": conv.roles[1]}

    # Apply prompt templates
    conversations = []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != conv.roles[0]:
            # Skip the first one if it is not from human
            source = source[1:]

        conv.messages = []
        for j, sentence in enumerate(source):
            role = roles[sentence["from"]]
            # Turns must strictly alternate human/gpt.
            assert role == conv.roles[j % 2], f"{i}"
            conv.append_message(role, sentence["value"])
        conversations.append(conv.get_prompt())

    # Tokenize conversations

    if has_image:
        input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversations], dim=0)
    else:
        input_ids = tokenizer(
            conversations,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        ).input_ids

    targets = input_ids.clone()
    assert conv.sep_style == conversation_lib.SeparatorStyle.MPT

    # Mask targets
    sep = conv.sep + conv.roles[1]
    for conversation, target in zip(conversations, targets):
        total_len = int(target.ne(tokenizer.pad_token_id).sum())

        rounds = conversation.split(conv.sep)
        re_rounds = [conv.sep.join(rounds[:3])]  # system + user + gpt
        for conv_idx in range(3, len(rounds), 2):
            re_rounds.append(conv.sep.join(rounds[conv_idx : conv_idx + 2]))  # user + gpt
        cur_len = 1  # skip the leading BOS token
        target[:cur_len] = IGNORE_INDEX
        for i, rou in enumerate(re_rounds):
            if rou == "":
                break

            parts = rou.split(sep)
            if len(parts) != 2:
                break
            parts[0] += sep  # re-append sep so parts[0] covers the full instruction

            if has_image:
                round_len = len(tokenizer_image_token(rou, tokenizer))
                instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 1
            else:
                round_len = len(tokenizer(rou).input_ids)
                instruction_len = len(tokenizer(parts[0]).input_ids) - 1

            # NOTE(review): legacy-mode adjustment adds one token per round
            # here (opposite sign to preprocess_v1's non-legacy branch) —
            # presumably intentional for MPT-style separators; verify.
            if i != 0 and getattr(tokenizer, "legacy", False) and IS_TOKENIZER_GREATER_THAN_0_14:
                round_len += 1
                instruction_len += 1

            target[cur_len : cur_len + instruction_len] = IGNORE_INDEX

            cur_len += round_len
        target[cur_len:] = IGNORE_INDEX

        if cur_len < tokenizer.model_max_length:
            if cur_len != total_len:
                # Token accounting drifted; mask the whole sample.
                target[:] = IGNORE_INDEX
                print(f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}." f"(#turns={len(re_rounds)} ignored)")

    return dict(
        input_ids=input_ids,
        labels=targets,
    )
825
+
826
+
827
def preprocess_plain(
    sources: Sequence[str],
    tokenizer: transformers.PreTrainedTokenizer,
) -> Dict:
    """Tokenize two-turn image/caption pairs with no chat template.

    Each source must be exactly [image turn, caption turn]; the image turn
    is collapsed to the bare image token, and only the caption tokens are
    supervised (image-token prefix masked with IGNORE_INDEX).
    Returns dict(input_ids=..., labels=...) as lists of tensors.
    """
    # add end signal and concatenate together
    conversations = []
    for source in sources:
        assert len(source) == 2
        assert DEFAULT_IMAGE_TOKEN in source[0]["value"]
        source[0]["value"] = DEFAULT_IMAGE_TOKEN
        conversation = source[0]["value"] + source[1]["value"] + conversation_lib.default_conversation.sep
        conversations.append(conversation)
    # tokenize conversations
    input_ids = [tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversations]
    targets = copy.deepcopy(input_ids)
    for target, source in zip(targets, sources):
        # Mask the image-token prefix so loss applies to the caption only.
        tokenized_len = len(tokenizer_image_token(source[0]["value"], tokenizer))
        target[:tokenized_len] = IGNORE_INDEX

    return dict(input_ids=input_ids, labels=targets)
847
+
848
+
849
def preprocess(sources: Sequence[str], tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False) -> Dict:
    """
    Given a list of sources, each is a conversation list. This transform:
    1. Add signal '### ' at the beginning each sentence, with end signal '\n';
    2. Concatenate conversations together;
    3. Tokenize the concatenated conversation;
    4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.

    Dispatches to the template-specific preprocessor selected by the current
    default conversation's separator style / version; the body below is the
    legacy '### speaker' fallback.
    """
    if conversation_lib.default_conversation.sep_style == conversation_lib.SeparatorStyle.PLAIN:
        return preprocess_plain(sources, tokenizer)
    if conversation_lib.default_conversation.sep_style == conversation_lib.SeparatorStyle.LLAMA_2:
        return preprocess_llama_2(sources, tokenizer, has_image=has_image)
    if conversation_lib.default_conversation.version.startswith("v1"):
        return preprocess_v1(sources, tokenizer, has_image=has_image)
    if conversation_lib.default_conversation.version == "mpt":
        return preprocess_mpt(sources, tokenizer, has_image=has_image)
    if conversation_lib.default_conversation.version == "qwen":
        return preprocess_qwen(sources, tokenizer, has_image=has_image)
    if conversation_lib.default_conversation.version == "gemma":
        return preprocess_gemma(sources, tokenizer, has_image=has_image)
    if conversation_lib.default_conversation.version == "llama_v3":
        return preprocess_llama3(sources, tokenizer, has_image=has_image)
    # add end signal and concatenate together
    conversations = []
    for source in sources:
        header = f"{conversation_lib.default_conversation.system}\n\n"
        conversation = _add_speaker_and_signal(header, source)
        conversations.append(conversation)

    # tokenize conversations
    def get_tokenize_len(prompts):
        return [len(tokenizer_image_token(prompt, tokenizer)) for prompt in prompts]

    if has_image:
        input_ids = [tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversations]
    else:
        conversations_tokenized = _tokenize_fn(conversations, tokenizer)
        input_ids = conversations_tokenized["input_ids"]

    targets = copy.deepcopy(input_ids)
    for target, source in zip(targets, sources):
        if has_image:
            # NOTE(review): `header` here is the value left over from the
            # last iteration of the loop above; it is the same for every
            # source, so this works, but it reads as accidental.
            tokenized_lens = get_tokenize_len([header] + [s["value"] for s in source])
        else:
            tokenized_lens = _tokenize_fn([header] + [s["value"] for s in source], tokenizer)["input_ids_lens"]
        speakers = [sentence["from"] for sentence in source]
        _mask_targets(target, tokenized_lens, speakers)

    return dict(input_ids=input_ids, labels=targets)
898
+
899
+
900
def load_data(data_path):
    """Load a list of samples from a JSON or JSONL file.

    Args:
        data_path: path to a ``.json`` or ``.jsonl`` file.

    Returns:
        The list of samples produced by ``load_jsonl`` / ``load_json``.

    BUG FIX: the format was previously detected with ``"jsonl" in data_path``,
    which misclassified any *.json* file whose path merely contains "jsonl"
    (e.g. a ``jsonl_files/`` directory). Decide by the file extension instead.
    """
    if data_path.endswith(".jsonl"):
        data_list = load_jsonl(data_path)
    else:
        data_list = load_json(data_path)
    return data_list
906
+
907
+
908
class DPODataset(Dataset):
    """Dataset for DPODataset fine-tuning.

    Lazily serves raw DPO samples (prompt / chosen / rejected plus optional
    image or video) loaded from one or more JSON/JSONL files, a brace-pattern
    path like ``base{a,b}.json``, or a YAML manifest with per-file sampling
    strategies. Tokenization happens later in the collator/trainer, not here.
    """

    def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer, data_args: DataArguments):
        super(DPODataset, self).__init__()
        # Handle multiple JSON files specified in the data_path
        self.list_data_dict = []

        if "{" in data_path and "}" in data_path:
            # Brace expansion: "prefix{a,b,c}.json" -> prefix + each name + ".json".
            base_path, file_pattern = re.match(r"^(.*)\{(.*)\}\.json$", data_path).groups()
            file_names = file_pattern.split(",")
            rank0_print(f"Loading {file_names} from {base_path}")
            data_args.dataset_paths = []
            for file_name in file_names:
                data_args.dataset_paths.append(f"{base_path}{file_name}.json")
                full_path = f"{base_path}{file_name}.json"
                rank0_print(f"Loading {full_path}")
                cur_data_dict = load_data(full_path)
                rank0_print(f"Loaded {len(cur_data_dict)} samples from {full_path}")
                self.list_data_dict.extend(cur_data_dict)
        elif data_path.endswith(".yaml"):
            with open(data_path, "r") as file:
                yaml_data = yaml.safe_load(file)
                datasets = yaml_data.get("datasets")
                # file should be in the format of:
                # datasets:
                #   - json_path: xxxx1.json
                #     sampling_strategy: first:1000
                #   - json_path: xxxx2.json
                #     sampling_strategy: end:3000
                #   - json_path: xxxx3.json
                #     sampling_strategy: random:999
                data_args.dataset_paths = [dataset.get("json_path") for dataset in datasets]
                for dataset in datasets:
                    json_path = dataset.get("json_path")
                    sampling_strategy = dataset.get("sampling_strategy", "all")
                    sampling_number = None

                    rank0_print(f"Loading {json_path} with {sampling_strategy} sampling strategy")
                    cur_data_dict = load_data(json_path)

                    # "first:N" / "end:N" / "random:N" / "first:P%" etc.
                    if ":" in sampling_strategy:
                        sampling_strategy, sampling_number = sampling_strategy.split(":")
                        if "%" in sampling_number:
                            sampling_number = math.ceil(int(sampling_number.split("%")[0]) * len(cur_data_dict) / 100)
                        else:
                            sampling_number = int(sampling_number)

                    # Apply the sampling strategy
                    if sampling_strategy == "first" and sampling_number is not None:
                        cur_data_dict = cur_data_dict[:sampling_number]
                    elif sampling_strategy == "end" and sampling_number is not None:
                        cur_data_dict = cur_data_dict[-sampling_number:]
                    elif sampling_strategy == "random" and sampling_number is not None:
                        random.shuffle(cur_data_dict)
                        cur_data_dict = cur_data_dict[:sampling_number]

                    rank0_print(f"Loaded {len(cur_data_dict)} samples from {json_path}")
                    self.list_data_dict.extend(cur_data_dict)
        else:
            # Single plain JSON/JSONL file.
            data_args.dataset_paths = [data_path]
            rank0_print(f"Loading {data_path}")
            cur_data_dict = load_data(data_path)
            rank0_print(f"Loaded {len(cur_data_dict)} samples from {data_path}")
            self.list_data_dict.extend(cur_data_dict)

        rank0_print("Formatting inputs...Skip in lazy mode")
        self.tokenizer = tokenizer
        self.data_args = data_args

    def __len__(self):
        return len(self.list_data_dict)

    @property
    def lengths(self):
        """Approximate per-sample token lengths (whitespace word counts),
        used by length-grouped samplers; +128 for image samples."""
        length_list = []
        for sample in self.list_data_dict:
            # Calculate the length of the prompt, answer, chosen, and rejected text
            cur_len = len(sample["prompt"].split()) + len(sample["answer"].split()) + len(sample["chosen"].split()) + len(sample["rejected"].split())
            # Add additional tokens if an image is present
            img_tokens = 128 if "image" in sample else 0
            length_list.append(cur_len + img_tokens)
        return length_list

    @property
    def modality_lengths(self):
        """Like `lengths`, but the sign encodes modality: positive for
        image/video samples, negative for text-only samples."""
        length_list = []
        for sample in self.list_data_dict:
            # Calculate the length of the prompt, answer, chosen, and rejected text
            cur_len = len(sample["prompt"].split()) + len(sample["answer"].split()) + len(sample["chosen"].split()) + len(sample["rejected"].split())
            # If the sample includes a video, the length is positive; otherwise, it is negative
            cur_len = cur_len if ("video" in sample or "image" in sample) else -cur_len
            length_list.append(cur_len)
        return length_list

    def process_image(self, image_file):
        """Load one image from the configured image folder and preprocess it
        according to `data_args.image_aspect_ratio`.

        Returns a (tensor, original_size, "image") triple.
        """
        image_folder = self.data_args.image_folder
        processor = self.data_args.image_processor
        # print(f"\n\nInspecting the image path, folder = {image_folder}, image={image_file}\n\n")
        try:
            image = Image.open(os.path.join(image_folder, image_file)).convert("RGB")
        except Exception as exn:
            print(f"Failed to open image {image_file}. Exception:", exn)
            raise exn

        image_size = image.size
        if self.data_args.image_aspect_ratio == "highres":
            image = process_highres_image(image, self.data_args.image_processor, self.data_args.image_grid_pinpoints)
        elif self.data_args.image_aspect_ratio == "anyres" or "anyres" in self.data_args.image_aspect_ratio:
            image = process_anyres_image(image, self.data_args.image_processor, self.data_args.image_grid_pinpoints)
        elif self.data_args.image_aspect_ratio == "crop_split":
            image = process_highres_image_crop_split(image, self.data_args)
        elif self.data_args.image_aspect_ratio == "pad":

            def expand2square(pil_img, background_color):
                # Pad the shorter side so the image becomes square, keeping
                # the original content centered.
                width, height = pil_img.size
                if width == height:
                    return pil_img
                elif width > height:
                    result = Image.new(pil_img.mode, (width, width), background_color)
                    result.paste(pil_img, (0, (width - height) // 2))
                    return result
                else:
                    result = Image.new(pil_img.mode, (height, height), background_color)
                    result.paste(pil_img, ((height - width) // 2, 0))
                    return result

            image = expand2square(image, tuple(int(x * 255) for x in processor.image_mean))
            image = processor.preprocess(image, return_tensors="pt")["pixel_values"][0]
        else:
            image = processor.preprocess(image, return_tensors="pt")["pixel_values"][0]
        return image, image_size, "image"

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # TODO: define number of retries somewhere else
        num_base_retries = 3
        num_final_retries = 300

        # try the current sample first
        for attempt_idx in range(num_base_retries):
            try:
                sample = self._get_item(i)
                return sample
            except Exception as e:
                # sleep 1s in case it is a cloud disk issue
                print(f"[Try #{attempt_idx}] Failed to fetch sample {i}. Exception:", e)
                time.sleep(1)

        # try other samples, in case it is file corruption issue
        for attempt_idx in range(num_base_retries):
            try:
                next_index = min(i + 1, len(self.list_data_dict) - 1)
                # sample_idx = random.choice(range(len(self)))
                sample = self._get_item(next_index)
                return sample
            except Exception as e:
                # no need to sleep
                print(f"[Try other #{attempt_idx}] Failed to fetch sample {next_index}. Exception:", e)
                pass

        # still fail, most likely to be path issue or cloud disk issue, retry the same sample for longer
        # for attempt_idx in range(num_final_retries):
        #     try:
        #         sample = self._get_item(i)
        #         return sample
        #     except Exception as e:
        #         # sleep 1s in case it is a cloud disk issue
        #         print(f"[Final try #{attempt_idx}] Failed to fetch sample {i}. Exception:", e)
        #         time.sleep(1)

        # Finally raise exception on failing.
        assert False, "Failed to fetch sample."

    def _get_item(self, i) -> Dict[str, torch.Tensor]:
        """Build the raw sample dict for index `i`: the stored record plus a
        normalized "prompt", an "image" list of (tensor, size, modality)
        triples (or a zero tensor for text-only multimodal runs), and a
        "has_image" flag."""
        sources = self.list_data_dict[i]
        if isinstance(i, int):
            sources = [sources]
        assert len(sources) == 1, "Don't know why it is wrapped to a list"  # FIXME

        suffix = None
        if "image" in sources[0]:
            image_file = self.list_data_dict[i]["image"]
            if type(image_file) is list:
                image = [self.process_image(f) for f in image_file]
            else:
                image = [self.process_image(image_file)]
            # sources = preprocess_multimodal(copy.deepcopy([e["conversations"] for e in sources]), self.data_args)

        elif "video" in sources[0]:  # FIXME: This logic should be largely improved by Yuanhan. It's too messy now.
            video_file = self.list_data_dict[i]["video"]
            video_folder = self.data_args.video_folder
            video_file = os.path.join(video_folder, video_file)
            suffix = video_file.split(".")[-1]
            if not os.path.exists(video_file):
                print("File {} not exist!".format(video_file))

            if suffix == "pkl":
                # Pre-extracted feature path: features + prompt come from a pickle.
                video_info = pickle.load(open(video_file, "rb"))
                image = torch.from_numpy(video_info["feats"][:, 1:])
                input_prompt = video_info["inputs"].replace("...", "")
                # replace the default image token with multiple tokens
                input_prompt = input_prompt.replace(DEFAULT_IMAGE_TOKEN, DEFAULT_IMAGE_TOKEN * self.data_args.video_token)
                sources, query_prompt = preprocess_multimodal_movie(copy.deepcopy([e["conversations"] for e in sources]), self.data_args, input_prompt)
            else:  # using videoreader
                if "shareVideoGPTV" not in video_file and "liangke" not in video_file:
                    # Decode frames with decord at the configured fps, capped
                    # by frames_upbound via uniform resampling.
                    vr = VideoReader(video_file, ctx=cpu(0))
                    total_frame_num = len(vr)
                    avg_fps = round(vr.get_avg_fps() / self.data_args.video_fps)
                    frame_idx = [i for i in range(0, total_frame_num, avg_fps)]
                    if self.data_args.frames_upbound > 0:
                        if len(frame_idx) > self.data_args.frames_upbound:
                            uniform_sampled_frames = np.linspace(0, total_frame_num - 1, self.data_args.frames_upbound, dtype=int)
                            frame_idx = uniform_sampled_frames.tolist()
                    video = vr.get_batch(frame_idx).asnumpy()
                    video = np.array(video)
                else:
                    # Frame-folder datasets: read pre-extracted frame images.
                    if "liangke" in video_file:
                        video_file = self.list_data_dict[i]["video"]
                    frame_files = [os.path.join(video_file, f) for f in os.listdir(video_file) if os.path.isfile(os.path.join(video_file, f))]
                    frame_files.sort()  # Ensure the frames are sorted if they are named sequentially

                    # TODO: Hard CODE: Determine the indices for uniformly sampling 10 frames
                    num_frames_to_sample = 10

                    total_frames = len(frame_files)

                    sampled_indices = np.linspace(0, total_frames - 1, num_frames_to_sample, dtype=int)

                    # Read and store the sampled frames
                    video = []
                    for idx in sampled_indices:
                        frame_path = frame_files[idx]
                        try:
                            with Image.open(frame_path) as img:
                                frame = img.convert("RGB")
                                video.append(frame)
                        except IOError:
                            print(f"Failed to read frame at path: {frame_path}")

                processor = self.data_args.image_processor
                image = processor.preprocess(video, return_tensors="pt")["pixel_values"]
                image = [(image, video[0].size, "video")]
                # sources = preprocess_multimodal(copy.deepcopy([e["conversations"] for e in sources]), self.data_args)

        else:
            sources = copy.deepcopy([e["conversations"] for e in sources])

        has_image = ("image" in self.list_data_dict[i]) or ("video" in self.list_data_dict[i])
        # data_dict = preprocess(sources, self.tokenizer, has_image=has_image)
        data_dict = copy.deepcopy(self.list_data_dict[i])  # inplace modification following

        if "prompt" in data_dict:
            # Normalize the prompt so the image token is always a leading line.
            prompt = data_dict["prompt"]
            prompt = prompt.replace("<image>", "").strip()
            prompt = "<image>\n" + prompt
            data_dict["prompt"] = prompt
        else:
            prompt = None

        if suffix == "pkl":
            prompt = [query_prompt]

        # image exist in the data
        if "image" in self.list_data_dict[i]:
            data_dict["image"] = image
        elif "video" in self.list_data_dict[i]:
            data_dict["image"] = image
        elif self.data_args.is_multimodal:
            # image does not exist in the data, but the model is multimodal
            crop_size = self.data_args.image_processor.crop_size
            data_dict["image"] = [
                (torch.zeros(1, 3, crop_size["height"], crop_size["width"]), (crop_size["width"], crop_size["height"]), "text"),
            ]
        # prompt exist in the data
        data_dict["has_image"] = has_image
        return data_dict
1184
+
1185
+
1186
@dataclass
class DPODataCollator(DPODataCollatorWithPadding):
    """Collate examples for DPO fine-tuning.

    Each incoming feature carries a raw ``prompt`` string plus ``chosen``
    and ``rejected`` response strings, a ``has_image`` flag, and optionally
    a preprocessed ``image`` list. ``__call__`` tokenizes every element,
    pads the token sequences to a common batch length, and flattens the
    per-example image metadata into batch-level lists.
    """

    def collate(self, batch):
        """Pad the tokenized fields of *batch* (a list of dicts) into tensors.

        Keys ending in ``_input_ids`` are padded with the tokenizer's pad
        token and keys ending in ``_labels`` with ``self.label_pad_token_id``.
        ``_attention_mask`` keys are deliberately skipped here and rebuilt
        from the padded input ids after the loop. Every other key is passed
        through as a plain Python list.
        """
        padded_batch = {}
        for k in batch[0].keys():
            if k.endswith("_input_ids") or k.endswith("_attention_mask") or k.endswith("_labels"):
                to_pad = [torch.LongTensor(ex[k]) for ex in batch]
                if k.endswith("_input_ids"):
                    padding_value = self.tokenizer.pad_token_id
                elif k.endswith("_labels"):
                    padding_value = self.label_pad_token_id
                else:
                    # Attention masks are not padded directly; they are
                    # recomputed from the padded input ids below.
                    continue

                padded_batch[k] = torch.nn.utils.rnn.pad_sequence(to_pad, batch_first=True, padding_value=padding_value)
            else:
                # Non-token fields (e.g. images, flags) stay as Python lists.
                padded_batch[k] = [ex[k] for ex in batch]
        # Rebuild attention masks: attend to every non-pad position.
        for k in ["chosen_input_ids", "rejected_input_ids"]:
            attn_k = k.replace("input_ids", "attention_mask")
            padded_batch[attn_k] = padded_batch[k].ne(self.tokenizer.pad_token_id)
        return padded_batch

    def tokenize_batch_element(self, prompt: str, chosen: str, rejected: str, has_image: bool = True) -> Dict:
        """Tokenize a single batch element.

        At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation
        in case the prompt + chosen or prompt + rejected responses is/are too long. First
        we truncate the prompt; if we're still too long, we truncate the chosen/rejected.

        We also create the labels for the chosen/rejected responses, which are of length equal to
        the sum of the length of the prompt and the chosen/rejected response, with
        label_pad_token_id for the prompt tokens.
        """
        batch = {}

        # Build full conversations (prompt + response) for both candidates
        # and run them through the project-level `preprocess` tokenization.
        chosen_sources = make_conv(prompt, chosen)
        rejected_sources = make_conv(prompt, rejected)
        chosen_data_dict = preprocess([chosen_sources], self.tokenizer, has_image=has_image)

        rejected_data_dict = preprocess([rejected_sources], self.tokenizer, has_image=has_image)

        # `preprocess` returns batched (length-1) containers; unwrap to the
        # single underlying example.
        chosen_data_dict = {k: v[0] for k, v in chosen_data_dict.items()}
        rejected_data_dict = {k: v[0] for k, v in rejected_data_dict.items()}

        # Prefix every tokenized field with "chosen_"/"rejected_" so the two
        # candidates coexist in one flat dict; token_type_ids are dropped.
        for k, toks in {
            "chosen": chosen_data_dict,
            "rejected": rejected_data_dict,
        }.items():
            for type_key, tokens in toks.items():
                if type_key == "token_type_ids":
                    continue
                batch[f"{k}_{type_key}"] = tokens
        return batch

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Tokenize, collate, and attach flattened image metadata for *features*."""
        tokenized_batch = []
        # NOTE(review): Xs/keys appear to be unused leftovers from a
        # multimodal variant of this collator — confirm and remove upstream.
        Xs, keys = [], []
        for feature in features:
            prompt = feature["prompt"]
            chosen = feature["chosen"]
            rejected = feature["rejected"]
            has_image = feature["has_image"]

            batch_element = self.tokenize_batch_element(prompt, chosen, rejected, has_image=has_image)
            tokenized_batch.append(batch_element)

        # Pad all token fields into tensors; pass everything else through.
        padded_batch = self.collate(tokenized_batch)
        if "image" in features[0]:
            # Each feature's "image" is a list of (tensor, size, modality)
            # tuples; flatten across the whole batch into parallel lists.
            images = [instance["image"] for instance in features]

            padded_batch["image_sizes"] = [im[1] for im_list in images for im in im_list]
            padded_batch["modalities"] = [im[2] for im_list in images for im in im_list]
            images = [im[0] for im_list in images for im in im_list]

            padded_batch["images"] = images

        return padded_batch
1306
+
1307
+
1308
def make_dpo_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict:
    """Build and return the training dataset for DPO fine-tuning.

    Note: despite the name and return annotation, this returns only the
    ``DPODataset`` instance; the collator is constructed separately by the
    caller.
    """
    return DPODataset(
        tokenizer=tokenizer,
        data_path=data_args.data_path,
        data_args=data_args,
    )
1312
+
1313
+
1314
def get_model(model_args, training_args, bnb_model_from_pretrained_args):
    """Instantiate the policy model (and, for some families under DeepSpeed
    ZeRO-3, a frozen-to-be reference model) selected by substring matching
    on ``model_args.model_name_or_path``.

    Returns:
        (model, ref_model): ``ref_model`` is None unless the llama- or
        qwen-family branch is taken AND "zero3" appears in
        ``training_args.deepspeed``.
    """
    assert training_args.attn_implementation
    # NOTE(review): this is a lexicographic *string* comparison, so e.g.
    # "2.10.0" < "2.1.2" evaluates True — consider a proper version parse.
    if training_args.attn_implementation == "sdpa" and torch.__version__ < "2.1.2":
        raise ValueError("The 'sdpa' attention implementation requires torch version 2.1.2 or higher.")

    ######################### Overwrite config #########################
    customized_kwargs = dict()
    customized_kwargs.update(bnb_model_from_pretrained_args)
    overwrite_config = {}
    cfg_pretrained = None
    # Pick the config class matching the model family (substring match).
    if "qwen" in model_args.model_name_or_path.lower():
        cfg_pretrained = LlavaQwenConfig.from_pretrained(model_args.model_name_or_path)
    elif "mistral" in model_args.model_name_or_path.lower() or "zephyr" in model_args.model_name_or_path.lower():
        cfg_pretrained = LlavaMistralConfig.from_pretrained(model_args.model_name_or_path)
    elif (
        "wizardlm-2" in model_args.model_name_or_path.lower()
        or "vicuna" in model_args.model_name_or_path.lower()
        or "llama" in model_args.model_name_or_path.lower()
        or "yi" in model_args.model_name_or_path.lower()
        # NOTE(review): `and` binds tighter than `or`, so the last clause
        # groups as (nous-hermes AND wizard-2) — confirm this is intended.
        or "nous-hermes" in model_args.model_name_or_path.lower()
        and "wizard-2" in model_args.model_name_or_path.lower()
    ):
        cfg_pretrained = LlavaConfig.from_pretrained(model_args.model_name_or_path)
    else:
        cfg_pretrained = AutoConfig.from_pretrained(model_args.model_name_or_path)

    # Optionally extend the context window via RoPE scaling; keep
    # model_max_length consistent with the scaled position embeddings.
    if model_args.rope_scaling_factor is not None and model_args.rope_scaling_type is not None and cfg_pretrained is not None:
        overwrite_config["rope_scaling"] = {
            "factor": model_args.rope_scaling_factor,
            "type": model_args.rope_scaling_type,
        }
        if training_args.model_max_length is None:
            training_args.model_max_length = cfg_pretrained.max_position_embeddings * model_args.rope_scaling_factor
            overwrite_config["max_sequence_length"] = training_args.model_max_length
        assert training_args.model_max_length == int(cfg_pretrained.max_position_embeddings * model_args.rope_scaling_factor), print(
            f"model_max_length: {training_args.model_max_length}, max_position_embeddings: {cfg_pretrained.max_position_embeddings}, rope_scaling_factor: {model_args.rope_scaling_factor}"
        )

    # Optional spatial pooling / resampler settings for the vision side.
    if model_args.mm_spatial_pool_stride is not None and model_args.mm_spatial_pool_out_channels is not None and model_args.mm_spatial_pool_mode is not None and model_args.mm_resampler_type is not None and cfg_pretrained is not None:
        overwrite_config["mm_resampler_type"] = model_args.mm_resampler_type
        overwrite_config["mm_spatial_pool_stride"] = model_args.mm_spatial_pool_stride
        overwrite_config["mm_spatial_pool_out_channels"] = model_args.mm_spatial_pool_out_channels
        overwrite_config["mm_spatial_pool_mode"] = model_args.mm_spatial_pool_mode

    if overwrite_config:
        rank0_print(f"Overwriting config with {overwrite_config}")
        for k, v in overwrite_config.items():
            setattr(cfg_pretrained, k, v)

        customized_kwargs["config"] = cfg_pretrained

    ######################### Finish Overwrite ###########################

    ref_model = None
    if model_args.model_class_name is not None:
        # Explicit class override: resolve "<Name>ForCausalLM" on transformers.
        actual_model_class_name = f"{model_args.model_class_name}ForCausalLM"
        model_class = getattr(transformers, actual_model_class_name)
        rank0_print(f"Using model class {model_class} from {model_args.model_class_name}")
        model = model_class.from_pretrained(
            model_args.model_name_or_path,
            cache_dir=training_args.cache_dir,
            attn_implementation=training_args.attn_implementation,
            torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
            low_cpu_mem_usage=False,
            **customized_kwargs,
        )
    elif model_args.vision_tower is not None:
        # Multimodal path: select a LLaVA variant by model-name substring.
        if "mixtral" in model_args.model_name_or_path.lower():
            model = LlavaMixtralForCausalLM.from_pretrained(
                model_args.model_name_or_path,
                cache_dir=training_args.cache_dir,
                attn_implementation=training_args.attn_implementation,
                torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                low_cpu_mem_usage=False,
                **customized_kwargs,
            )
            from transformers.models.mixtral.modeling_mixtral import MixtralSparseMoeBlock

            # Keep MoE blocks as ZeRO-3 "leaf" modules (not partitioned inside).
            deepspeed.utils.set_z3_leaf_modules(model, [MixtralSparseMoeBlock])
        elif "mistral" in model_args.model_name_or_path.lower() or "zephyr" in model_args.model_name_or_path.lower():
            model = LlavaMistralForCausalLM.from_pretrained(
                model_args.model_name_or_path,
                cache_dir=training_args.cache_dir,
                attn_implementation=training_args.attn_implementation,
                torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                low_cpu_mem_usage=False,
                **customized_kwargs,
            )
        elif (
            "wizardlm-2" in model_args.model_name_or_path.lower()
            or "vicuna" in model_args.model_name_or_path.lower()
            or "llama" in model_args.model_name_or_path.lower()
            or "yi" in model_args.model_name_or_path.lower()
            # NOTE(review): same `or`/`and` precedence grouping as above.
            or "nous-hermes" in model_args.model_name_or_path.lower()
            and "wizard-2" in model_args.model_name_or_path.lower()
        ):
            model = LlavaLlamaForCausalLM.from_pretrained(
                model_args.model_name_or_path,
                cache_dir=training_args.cache_dir,
                attn_implementation=training_args.attn_implementation,
                torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                low_cpu_mem_usage=False,
                **customized_kwargs,
            )

            # NOTE(review): assumes training_args.deepspeed is a string;
            # this raises TypeError if it is None — confirm callers always
            # pass a deepspeed config path.
            if "zero3" in training_args.deepspeed:
                rank0_print("#### Initialize reference model #####")
                ref_model = LlavaLlamaForCausalLM.from_pretrained(
                    model_args.model_name_or_path,
                    cache_dir=training_args.cache_dir,
                    attn_implementation=training_args.attn_implementation,
                    torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                    low_cpu_mem_usage=False,
                    **customized_kwargs,
                )

        elif "qwen" in model_args.model_name_or_path.lower() or "quyen" in model_args.model_name_or_path.lower():
            if "moe" in model_args.model_name_or_path.lower():
                model = LlavaQwenMoeForCausalLM.from_pretrained(
                    model_args.model_name_or_path,
                    cache_dir=training_args.cache_dir,
                    attn_implementation=training_args.attn_implementation,
                    torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                    low_cpu_mem_usage=False,
                    **customized_kwargs,
                )
                from transformers.models.qwen2_moe.modeling_qwen2_moe import Qwen2MoeSparseMoeBlock

                # Keep MoE blocks as ZeRO-3 "leaf" modules.
                deepspeed.utils.set_z3_leaf_modules(model, [Qwen2MoeSparseMoeBlock])
            else:
                model = LlavaQwenForCausalLM.from_pretrained(
                    model_args.model_name_or_path,
                    cache_dir=training_args.cache_dir,
                    attn_implementation=training_args.attn_implementation,
                    torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                    low_cpu_mem_usage=False,
                    **customized_kwargs,
                )

                if "zero3" in training_args.deepspeed:
                    rank0_print("#### Initialize reference model #####")
                    ref_model = LlavaQwenForCausalLM.from_pretrained(
                        model_args.model_name_or_path,
                        cache_dir=training_args.cache_dir,
                        attn_implementation=training_args.attn_implementation,
                        torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                        low_cpu_mem_usage=False,
                        **customized_kwargs,
                    )

        elif "gemma" in model_args.model_name_or_path.lower():
            model = LlavaGemmaForCausalLM.from_pretrained(
                model_args.model_name_or_path,
                cache_dir=training_args.cache_dir,
                attn_implementation=training_args.attn_implementation,
                torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                low_cpu_mem_usage=False,
                **customized_kwargs,
            )
        else:
            raise ValueError(f"Unknown model class {model_args}")
    else:
        # Text-only fallback: plain LLaMA causal LM, no vision tower.
        model = transformers.LlamaForCausalLM.from_pretrained(
            model_args.model_name_or_path, cache_dir=training_args.cache_dir, attn_implementation=training_args.attn_implementation, torch_dtype=(torch.bfloat16 if training_args.bf16 else None), **customized_kwargs
        )
    return model, ref_model
1482
+
1483
+
1484
def train(attn_implementation=None):
    """Entry point for LLaVA DPO fine-tuning.

    Parses CLI arguments into (model/data/training) dataclasses, builds the
    (optionally quantized / LoRA-wrapped) model and tokenizer, initializes
    the vision modules and the frozen reference model, then runs
    ``LLaVADPOTrainer`` and saves the result.

    Note: the ``attn_implementation`` parameter is unused; the value is
    read from ``training_args.attn_implementation`` instead.
    """
    global local_rank

    parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if training_args.verbose_logging:
        rank0_print(f"Inspecting experiment hyperparameters:\n")
        rank0_print(f"model_args = {vars(model_args)}\n\n")
        rank0_print(f"data_args = {vars(data_args)}\n\n")
        rank0_print(f"training_args = {vars(training_args)}\n\n")

    local_rank = training_args.local_rank
    compute_dtype = torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32)

    # Optional 4/8-bit quantized loading via bitsandbytes.
    bnb_model_from_pretrained_args = {}
    if training_args.bits in [4, 8]:
        from transformers import BitsAndBytesConfig

        bnb_model_from_pretrained_args.update(
            dict(
                device_map={"": training_args.device},
                load_in_4bit=training_args.bits == 4,
                load_in_8bit=training_args.bits == 8,
                quantization_config=BitsAndBytesConfig(
                    load_in_4bit=training_args.bits == 4,
                    load_in_8bit=training_args.bits == 8,
                    llm_int8_threshold=6.0,
                    llm_int8_has_fp16_weight=False,
                    bnb_4bit_compute_dtype=compute_dtype,
                    bnb_4bit_use_double_quant=training_args.double_quant,
                    bnb_4bit_quant_type=training_args.quant_type,  # {'fp4', 'nf4'}
                ),
            )
        )

    model, ref_model = get_model(model_args, training_args, bnb_model_from_pretrained_args)
    model.config.use_cache = False  # KV cache is incompatible with gradient checkpointing

    if model_args.freeze_backbone:
        model.model.requires_grad_(False)

    if training_args.bits in [4, 8]:
        from peft import prepare_model_for_kbit_training

        # NOTE(review): maps fp16 -> float32 (and everything non-bf16 ->
        # float32); looks inverted but matches the existing behavior — confirm.
        model.config.torch_dtype = torch.float32 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32)
        model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=training_args.gradient_checkpointing)

    if training_args.gradient_checkpointing:
        # Checkpointed activations need inputs that require grad.
        if hasattr(model, "enable_input_require_grads"):
            model.enable_input_require_grads()
            if ref_model is not None:
                ref_model.enable_input_require_grads()
        else:

            def make_inputs_require_grad(module, input, output):
                # Force embedding outputs to carry grad for checkpointing.
                output.requires_grad_(True)

            model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

            if ref_model is not None:
                ref_model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

    if training_args.lora_enable:
        from peft import LoraConfig, get_peft_model

        lora_config = LoraConfig(
            r=training_args.lora_r,
            lora_alpha=training_args.lora_alpha,
            target_modules=find_all_linear_names(model),
            lora_dropout=training_args.lora_dropout,
            bias=training_args.lora_bias,
            task_type="CAUSAL_LM",
        )
        if training_args.bits == 16:
            if training_args.bf16:
                model.to(torch.bfloat16)
            if training_args.fp16:
                model.to(torch.float16)
        rank0_print("Adding LoRA adapters...")
        model = get_peft_model(model, lora_config)

    # Tokenizer: padding side depends on the model family.
    if "mpt" in model_args.model_name_or_path:
        tokenizer = transformers.AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir, model_max_length=training_args.model_max_length, padding_side="right")
    elif "mistral" in model_args.model_name_or_path.lower() or "mixtral" in model_args.model_name_or_path.lower() or "zephyr" in model_args.model_name_or_path.lower():
        tokenizer = transformers.AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir, model_max_length=training_args.model_max_length, padding_side="left")
    elif "qwen" in model_args.model_name_or_path.lower():
        tokenizer = transformers.AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir, model_max_length=training_args.model_max_length, padding_side="right")
    else:  # for all other models
        tokenizer = transformers.AutoTokenizer.from_pretrained(
            model_args.model_name_or_path,
            cache_dir=training_args.cache_dir,
            model_max_length=training_args.model_max_length,
            padding_side="right",
            use_fast=False,
        )

    # Pad-token and conversation-template setup per prompt version.
    rank0_print(f"Prompt version: {model_args.version}")
    if model_args.version == "v0":
        if tokenizer.pad_token is None:
            smart_tokenizer_and_embedding_resize(
                special_tokens_dict=dict(pad_token="[PAD]"),
                tokenizer=tokenizer,
                model=model,
            )
    elif model_args.version == "v0.5":
        tokenizer.pad_token = tokenizer.unk_token
    else:
        if tokenizer.unk_token is not None:
            tokenizer.pad_token = tokenizer.unk_token
        if model_args.version in conversation_lib.conv_templates:
            conversation_lib.default_conversation = conversation_lib.conv_templates[model_args.version]
        else:
            # Fall back to the vicuna_v1 template for unknown versions.
            conversation_lib.default_conversation = conversation_lib.conv_templates["vicuna_v1"]

    if model_args.vision_tower is not None:
        model.get_model().initialize_vision_modules(model_args=model_args, fsdp=training_args.fsdp)

        vision_tower = model.get_vision_tower()
        vision_tower.to(dtype=torch.bfloat16 if training_args.bf16 else torch.float16, device=training_args.device)

        data_args.image_processor = vision_tower.image_processor
        data_args.is_multimodal = True

        model.config.image_aspect_ratio = data_args.image_aspect_ratio
        if data_args.image_grid_pinpoints is not None:
            # for input like "(1x1)...(3x3)", convert to [(1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (3, 2), (1, 3), (2, 3), (3, 3)]
            if "x" in data_args.image_grid_pinpoints and "..." in data_args.image_grid_pinpoints:
                vis_encoder_size = data_args.image_processor.size[0]
                matches = re.findall(r"\((\d+)x(\d+)\)", data_args.image_grid_pinpoints)
                range_start = tuple(map(int, matches[0]))
                range_end = tuple(map(int, matches[-1]))
                grid_pinpoints = [(i, j) for i in range(range_start[0], range_end[0] + 1) for j in range(range_start[1], range_end[1] + 1)]
                # Scale grid cells by the vision encoder's input size.
                grid_pinpoints = [[dim * vis_encoder_size for dim in pair] for pair in grid_pinpoints]
                data_args.image_grid_pinpoints = grid_pinpoints
            elif "x" in data_args.image_grid_pinpoints:
                # Explicit list form like "(1x1),(2x2)": scale each pair.
                vis_encoder_size = data_args.image_processor.size[0]
                assert vis_encoder_size in [224, 336, 384, 448, 512], "vis_encoder_size should be in [224, 336, 384, 448, 512]"
                grid_pinpoints = data_args.image_grid_pinpoints.replace(" ", "").replace("x", ",")[1:-1].split("),(")
                data_args.image_grid_pinpoints = [[int(x) * vis_encoder_size for x in item.split(",")] for item in grid_pinpoints]
            else:
                data_args.image_grid_pinpoints = ast.literal_eval(data_args.image_grid_pinpoints)  # for backward compatibility
        model.config.image_grid_pinpoints = data_args.image_grid_pinpoints
        model.config.image_crop_resolution = data_args.image_crop_resolution
        model.config.image_split_resolution = data_args.image_split_resolution
        model.config.tokenizer_padding_side = tokenizer.padding_side
        model.config.tokenizer_model_max_length = tokenizer.model_max_length

        ### Deciding train which part of the model
        if model_args.mm_tunable_parts is None:  # traditional way of deciding which part to train
            model.config.tune_mm_mlp_adapter = training_args.tune_mm_mlp_adapter = model_args.tune_mm_mlp_adapter
            model.config.tune_mm_vision_resampler = training_args.tune_mm_vision_resampler = model_args.tune_mm_vision_resampler
            if model_args.tune_mm_mlp_adapter or model_args.tune_mm_vision_resampler:
                # Freeze everything, then selectively unfreeze below.
                model.requires_grad_(False)
            if model_args.tune_mm_mlp_adapter:
                for p in model.get_model().mm_projector.parameters():
                    p.requires_grad = True
            if model_args.tune_mm_vision_resampler:
                for p in model.get_model().vision_resampler.parameters():
                    p.requires_grad = True

            model.config.freeze_mm_mlp_adapter = training_args.freeze_mm_mlp_adapter
            if training_args.freeze_mm_mlp_adapter:
                for p in model.get_model().mm_projector.parameters():
                    p.requires_grad = False

            model.config.freeze_mm_vision_resampler = training_args.freeze_mm_vision_resampler
            if training_args.freeze_mm_vision_resampler:
                for p in model.get_model().vision_resampler.parameters():
                    p.requires_grad = False

            model.config.unfreeze_mm_vision_tower = model_args.unfreeze_mm_vision_tower
            if model_args.unfreeze_mm_vision_tower:
                vision_tower.requires_grad_(True)
            else:
                vision_tower.requires_grad_(False)

        else:
            # Explicit comma-separated list of tunable components.
            rank0_print(f"Using mm_tunable_parts: {model_args.mm_tunable_parts}")
            model.config.mm_tunable_parts = training_args.mm_tunable_parts = model_args.mm_tunable_parts
            # Set the entire model to not require gradients by default
            model.requires_grad_(False)
            vision_tower.requires_grad_(False)
            model.get_model().mm_projector.requires_grad_(False)
            model.get_model().vision_resampler.requires_grad_(False)
            # Parse the mm_tunable_parts to decide which parts to unfreeze
            tunable_parts = model_args.mm_tunable_parts.split(",")
            if "mm_mlp_adapter" in tunable_parts:
                for p in model.get_model().mm_projector.parameters():
                    p.requires_grad = True
            if "mm_vision_resampler" in tunable_parts:
                for p in model.get_model().vision_resampler.parameters():
                    p.requires_grad = True
            if "mm_vision_tower" in tunable_parts:
                for name, param in model.named_parameters():
                    if "vision_tower" in name:
                        param.requires_grad_(True)
            if "mm_language_model" in tunable_parts:
                for name, param in model.named_parameters():
                    if "vision_tower" not in name and "mm_projector" not in name and "vision_resampler" not in name:
                        param.requires_grad_(True)

        # ds_numel covers DeepSpeed ZeRO-3 partitioned parameters.
        # NOTE(review): the printed unit says "MB" but the value is millions
        # of parameters — message is misleading.
        total_params = sum(p.ds_numel if hasattr(p, "ds_numel") else p.numel() for p in model.parameters())
        trainable_params = sum(p.ds_numel if hasattr(p, "ds_numel") else p.numel() for p in model.parameters() if p.requires_grad)
        rank0_print(f"Total parameters: ~{total_params/1e6:.2f} MB)")
        rank0_print(f"Trainable parameters: ~{trainable_params/1e6:.2f} MB)")
        if training_args.bits in [4, 8]:
            model.get_model().mm_projector.to(dtype=compute_dtype, device=training_args.device)

        model.config.mm_use_im_start_end = data_args.mm_use_im_start_end = model_args.mm_use_im_start_end
        model.config.mm_projector_lr = training_args.mm_projector_lr
        model.config.mm_vision_tower_lr = training_args.mm_vision_tower_lr
        training_args.use_im_start_end = model_args.mm_use_im_start_end
        model.config.mm_use_im_patch_token = model_args.mm_use_im_patch_token
        model.initialize_vision_tokenizer(model_args, tokenizer=tokenizer)

        if ref_model is not None:
            # Mirror the multimodal setup on the reference model, then
            # freeze it entirely — DPO needs a fixed reference policy.
            ref_model.get_model().initialize_vision_modules(model_args=model_args, fsdp=training_args.fsdp)
            ref_vision_tower = ref_model.get_vision_tower()
            ref_vision_tower.to(dtype=torch.bfloat16 if training_args.bf16 else torch.float16, device=training_args.device)
            ref_model.config.image_aspect_ratio = data_args.image_aspect_ratio
            ref_model.config.image_grid_pinpoints = data_args.image_grid_pinpoints
            ref_model.config.image_crop_resolution = data_args.image_crop_resolution
            ref_model.config.image_split_resolution = data_args.image_split_resolution
            ref_model.config.tokenizer_padding_side = tokenizer.padding_side
            ref_model.config.tokenizer_model_max_length = tokenizer.model_max_length
            ref_model.config.mm_use_im_start_end = data_args.mm_use_im_start_end
            ref_model.config.mm_use_im_patch_token = model_args.mm_use_im_patch_token
            ref_model.initialize_vision_tokenizer(model_args, tokenizer=tokenizer)
            parameter_names = [n for n, _ in ref_model.named_parameters()]
            for param_name in parameter_names:
                param = ref_model.get_parameter(param_name)
                param.requires_grad = False
            ref_model.eval()

    if training_args.bits in [4, 8]:
        from peft.tuners.lora import LoraLayer

        # Align module dtypes for quantized training: LoRA layers to bf16,
        # norms to fp32, embedding/head weights to bf16.
        for name, module in model.named_modules():
            if isinstance(module, LoraLayer):
                if training_args.bf16:
                    module = module.to(torch.bfloat16)
            if "norm" in name:
                module = module.to(torch.float32)
            if "lm_head" in name or "embed_tokens" in name:
                if hasattr(module, "weight"):
                    if training_args.bf16 and module.weight.dtype == torch.float32:
                        module = module.to(torch.bfloat16)

    train_dataset = make_dpo_data_module(tokenizer=tokenizer, data_args=data_args)
    data_collator = DPODataCollator(
        tokenizer,
        label_pad_token_id=IGNORE_INDEX,
        pad_token_id=tokenizer.pad_token_id,
    )

    trainer = LLaVADPOTrainer(
        model,
        ref_model,
        args=training_args,
        dpo_alpha=training_args.dpo_alpha,
        beta=training_args.beta,
        gamma=training_args.gamma,
        train_dataset=train_dataset,
        eval_dataset=None,
        data_collator=data_collator,
        tokenizer=tokenizer,
        max_length=training_args.model_max_length,
        generate_during_eval=False,  # training_args.generate_during_eval,
        precompute_ref_log_probs=training_args.precompute_ref_log_probs,
    )

    # Resume automatically when a checkpoint exists in the output dir.
    if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
        trainer.train(resume_from_checkpoint=True)
    else:
        trainer.train()
    trainer.save_state()

    model.config.use_cache = True  # re-enable KV cache for inference

    if training_args.lora_enable:
        # Save LoRA weights and non-LoRA trainables separately (rank 0 only).
        state_dict = get_peft_state_maybe_zero_3(model.named_parameters(), training_args.lora_bias)
        non_lora_state_dict = get_peft_state_non_lora_maybe_zero_3(model.named_parameters())
        if training_args.local_rank == 0 or training_args.local_rank == -1:
            if hasattr(model, "config"):
                model.config.save_pretrained(training_args.output_dir)
            if hasattr(model, "generation_config"):
                model.generation_config.save_pretrained(training_args.output_dir)
            model.save_pretrained(training_args.output_dir, state_dict=state_dict)
            torch.save(non_lora_state_dict, os.path.join(training_args.output_dir, "non_lora_trainables.bin"))
    else:
        safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)

    rank0_print(f"Model saved to {training_args.output_dir}")
1779
+
1780
+
1781
+ if __name__ == "__main__":
1782
+ train()
VLMEvalKit-sudoku/requirements/docs.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ docutils==0.18.1
2
+ modelindex
3
+ myst-parser
4
+ -e git+https://github.com/open-compass/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
5
+ sphinx==6.1.3
6
+ sphinx-copybutton
7
+ sphinx-design
8
+ sphinx-notfound-page
9
+ sphinx-tabs
10
+ sphinxcontrib-jquery
11
+ tabulate
VLMEvalKit-sudoku/vlmeval/api/__pycache__/claude.cpython-310.pyc ADDED
Binary file (5.3 kB). View file
 
VLMEvalKit-sudoku/vlmeval/api/__pycache__/gemini.cpython-310.pyc ADDED
Binary file (5.15 kB). View file
 
VLMEvalKit-sudoku/vlmeval/api/__pycache__/glm_vision.cpython-310.pyc ADDED
Binary file (3.01 kB). View file
 
VLMEvalKit-sudoku/vlmeval/api/__pycache__/hf_chat_model.cpython-310.pyc ADDED
Binary file (8.34 kB). View file
 
VLMEvalKit-sudoku/vlmeval/api/__pycache__/hunyuan.cpython-310.pyc ADDED
Binary file (7.21 kB). View file
 
VLMEvalKit-sudoku/vlmeval/api/__pycache__/lmdeploy.cpython-310.pyc ADDED
Binary file (11.9 kB). View file
 
VLMEvalKit-sudoku/vlmeval/api/__pycache__/qwen_vl_api.cpython-310.pyc ADDED
Binary file (7.53 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/megabench.cpython-310.pyc ADDED
Binary file (14.6 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/sfebench.cpython-310.pyc ADDED
Binary file (8.04 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/cmmmu.py ADDED
@@ -0,0 +1,354 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .image_base import ImageBaseDataset
2
+ import random
3
+ from collections import Counter
4
+ import os
5
+ import re
6
+ import tempfile
7
+ from ..smp import *
8
+ from ..smp.file import get_intermediate_file_path
9
+
10
+
11
def get_multi_choice_prediction(response, all_choices, index2ans):
    """Map a free-form model *response* onto one or more choice letters.

    Matching is attempted in decreasing order of confidence:
    1. parenthesised letters, e.g. "(A)";
    2. bare letters, e.g. "A";
    3. occurrences of the literal option text from *index2ans*.
    Ties are resolved by returning every top-scoring letter joined in the
    order of *all_choices*; if nothing matches, a random letter is returned.

    :param response: raw model output text.
    :param all_choices: ordered choice letters, e.g. ['A', 'B', 'C', 'D'].
    :param index2ans: mapping from choice letter to the option's text.
    :return: string of one or more choice letters.
    """
    for char in [',', '.', '!', '?', ';', ':', "'"]:
        response = response.strip(char)
    response = " " + response + " "  # pad so whole-token scans behave uniformly

    candidates = []

    # Pass 1: parenthesised letters such as "(A)".
    for choice in all_choices:
        candidates.extend([choice] * response.count(f'({choice})'))

    # Pass 2: bare letters such as "A".
    if not candidates:
        for choice in all_choices:
            candidates.extend([choice] * response.count(f'{choice}'))

    # Pass 3: occurrences of the literal option text.
    if not candidates and len(response.split()) >= 1:
        for index, ans in index2ans.items():
            candidates.extend([index] * response.count(ans))
    # Fix: the original had a fourth pass using `ans in response`; it was dead
    # code, since `response.count(ans) == 0` already implies `ans not in response`.

    if not candidates:
        # Nothing recognisable at all: fall back to a random guess.
        return random.choice(all_choices)

    # Keep every maximally-frequent letter, preserving ABCD order.
    candidate_counts = Counter(candidates)
    max_count = max(candidate_counts.values())
    return ''.join(c for c in all_choices if candidate_counts.get(c, 0) == max_count)
52
+
53
+
54
def extract_numbers(string):
    """Collect every numeric token in *string*.

    Three disjoint families are extracted and concatenated in this order:
    comma-grouped integers (e.g. "1,234"), scientific notation
    (e.g. "5e-3"), and plain integers/decimals.
    """
    patterns = (
        r'-?\d{1,3}(?:,\d{3})+',                              # grouped with commas
        r'-?\d+(?:\.\d+)?[eE][+-]?\d+',                       # scientific notation
        r'-?(?:\d+\.\d+|\.\d+|\d+)(?![eE][+-]?\d+)(?!,\d)',   # simple numbers
    )
    matches = []
    for pat in patterns:
        matches += re.findall(pat, string)
    return matches
72
+
73
+
74
def check_is_number(string):
    """Return True when *string* parses as a float once commas are removed."""
    try:
        float(string.replace(',', ''))
    except ValueError:
        # Contains non-numeric characters (or is empty).
        return False
    return True
81
+
82
+
83
def count_letters(string):
    """Count ASCII letters (a-z, A-Z) in *string*.

    Fix: the original expression relied on `and`/`or` precedence
    (`c.isalpha() and 'a' <= c <= 'z' or 'A' <= c <= 'Z'`), which reads
    ambiguously. The explicit range checks below are equivalent, because
    every character in a-z is alphabetic anyway.
    """
    return sum(('a' <= c <= 'z') or ('A' <= c <= 'Z') for c in string)
85
+
86
+
87
def normalize_str(string, answer):
    """Normalise a candidate *string* for comparison against *answer*.

    Numeric candidates come back as a one-element list holding a float
    rounded to two decimals; plausible short strings come back stripped;
    strings much longer than *answer* are discarded (empty list);
    None passes through unchanged.
    """
    if string is None:
        return [string]
    string = string.strip()

    if check_is_number(string):
        # Drop thousands separators, parse, keep two decimal places.
        value = round(float(string.replace(',', '')), 2)
        return [value]

    # Plain text: reject candidates wildly longer than the reference answer.
    too_long = len(string) > len(answer) + 20
    too_wordy = count_letters(string) > count_letters(answer) + 2
    if too_long or too_wordy:
        return []
    return [string]
107
+
108
+
109
def get_fill_blank_prediction(response, answer):
    """Extract candidate answers from a free-form *response*.

    Returns a deduplicated list mixing the salient sub-strings of the
    response with every number found inside them, each normalised via
    normalize_str against *answer*.
    """

    def split_key_responses(full_response):
        # Split on Chinese full stops / newlines and keep, per segment,
        # the shortest tail following any "answer indicator" word.
        full_response = full_response.strip("。").strip()
        segments = re.split(r'。|\n', full_response)
        indicators = ['是', '为', '所以', '等于', '方案', '选择',
                      '正确答案', '因此', '最后', '答案', '结果']
        picked = []
        for pos, segment in enumerate(segments):
            if pos == len(segments) - 1:
                # The final segment may simply be an equation.
                indicators.extend(['='])
            shortest = None
            for marker in indicators:
                if marker in segment:
                    tail = segment.split(marker)[-1].strip()
                    # An empty current best is always replaced, matching
                    # the original falsy-string semantics.
                    if not shortest or len(tail) < len(shortest):
                        shortest = tail
            if shortest and shortest.strip() not in [":", ",", ".", "!", "?", ";", ":", "'"]:
                picked.append(shortest)
        # Nothing matched: fall back to the whole (stripped) response.
        return picked if picked else [full_response]

    key_responses = split_key_responses(response)

    # Keep the original strings plus any numbers embedded in them.
    candidates = list(key_responses)
    for seg in key_responses:
        candidates.extend(extract_numbers(seg))

    normalized = []
    for cand in candidates:
        normalized.extend(normalize_str(cand, answer))

    # Deduplicate (resulting order is unspecified).
    return list(set(normalized))
156
+
157
+
158
def get_TF_prediction(response):
    """Extract candidate true/false judgement phrases from *response*.

    Returns a deduplicated list of the salient sub-strings following
    judgement indicator words; falls back to the whole response when no
    indicator is present.
    """

    def split_key_responses(full_response):
        full_response = full_response.strip("。").strip()
        segments = re.split(r'。|\n', full_response)
        indicators = ['是', '为', '所以', '判断',
                      '陈述', '说法', '表达', '答案', '结果']
        picked = []
        for segment in segments:
            shortest = None
            # Keep the shortest tail following any indicator word.
            for marker in indicators:
                if marker in segment:
                    tail = segment.split(marker)[-1].strip()
                    # An empty current best is always replaced, matching
                    # the original falsy-string semantics.
                    if not shortest or len(tail) < len(shortest):
                        shortest = tail
            if shortest and shortest.strip() not in [":", ",", ".", "!", "?", ";", ":", "'"]:
                picked.append(shortest)
        return picked if picked else [full_response]

    # Deduplicate (resulting order is unspecified for multi-element results).
    return list(set(split_key_responses(response)))
194
+
195
+
196
class CMMMU(ImageBaseDataset):
    """CMMMU (Chinese MMMU) benchmark dataset.

    Samples belong to one of three question types: multiple choice
    ('选择'), true/false ('判断'), or fill-in-the-blank (any other value).
    ``evaluate`` scores predictions with rule-based parsing (no judge
    model) and reports overall plus per-category accuracy.
    """

    TYPE = 'VQA'

    # Where to fetch the dataset TSV, and its MD5 for integrity checks.
    DATASET_URL = {
        'CMMMU_VAL': 'https://opencompass.openxlab.space/utils/VLMEval/CMMMU_VAL.tsv'
    }

    DATASET_MD5 = {
        'CMMMU_VAL': 'b4727e2fce2415bf646379e60c11a726'
    }

    def dump_image(self, line):
        """Decode the base64-encoded image(s) of one sample to disk.

        Multi-image samples are written as ``<index>--<k>.jpg`` (1-based k);
        single-image samples as ``<index>.jpg``. Files that already exist
        and are readable are reused. Returns the list of image paths.
        """
        os.makedirs(self.img_root, exist_ok=True)

        tgt_path_z = []
        if isinstance(line['image'], list):
            for i in range(len(line['image'])):
                tgt_path = osp.join(self.img_root, f"{line['index']}--{i + 1}.jpg")
                if not read_ok(tgt_path):
                    decode_base64_to_image_file(line['image'][i], tgt_path)
                tgt_path_z.append(tgt_path)
        else:
            tgt_path = osp.join(self.img_root, f"{line['index']}.jpg")
            if not read_ok(tgt_path):
                decode_base64_to_image_file(line['image'], tgt_path)
            tgt_path_z.append(tgt_path)
        return tgt_path_z

    # NOTE(review): declared @classmethod but the first parameter is named
    # `self` (it actually receives the class). It works, but is misleading.
    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Score predictions in *eval_file* and return a result DataFrame.

        Writes (and caches) a CSV with the overall accuracy ('总准确率') and
        one accuracy column per category. *judge_kwargs* are accepted for
        interface compatibility but unused: scoring here is rule-based.
        """

        result_file = get_intermediate_file_path(eval_file, '_acc', 'csv')

        if not osp.exists(result_file):
            data = load(eval_file)
            assert 'answer' in data and 'prediction' in data
            data['prediction'] = [str(x) for x in data['prediction']]
            data['answer'] = [str(x) for x in data['answer']]

            correct_count = 0
            # category -> [questions_seen, points_scored]
            correct_category = {
                '技术与工程': [0, 0],
                '科学': [0, 0],
                '健康与医学': [0, 0],
                '商业': [0, 0],
                '艺术与设计': [0, 0],
                '人文社会科学': [0, 0],
            }

            for i in tqdm(data.iterrows()):
                line = i[1]
                correct_category[line['category']][0] += 1

                # Multiple-choice questions
                if line['type'] == '选择':
                    index2ans = {
                        'A': line['option1'],
                        'B': line['option2'],
                        'C': line['option3'],
                        'D': line['option4']
                    }
                    fact_option = get_multi_choice_prediction(line['prediction'], ['A', 'B', 'C', 'D'], index2ans)
                    if fact_option == line['answer']:
                        correct_count += 1
                        correct_category[line['category']][1] += 1

                # True/false questions
                elif line['type'] == '判断':
                    positive_keywords = ['正确', '对', '准确', '肯定', '对的']
                    negative_keywords = ['不对', '错误', '不正确', '不准确', '不合适', '否定', '错的', '错']
                    ambiguous_keywords = ['对错', '是否正确', '否正确', '或者', '是否', '正确性', '对不']

                    # Majority vote over extracted phrases; ties resolved randomly.
                    def judge_similarity(pred_list, positive_keywords, negative_keywords):
                        positive_count = 0
                        negative_count = 0

                        for pred in pred_list:
                            if any(pos_word in pred for pos_word in positive_keywords):
                                positive_count += 1
                            elif any(neg_word in pred for neg_word in negative_keywords):
                                negative_count += 1

                        if positive_count > negative_count:
                            return "对"
                        elif negative_count > positive_count:
                            return "错"
                        else:
                            return random.choice(['对', '错'])

                    answer = get_TF_prediction(line['prediction'])
                    # Drop phrases that merely restate the question ("是否正确" etc.).
                    answer = [word for word in answer if not any(ambiguous in word for ambiguous in ambiguous_keywords)]
                    fact_answer = judge_similarity(answer, positive_keywords, negative_keywords)
                    if fact_answer == line['answer']:
                        correct_count += 1
                        correct_category[line['category']][1] += 1

                # Fill-in-the-blank questions
                else:
                    norm_answers = normalize_str(line['answer'], line['answer'])
                    predicted_answer = get_fill_blank_prediction(line['prediction'], line['answer'])

                    # NOTE(review): each matching candidate increments the
                    # counters, so one question can contribute more than one
                    # point when several extracted candidates match the
                    # answer. Kept as-is to preserve the original scoring.
                    for pred in predicted_answer:
                        # candidates are already normalized
                        if isinstance(pred, str):  # string: check the answer is contained in the candidate
                            for norm_ans in norm_answers:
                                if isinstance(norm_ans, str) and norm_ans in pred:
                                    correct_count += 1
                                    correct_category[line['category']][1] += 1
                        else:  # number: check exact membership
                            if pred in norm_answers:
                                correct_count += 1
                                correct_category[line['category']][1] += 1

            accuracyz = {}
            accuracyz['总准确率'] = correct_count / len(data)
            for i in correct_category.keys():
                accuracyz[i] = correct_category[i][1] / correct_category[i][0]

            accuracyz = d2df(accuracyz)
            # NOTE(review): DataFrame.round returns a new frame; this call's
            # result is discarded, so the line is effectively a no-op.
            accuracyz.round(10)
            dump(accuracyz, result_file)

        result = pd.read_csv(result_file)
        return result

    def build_prompt(self, line):
        """Build the multimodal prompt for one sample.

        Multiple-choice questions get a custom Chinese instruction with the
        four options appended; true/false and fill-in-the-blank questions
        reuse the base-class prompt plus a type-specific Chinese suffix.
        """
        if line['type'] == '选择':
            tgt_path = self.dump_image(line)
            question = line['question']
            options_prompt = 'Options:\n'

            for i in [['A', '1'], ['B', '2'], ['C', '3'], ['D', '4']]:
                options_prompt += i[0] + '. ' + line['option' + i[1]] + '\n'

            prompt = (f'问题: {question}\n' + options_prompt
                      + '请回答上述多项选择题,并选出正确选项。这些题目可能包括单选和多选题型。如果所提供的信息不足以确定一个明确的答案,那么请根据可用的数据和你的判断来选择最可能正确的选项。')

            msgs = []
            if isinstance(tgt_path, list):
                msgs.extend([dict(type='image', value=p) for p in tgt_path])
            else:
                msgs = [dict(type='image', value=tgt_path)]
            msgs.append(dict(type='text', value=prompt))

            return msgs

        elif line['type'] == '判断':
            msgs = super().build_prompt(line)
            assert msgs[-1]['type'] == 'text'
            msgs[-1]['value'] += '\n请回答上述判断题,并根据题目描述和所给的信息来判断问题中陈述的对错。如果信息不完整或不足以作出绝对判断,请运用你的逻辑推理和现有信息来做出最可能的判断。'
            return msgs

        else:
            msgs = super().build_prompt(line)
            assert msgs[-1]['type'] == 'text'
            msgs[-1]['value'] += '\n请回答上述填空题,并根据题目的要求和所提供的信息来给出最恰当的答案。如果信息不足以确切回答,那么请依据现有的数据和你的推理能力来填写最合理的答案。'
            return msgs
VLMEvalKit-sudoku/vlmeval/dataset/creation.py ADDED
@@ -0,0 +1,741 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa
2
+ from .image_base import ImageBaseDataset
3
+ import numpy as np
4
+ import pandas as pd
5
+ from ..smp import *
6
+ from ..smp.file import get_intermediate_file_path
7
+ from .utils import build_judge, DEBUG_MESSAGE
8
+ from ..utils import track_progress_rich
9
+ import re
10
+
11
+ prompt_dict = {}
12
+ prompt_dict['LiveMMBench_Creation'] = {
13
+ # Subjective Judge [GPT-4o reference]
14
+ 'subjective':"""
15
+ Please act as an impartial judge and evaluate the quality of two responses provided by AI assistants to the user prompt.
16
+
17
+ Your task is to carefully assess two responses based on provided instructions and evaluation criteria. After evaluating both responses, determine which response features better quality and better meets the criteria. If both responses are similar or nearly identical in quality, you should indicate a tie. Avoid position bias toward the first or second response.
18
+
19
+ Suggested Steps for Evaluation:
20
+ 1. Review both responses independently and then carefully compare their strengths and weaknesses. A good response should feature good language quality, follow the user instruction and meet as many criteria as possible.
21
+ 2. After completing the first evaluation, swap the positions of response A and B and repeat Step 1 and get the 2nd evaluation outcome. This helps to mitigate the potential position bias.
22
+ 3. After completing both evaluations (in the original and reversed order), combine your analysis and provide a final conclusion based on the overall assessment. If both responses are relatively similar, or the differences are minimal and hard to distinguish, your conclusion should indicate a tie ([[A=B]]).
23
+
24
+ Your **conclusion** should be one of the following options (A, B are of the original order):
25
+ 1. [[A>>B]]: Response A is clearly better than Response B.
26
+ 2. [[A>B]]: Response A is slightly better than Response B.
27
+ 3. [[A=B]]: Response A is nearly identical to Response B.
28
+ 4. [[B>A]]: Response B is slightly better than Response A.
29
+ 5. [[B>>A]]: Response B is clearly better than Response A.
30
+
31
+ User Instruction:\n[INSTRUCTIONS]\n{instructions}\n[END INSTRUCTIONS]\n\n
32
+ Repsonse A:\n[RESPONSE A]\n{reference_answer_by_gpt4o}\n[END RESPONSE A]\n\n
33
+ Response B:\n[RESPONSE B]\n{prediction}\n[END RESPONSE B]\n\n
34
+ Evaluation Criteria:\n[CRITERIA]\n{criteria}\n[END CRITERIA]\n\n
35
+
36
+ Your output should include:
37
+ 1. Conclusion: Your final conclusion based on the overall assessment.
38
+ 2. Reasoning: Your reasoning process and analysis of the two responses.
39
+
40
+ Your output should follow the following format (CONCLUSION should be one of the five options: A>>B, A>B, A=B, B>A, B>>A):
41
+
42
+ Final Conclusion: [[CONCLUSION]]
43
+ Reasoning Process: [REASONING]\n
44
+ """,
45
+
46
+ # Criteria Alignment w/o GT
47
+ 'objective_without_gt':"""
48
+ Please act as an impartial judge and evaluate the **Criteria Alignment** of the two responses provided by AI assistants to the user prompt. The responses were generated based on the provided instructions and visual input from images.
49
+
50
+ Suggested Steps for Evaluation:
51
+ 1. Evaluate **Criteria Alignment** of both responses based on the criteria.
52
+ • If a criterion consist of **X aspects**, each aspect is worth **10 / X points**.
53
+ • For each aspect, there may be multiple sub-criteria. If there are **Y sub-criteria for the aspect**, each sub-criterion worths **10 / (X * Y) points**.
54
+ 2. Assign a total score out of 10 for each response.
55
+
56
+ User Instruction:\n[INSTRUCTIONS]\n{instructions}\n[END INSTRUCTIONS]\n\n
57
+ Repsonse A:\n[RESPONSE A]\n{reference_answer_by_gpt4o}\n[END RESPONSE A]\n\n
58
+ Response B:\n[RESPONSE B]\n{prediction}\n[END RESPONSE B]\n\n
59
+ Criteria:\n[CRITERIA]\n{criteria}\n[END CRITERIA]\n\n
60
+
61
+ Your output should evaluate alignment scores of each response and end with a conclusion in the following format (The full score is 10. X, Y are alignment scores for Response A and B):
62
+
63
+ Response A Alignment Score: X/10
64
+ Response B Alignment Score: Y/10\n
65
+ """,
66
+
67
+ # Criteria Alignment w. GT
68
+ 'objective_with_gt':"""
69
+ Please act as an impartial judge and evaluate the **Criteria Alignment** of the two responses provided by AI assistants to the user prompt. The responses were generated based on the provided instructions and visual input from images. There is also a ground truth corresponding to the instructions provided for reference.
70
+ Take this context into account when making your judgment.
71
+
72
+ Steps for Evaluation:
73
+ 1. Evaluate **Criteria Alignment** of both responses based on the criteria and the ground truth.
74
+ • If a criterion consist of **X aspects**, each aspect is worth **10 / X points**.
75
+ • For each aspect, there may be multiple sub-criteria. If there are **Y sub-criteria for the aspect**, each sub-criterion worths **10 / (X * Y) points**.
76
+ 2. Assign a total score out of 10 for each response.
77
+
78
+ User Instruction:\n[INSTRUCTIONS]\n{instructions}\n[END INSTRUCTIONS]\n\n
79
+ Ground Truth:\n[GROUND TRUTH]\n{groundtruth}\n[END GROUND TRUTH]\n\n
80
+ Repsonse A:\n[RESPONSE A]\n{reference_answer_by_gpt4o}\n[END RESPONSE A]\n\n
81
+ Response B:\n[RESPONSE B]\n{prediction}\n[END RESPONSE B]\n\n
82
+ Criteria:\n[CRITERIA]\n{criteria}\n[END CRITERIA]\n\n
83
+
84
+ Your output should evaluate alignment scores of each response and end with a conclusion in the following format (The full score is 10. X, Y are alignment scores for Response A and B):
85
+
86
+ Response A Alignment Score: X/10
87
+ Response B Alignment Score: Y/10\n
88
+ """,
89
+ }
90
+
91
+ prompt_dict['Creation_MMBench'] = {
92
+ # Subjective Judge [GPT-4o reference, with image]
93
+ 'subjective':"""
94
+ Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt below, considering both the provided criteria and the image.
95
+
96
+ Your task is to carefully assess each response based on how well it meets the evaluation criteria, incorporating the visual context from the image. The criteria should be the primary basis for your judgment, with the image serving to complement and inform your analysis.
97
+
98
+ Steps for Evaluation:
99
+ 1. Review Both Responses Independently:
100
+ Carefully analyze Assistant A’s and Assistant B’s responses with the criteria and the image. Do not assume any response is better just because it is listed first. Each response should be independently assessed based on the criteria and aided by images to help understand the context.
101
+
102
+ 2. Compare the Strengths and Weaknesses:
103
+ After evaluating each response independently, compare the two. Consider both the quality of the content and how closely it aligns with the criteria and image. Identify the strengths and weaknesses of each response, and highlight the key differences.
104
+
105
+ 3. Ensure Fairness:
106
+ To avoid positional bias, swap the positions of Assistant A and Assistant B after the first evaluation (i.e., make Assistant A become Assistant B and vice versa) and repeat the analysis and comparison. This ensures that each response is evaluated impartially under the same criteria.
107
+
108
+ 4. Provide a Conclusion Based on Both Evaluations:
109
+ After completing both evaluations (original and swapped positions), combine your analysis to provide a final verdict. If the responses are similar, with only minimal differences, your judgment should reflect that and indicate a tie.
110
+
111
+ Possible Verdict Options:
112
+
113
+ • If Assistant A is clearly better in both evaluations: [[A>>B]]
114
+ • If Assistant A is slightly better in both evaluations: [[A>B]]
115
+ • If both responses are nearly identical, showing minimal differences and no clear advantage: [[A=B]]
116
+ • If Assistant B is slightly better in both evaluations: [[B>A]]
117
+ • If Assistant B is clearly better in both evaluations: [[B>>A]]
118
+
119
+ Instructions to the AI Assistants:
120
+
121
+ [INSTRUCTIONS]
122
+ {instructions}
123
+ [END INSTRUCTIONS]
124
+
125
+ Assistant A Response:
126
+
127
+ [ASSISTANT A]
128
+ {reference_answer_by_gpt4o}
129
+ [END ASSISTANT A]
130
+
131
+ Evaluation Criteria:
132
+
133
+ [CRITERIA]
134
+ {criteria}
135
+ [END CRITERIA]
136
+
137
+ Assistant B Response:
138
+
139
+ [ASSISTANT B]
140
+ {prediction}
141
+ [END ASSISTANT B]
142
+
143
+ Output Format:
144
+
145
+ Your output should include:
146
+ 1. Evaluation of Assistant A’s Response: Provide a detailed qualitative evaluation, focusing on how well Assistant A’s response aligns with the criteria and the image.
147
+ 2. Evaluation of Assistant B’s Response: Provide a detailed qualitative evaluation, focusing on how well Assistant B’s response aligns with the criteria and the image.
148
+ 3. Final Verdict: After considering both evaluations, select one of the following verdicts and justify it based on your analysis:
149
+
150
+ Your output format should end like this:
151
+ Assistant A Evaluation: [qualitative comment]
152
+ Assistant B Evaluation: [qualitative comment]
153
+ Final Verdict is: [[VERDICT]]
154
+ """,
155
+
156
+ ##### For Visual Factuality
157
+ 'objective_without_gt':"""
158
+ Please act as an impartial judge and evaluate the **Visual Factuality** of the responses provided by two AI assistants to the user prompt displayed below.
159
+
160
+ The responses were generated based on the provided instructions and visual input from images. Take this context into account when making your judgment.
161
+
162
+ Steps for Evaluation:
163
+ 1. Evaluate visual factuality for both responses based on the visual factuality criteria.
164
+ • If the visual factuality criteria consist of **X aspects**, each aspect is worth **10/X points**.
165
+ • For each aspect, there may be multiple small criteria. If there are **Y small criteria in one aspect**, each small criterion is worth **10/X/Y points**.
166
+ 2. Assign a total score out of 10 for each response.
167
+
168
+ Instructions to the AI assistants:
169
+ [INSTRUCTIONS]
170
+ {instructions}
171
+ [END INSTRUCTIONS]
172
+
173
+ Assistant A response:
174
+ [ASSISTANT A]
175
+ {reference_answer_by_gpt4o}
176
+ [END ASSISTANT A]
177
+
178
+ Visual Factuality Criteria:
179
+ [VISUAL FACTUALITY CRITERIA]
180
+ {criteria}
181
+ [END CRITERIA]
182
+
183
+ Assistant B response:
184
+ [ASSISTANT B]
185
+ {prediction}
186
+ [END ASSISTANT B]
187
+
188
+ Your output should evaluate visual factuality scores for each assistant and end like this:
189
+
190
+ Response A Visual Factuality Score: X/10
191
+ Response B Visual Factuality Score: Y/10
192
+ """,
193
+
194
+ 'objective_with_gt':"""
195
+ Please act as an impartial judge and evaluate the **Visual Factuality** of the responses provided by two AI assistants to the user prompt displayed below.
196
+
197
+ The responses were generated based on the provided instructions and visual input from images.
198
+ There is a provided ground truth for the instructions, but the ground truth was not given to the AI assistants when generating their responses.
199
+ Take this context into account when making your judgment.
200
+
201
+ Steps for Evaluation:
202
+ 1. Evaluate visual factuality for both responses based on the provided ground truth and visual factuality criteria.
203
+ • If the visual factuality criteria consist of **X aspects**, each aspect is worth **10/X points**.
204
+ • For each aspect, there may be multiple small criteria. If there are **Y small criteria in one aspect**, each small criterion is worth **10/X/Y points**.
205
+ 2. Assign a total score out of 10 for each response.
206
+
207
+ Instructions to the AI assistants:
208
+ [INSTRUCTIONS]
209
+ {instructions}
210
+ [END INSTRUCTIONS]
211
+
212
+ Assistant A response:
213
+ [ASSISTANT A]
214
+ {reference_answer_by_gpt4o}
215
+ [END ASSISTANT A]
216
+
217
+ Visual Factuality Criteria:
218
+ [VISUAL FACTUALITY CRITERIA]
219
+ {criteria}
220
+ [END CRITERIA]
221
+
222
+ Assistant B response:
223
+ [ASSISTANT B]
224
+ {prediction}
225
+ [END ASSISTANT B]
226
+
227
+ Ground truth:
228
+ [GROUND TRUTH]
229
+ {groundtruth}
230
+ [END GROUND TRUTH]
231
+
232
+ Your output should evaluate visual factuality scores for each assistant and end like this:
233
+
234
+ Response A Visual Factuality Score: X/10
235
+ Response B Visual Factuality Score: Y/10
236
+ """,
237
+ }
238
+
239
# Mapping from Creation-MMBench top-level categories to the task names they
# contain; used to aggregate per-task results into category-level scores.
creation_mmbench_category_dict = {
    'CATEGORY_Literary_Writing': [
        'story_continue',
        'landscape_to_poem',
        'historical_story_creation',
        'story_novel_creation',
        'prose_writing_scenery',
        'art_inspired_prose',
        'daily_conversation_creation',
        'children_book_illustration_dialogue_creation'
    ],
    'CATEGORY_Common_Functionality_Writing':[
        'ins_simple_daily_copywriter',
        'travel_journal',
        'short_video_scripts_for_social_media',
        'social_media_travel_content',
        'daily_achievement_show_off',
        'scientific_research_simple_promotion',
        'twitter_comment_on_daily_news',
        'personal_event_summaries',
        'daily_affairs_inquiries',
        'business_collaborative_email_writing',
        'daily_emotional_email_writing',
        'letter_of_complaint',
        'daily_invitation_email_writing',
        'holiday_card_writing',
        'letter_of_application',
        'product_usage_experience_review',
        'store_experience_review',
        'public_welfare_activity_participation_initiative'
    ],
    'CATEGORY_Professional_Functionality_Writing': [
        'museum_guide_word_creation',
        'recipe_infer_and_guide',
        'landscape_introduction',
        'drafting_announcements_for_public_spaces',
        'floor_plan_renovation_design',
        'teaching_plan',
        'nutritional_formulation_of_recipe',
        'clothing_match_design',
        'software_engineering_diagram_explanation',
        'event_planning_and_venue_arrangement',
        'ui_design_analysis_and_optimization',
        'attraction_promotional_words',
        'product_marketing_strategy',
        'script_writing_for_product_advertisement_promotional_video',
        'residence_reasoning',
        'scientific_diagram_understanding',
        'pulitzer_prize_judge',
        'architecture_appreciation',
        'company_team_amuse_broadcast'
    ],
    'CATEGORY_Creative_Multimodal_Understanding': [
        'travel_itinerary_planning_and_recommendations',
        'photography_appreciation',
        'meme_explanation',
        'advertisement_explanation',
        'document_understanding',
        'snapshot_analysis'
    ]

}
301
+
302
def is_criteria_valid(criteria):
    """Return True when every criterion string carries real content.

    A criterion is rejected when it is empty, a lone backslash, or contains
    no ASCII letters at all.
    Fix: dropped the redundant function-local ``import re`` — the module
    already imports ``re`` at the top — and used a membership test for the
    two sentinel values.
    """
    for value in criteria.values():
        if value in ('\\', '') or not re.search('[a-zA-Z]', value):
            return False
    return True
308
+
309
# Maps internal score keys to the public names used in dumped results
# (applied recursively via rename_keys).
key_mapping = {
    "sub_parse_ok": "preference_parse_ok",
    "sub_dist": "preference_dist",
    "win_rate": "win_rate",
    "sub_reward": "reward",
    "obj_parse_ok": "visual_factuality_parse_ok",
    "obj_score": "visual_factuality_score",
    "obj_ref_score": "visual_factuality_ref_score"
}
318
+
319
def rename_keys(data, key_mapping):
    """Recursively rename dict keys throughout *data* using *key_mapping*.

    Keys absent from the mapping are preserved; lists are processed
    element-wise; any other value is returned untouched.
    """
    if isinstance(data, list):
        return [rename_keys(entry, key_mapping) for entry in data]
    if isinstance(data, dict):
        return {
            key_mapping.get(old_key, old_key): rename_keys(val, key_mapping)
            for old_key, val in data.items()
        }
    return data
330
+
331
+
332
def build_prompt(line, dataset_name):
    """Build the judge prompt(s) for one evaluation record.

    Parses the record's criteria field (a dict literal or plain string),
    normalises it to {'subjective': ..., 'objective': ...}, then formats the
    dataset-specific judge templates. Returns a dict with a 'subjective'
    prompt and, when objective criteria exist, an 'objective' prompt
    (with or without ground truth).
    """
    # NOTE(review): eval() on a data field executes arbitrary expressions;
    # ast.literal_eval would be safer if criteria are plain literals — confirm
    # before changing, since the bare-except fallback relies on this path.
    try:
        criteria = eval(line['criteria'])
    except:
        # Not a parseable expression: treat the raw string as the criteria.
        criteria = line['criteria']

    if isinstance(criteria, dict):
        # Collapse arbitrary key names into 'subjective' / 'objective'.
        new_criteria = {}
        for k in criteria:
            if 'subjective' in k.lower():
                new_criteria['subjective'] = criteria[k]
            else:
                new_criteria['objective'] = criteria[k]
    else:
        assert isinstance(criteria, str)
        new_criteria = {'subjective': criteria}
    criteria = new_criteria
    assert 'subjective' in criteria, 'No subjective criteria found in the criteria dict'

    prompts = {}
    # Any Creation_MMBench variant shares the same template set.
    if listinstr(['Creation_MMBench'], dataset_name):
        dataset_name = 'Creation_MMBench'
    prompts['subjective'] = prompt_dict[dataset_name]['subjective'].format(
        instructions=line['question'],
        criteria=criteria['subjective'],
        reference_answer_by_gpt4o=line['reference_answer_by_gpt4o'],
        prediction=line['prediction']
    )
    if 'objective' in criteria:
        # Use the ground-truth template only when a non-empty GT is present.
        if 'ground_truth' in line and (not pd.isna(line['ground_truth'])) and line['ground_truth'] != '':
            prompts['objective'] = prompt_dict[dataset_name]['objective_with_gt'].format(
                instructions=line['question'],
                criteria=criteria['objective'],
                groundtruth=line['ground_truth'],
                reference_answer_by_gpt4o=line['reference_answer_by_gpt4o'],
                prediction=line['prediction'])
        else:
            prompts['objective'] = prompt_dict[dataset_name]['objective_without_gt'].format(
                instructions=line['question'],
                criteria=criteria['objective'],
                reference_answer_by_gpt4o=line['reference_answer_by_gpt4o'],
                prediction=line['prediction'])
    return prompts
375
+
376
+
377
def Generate_Creation_MMBench_judge(model, image_list, prompt):
    """Run the judge model once per prompt section.

    The 'subjective' prompt is sent together with the sample images (when
    any are provided); every other section is sent as plain text.

    Args:
        model: judge model exposing a ``generate`` method.
        image_list: list of local image paths for this sample (may be empty).
        prompt: dict mapping section name ('subjective'/'objective') to text.

    Returns:
        dict mapping each section name to the judge's raw response.

    Raises:
        ValueError: if any image path is unreadable.
    """
    assert isinstance(prompt, dict)
    response = {}
    for section, text in prompt.items():
        if image_list and section == 'subjective':
            msgs = []
            for img_path in image_list:
                if not read_ok(img_path):
                    raise ValueError(f"Image not found: {img_path}")
                msgs.append({'type': 'image', 'value': img_path})
            msgs.append({'type': 'text', 'value': text})
            response[section] = model.generate(msgs)
        else:
            response[section] = model.generate(text)
    return response
394
+
395
+
396
def extract_subjective(inp, dataset_name):
    """Parse the judge's final pairwise verdict (e.g. 'A>B') from its response.

    Scans each response line for the dataset-specific verdict marker and
    extracts the token inside double square brackets; returns None when no
    line carries a recognised verdict.
    """
    markers = {
        'LiveMMBench_Creation': 'FINAL CONCLUSION:',
        'Creation_MMBench': 'FINAL VERDICT IS:'
    }
    valid_verdicts = {
        'A>>B', 'A>B', 'A=B', 'B>A', 'B>>A',
        'B<<A', 'B<A', 'B=A', 'A<B', 'A<<B'
    }
    marker = markers[dataset_name]
    for raw_line in inp.split('\n'):
        upper = raw_line.upper()
        if marker not in upper:
            continue
        found = re.search(r'\[\[\s*(.*?)\s*\]\]', upper)
        if found is None:
            continue
        verdict = found.group(1).replace(' ', '')
        if verdict in valid_verdicts:
            return verdict
    return None
417
+
418
+
419
def extract_objective(inp, dataset_name):
    """Parse 'Response A/B ... Score: X/10' lines from the judge's objective response.

    Returns:
        'NO_OBJECTIVE' when there is no objective response at all (None/NaN/empty),
        'a|b' — two floats joined by '|' (response A's score first) when both
        scores were parsed and lie in [0, 10],
        None when parsing failed.
    """
    # Dataset-specific line prefixes (matched after upper-casing the line).
    mapping_dict = {
        'LiveMMBench_Creation': {
            'A': 'RESPONSE A ALIGNMENT SCORE:',
            'B': 'RESPONSE B ALIGNMENT SCORE:'
        },
        'Creation_MMBench': {
            'A': 'RESPONSE A VISUAL FACTUALITY SCORE:',
            'B': 'RESPONSE B VISUAL FACTUALITY SCORE:'
        },
    }
    if pd.isna(inp) or inp is None or inp == '':
        return 'NO_OBJECTIVE'
    lines = inp.split('\n')
    a_score, b_score = None, None
    for line in lines:
        line = line.upper()
        # Strip smart quotes / markdown bold that judges sometimes emit.
        line = re.sub(r"[“”*]", "", line)
        if line.startswith(mapping_dict[dataset_name]['A']):
            rem = line.split(mapping_dict[dataset_name]['A'])[1].strip()
            rem = rem.split('/')[0].strip()
            try:
                a_score = float(rem)
            except ValueError:
                # Not a number after the marker; keep scanning later lines.
                continue
        elif line.startswith(mapping_dict[dataset_name]['B']):
            rem = line.split(mapping_dict[dataset_name]['B'])[1].strip()
            rem = rem.split('/')[0].strip()
            try:
                b_score = float(rem)
            except ValueError:
                continue
    if a_score is not None and b_score is not None and (0 <= a_score <= 10) and (0 <= b_score <= 10):
        return f'{a_score}|{b_score}'
    else:
        return None
456
+
457
+
458
def Creation_MMBench_extract(judge_response_pkl, org_data, dataset_name):
    """Attach raw judge responses and parsed scores to a copy of the eval data.

    Adds four columns: the raw subjective/objective judge texts and the
    parsed subjective verdict / objective score strings.
    """
    from copy import deepcopy
    data = deepcopy(org_data)
    subj_judge, obj_judge = [], []
    for idx in data['index']:
        entry = judge_response_pkl[idx]
        subj_judge.append(entry['subjective'])
        obj_judge.append(entry.get('objective', None))
    data['subjective_judge'] = subj_judge
    data['objective_judge'] = obj_judge
    data['subjective_score'] = [extract_subjective(resp, dataset_name) for resp in subj_judge]
    data['objective_score'] = [extract_objective(resp, dataset_name) for resp in obj_judge]
    return data
466
+
467
+
468
def get_dimension_rating(score_file_name, rev=False):
    """Aggregate parsed scores into overall and per-task rating summaries.

    Args:
        score_file_name: path to the score file written by
            Creation_MMBench_extract (must contain task_name,
            subjective_score and objective_score columns).
        rev: currently unused; kept for interface compatibility — TODO confirm
            whether any caller still passes it.

    Returns:
        dict mapping 'overall' and each task name to a summary dict (parse
        rates, verdict distribution, reward, objective means), plus the raw
        tallies under the 'raw' key (consumed later by merge_dual).
    """
    def get_pw_score(text):
        # Map the pairwise verdict token to a signed integer in [-2, 2].
        # Per this table, 'A<<B'/'B>>A' maps to +2, i.e. positive favours B.
        if 'A<<B' in text or 'B>>A' in text:
            return 2
        elif 'A<B' in text or 'B>A' in text:
            return 1
        elif 'A=B' in text or 'B=A' in text:
            return 0
        elif 'A>B' in text or 'B<A' in text:
            return -1
        elif 'A>>B' in text or 'B<<A' in text:
            return -2
        else:
            return None

    score_file = load(score_file_name)
    # Per-bucket tally template; deep-copied so each bucket owns its lists.
    base_dict = {'sub_valid': 0, 'sub_missing': 0, 'sub_score': [], 'obj_valid': 0, 'obj_missing': 0, 'obj_ref_score': [], 'obj_score': []}
    return_dict = {'overall': cp.deepcopy(base_dict)}

    for idx, item in score_file.iterrows():
        task_name = item['task_name']
        if task_name not in return_dict.keys():
            return_dict[task_name] = cp.deepcopy(base_dict)

        # Subjective verdict: count into both the overall and the task bucket.
        if not pd.isna(item['subjective_score']):
            for k in ['overall', task_name]:
                return_dict[k]['sub_valid'] += 1
                return_dict[k]['sub_score'].append(get_pw_score(item['subjective_score']))
        else:
            return_dict['overall']['sub_missing'] += 1
            return_dict[task_name]['sub_missing'] += 1

        # 'NO_OBJECTIVE' marks samples without objective criteria: skip them
        # entirely so they count as neither valid nor missing.
        if item['objective_score'] == 'NO_OBJECTIVE':
            continue
        elif not pd.isna(item['objective_score']):
            score = item['objective_score']
            assert '|' in score
            # Stored as 'a|b' by extract_objective; the first value is
            # treated as the reference-side score here.
            ref_score, score = [float(x) for x in score.split('|')]
            for k in ['overall', task_name]:
                return_dict[k]['obj_valid'] += 1
                return_dict[k]['obj_score'].append(score)
                return_dict[k]['obj_ref_score'].append(ref_score)
        else:
            return_dict['overall']['obj_missing'] += 1
            return_dict[task_name]['obj_missing'] += 1

    final_res = {}

    for k, v in return_dict.items():
        res = {}
        res['sub_parse_ok'] = v['sub_valid'] / (v['sub_valid'] + v['sub_missing'])
        dist = defaultdict(lambda: 0)
        for x in v['sub_score']:
            dist[x] += 1
        # Only the five verdict buckets may appear, and counts must tally.
        assert len(dist) <= 5 and sum(list(dist.values())) == v['sub_valid']
        if v['sub_valid']:
            # NOTE: the comprehension variable k shadows the outer loop k,
            # but comprehension scope keeps the outer k intact.
            res['sub_dist'] = {k: dist[k] / v['sub_valid'] for k in [-2, -1, 0, 1, 2]}
            # Reward in [-100, 100]: strong/weak wins and losses weighted 100/50.
            res['sub_reward'] = (-100 * dist[-2] - 50 * dist[-1] + 50 * dist[1] + 100 * dist[2]) / v['sub_valid']

        if v['obj_valid'] + v['obj_missing']:
            res['obj_parse_ok'] = v['obj_valid'] / (v['obj_valid'] + v['obj_missing'])
            if v['obj_valid']:
                res['obj_score'] = sum(v['obj_score']) / v['obj_valid']
                res['obj_ref_score'] = sum(v['obj_ref_score']) / v['obj_valid']
        final_res[k] = res

    # Keep the raw tallies so merge_dual can combine forward/reverse passes.
    final_res['raw'] = return_dict
    return final_res
536
+
537
+
538
def merge_dual(raw, raw_dual, dataset_name):
    """Merge forward and reversed (dual-eval) raw tallies into final ratings.

    In the reversed pass the prediction and the GPT-4o reference swap sides,
    so subjective pairwise scores are negated and the objective score /
    reference-score lists are swapped before merging.  For Creation_MMBench
    the per-task tallies are additionally rolled up into main categories.

    Args:
        raw: rating['raw'] dict from the forward pass (mutated in place).
        raw_dual: rating['raw'] dict from the reversed pass.
        dataset_name: dataset identifier, selects category handling.

    Returns:
        dict of per-key rating summaries plus 'raw' and 'category_raw'
        entries; keys renamed via key_mapping for Creation_MMBench.
    """
    final_res = {}
    category_raw = {}
    for k, v in raw.items():
        # Tally shape: {'sub_valid', 'sub_missing', 'sub_score', 'obj_valid',
        # 'obj_missing', 'obj_ref_score', 'obj_score'}
        dual_v = raw_dual[k]
        v['sub_valid'] += dual_v['sub_valid']
        v['sub_missing'] += dual_v['sub_missing']
        # Reversed pass swapped A and B, so its pairwise scores flip sign.
        v['sub_score'].extend([-x for x in dual_v['sub_score']])
        v['obj_valid'] += dual_v['obj_valid']
        v['obj_missing'] += dual_v['obj_missing']
        # Likewise, the reversed pass's reference scores belong to the model
        # side here, and vice versa.
        v['obj_score'].extend(dual_v['obj_ref_score'])
        v['obj_ref_score'].extend(dual_v['obj_score'])
        raw[k] = v

        res = {}
        res['sub_parse_ok'] = v['sub_valid'] / (v['sub_valid'] + v['sub_missing'])
        dist = defaultdict(lambda: 0)
        for x in v['sub_score']:
            dist[x] += 1
        assert len(dist) <= 5 and sum(list(dist.values())) == v['sub_valid']
        res['sub_dist'] = {k: dist[k] / v['sub_valid'] for k in [-2, -1, 0, 1, 2]}
        res['win_rate'] = (dist[2] + dist[1]) / v['sub_valid'] * 100
        res['sub_reward'] = (-100 * dist[-2] - 50 * dist[-1] + 50 * dist[1] + 100 * dist[2]) / v['sub_valid']

        if v['obj_valid'] + v['obj_missing']:
            res['obj_parse_ok'] = v['obj_valid'] / (v['obj_valid'] + v['obj_missing'])
            if v['obj_valid']:
                res['obj_score'] = sum(v['obj_score']) / v['obj_valid']
                res['obj_ref_score'] = sum(v['obj_ref_score']) / v['obj_valid']
        final_res[k] = res

        if listinstr(['Creation_MMBench'], dataset_name):
            # Roll per-task tallies up into their main category.
            pass_flag = False
            for main_category_name, category_list in creation_mmbench_category_dict.items():
                # NOTE(review): this first test does not depend on the loop
                # variable — it is effectively a pre-check that fires on the
                # first iteration for category-level / 'overall' keys.
                if k in creation_mmbench_category_dict.keys() or k == 'overall':
                    pass_flag = True
                    break
                if k in category_list:
                    if main_category_name not in category_raw.keys():
                        category_raw[main_category_name] = {'sub_valid': 0, 'sub_missing': 0, 'sub_score': [], 'obj_valid': 0, 'obj_missing': 0, 'obj_ref_score': [], 'obj_score': []}
                    category_raw[main_category_name]['sub_valid'] += v['sub_valid']
                    category_raw[main_category_name]['sub_missing'] += v['sub_missing']
                    category_raw[main_category_name]['sub_score'].extend(v['sub_score'])
                    category_raw[main_category_name]['obj_valid'] += v['obj_valid']
                    category_raw[main_category_name]['obj_missing'] += v['obj_missing']
                    category_raw[main_category_name]['obj_score'].extend(v['obj_score'])
                    category_raw[main_category_name]['obj_ref_score'].extend(v['obj_ref_score'])
                    pass_flag = True
                    break
            if not pass_flag:
                raise Exception(f"Error: {k} not found in type_dict")

    # Second pass: compute the same summary statistics per main category.
    for k, v in category_raw.items():
        res = {}
        res['sub_parse_ok'] = v['sub_valid'] / (v['sub_valid'] + v['sub_missing'])
        dist = defaultdict(lambda: 0)
        for x in v['sub_score']:
            dist[x] += 1
        assert len(dist) <= 5 and sum(list(dist.values())) == v['sub_valid']
        res['sub_dist'] = {k: dist[k] / v['sub_valid'] for k in [-2, -1, 0, 1, 2]}
        res['win_rate'] = (dist[2] + dist[1]) / v['sub_valid'] * 100
        res['sub_reward'] = (-100 * dist[-2] - 50 * dist[-1] + 50 * dist[1] + 100 * dist[2]) / v['sub_valid']

        if v['obj_valid'] + v['obj_missing']:
            res['obj_parse_ok'] = v['obj_valid'] / (v['obj_valid'] + v['obj_missing'])
            if v['obj_valid']:
                res['obj_score'] = sum(v['obj_score']) / v['obj_valid']
                res['obj_ref_score'] = sum(v['obj_ref_score']) / v['obj_valid']
        final_res[k] = res

    final_res['raw'] = raw
    final_res['category_raw'] = category_raw
    if listinstr(['Creation_MMBench'], dataset_name):
        # Rename internal keys to their public report names (key_mapping).
        final_res = rename_keys(final_res, key_mapping)
    return final_res
614
+
615
+
616
class CreationMMBenchDataset(ImageBaseDataset):
    """Creation-MMBench: open-ended creative VQA scored by an LLM judge.

    Subjective quality is judged pairwise against a GPT-4o reference answer,
    with an optional dual-evaluation pass that swaps the two sides (to cancel
    position bias) before merging; visual factuality is scored 0-10 when a
    sample defines objective criteria.
    """

    TYPE = 'CreationVQA'
    DATASET_URL = {
        'LiveMMBench_Creation': '',
        'Creation_MMBench': 'https://opencompass.openxlab.space/utils/VLMEval/Creation_MMBench.tsv'
    }
    DATASET_MD5 = {
        'Creation_MMBench':'870c0332a9c6a169d0ac9b8574c245fe'
    }

    # Returns a list of local image paths (decoding base64 payloads on demand).
    def dump_image(self, line):
        """Materialise the sample's image(s) under img_root and return local paths."""
        os.makedirs(self.img_root, exist_ok=True)

        if 'image' in line:
            if isinstance(line['image'], list):
                # Multi-image sample: decode each base64 payload once, cache on disk.
                tgt_path = []
                assert 'image_path' in line
                for img, im_name in zip(line['image'], line['image_path']):
                    path = osp.join(self.img_root, im_name)
                    if not read_ok(path):
                        decode_base64_to_image_file(img, path)
                    tgt_path.append(path)
            else:
                if 'image_path' in line:
                    # A single image may still arrive as a one-element list.
                    assert isinstance(line['image_path'], str) or (isinstance(line['image_path'], list) and len(line['image_path']) == 1)
                    if isinstance(line['image_path'], list):
                        line['image_path'] = line['image_path'][0]
                    tgt_path = osp.join(self.img_root, line['image_path'])
                else:
                    # No explicit name: derive the file name from the sample index.
                    tgt_path = osp.join(self.img_root, f"{line['index']}.jpg")
                if not read_ok(tgt_path):
                    decode_base64_to_image_file(line['image'], tgt_path)
                tgt_path = [tgt_path]
        else:
            # No embedded image data: paths must already point at files on disk.
            assert 'image_path' in line
            tgt_path = toliststr(line['image_path'])

        return tgt_path

    def evaluate(self, eval_file, **judge_kwargs):
        """Judge predictions with an LLM and return the overall rating dict.

        With dual_eval (default True) the method first evaluates a copy of
        the file with prediction and reference swapped (recursively, with
        dual_eval disabled), then merges both passes via merge_dual.
        """
        rating_rev = None
        dual_eval = judge_kwargs.pop('dual_eval', True)
        if dual_eval:
            print('Dual Evaluation Strategy is enabled.')
            src = load(eval_file)
            tgt = load(eval_file)
            # Swap sides: the model prediction becomes the "reference" and
            # vice versa for the reversed pass.
            tgt['reference_answer_by_gpt4o'] = src['prediction']
            tgt['prediction'] = src['reference_answer_by_gpt4o']
            tgt_file_name = get_intermediate_file_path(eval_file, '_rev')
            dump(tgt, tgt_file_name)
            judge_kwargs['dual_eval'] = False
            rating_rev = self.evaluate(tgt_file_name, **judge_kwargs)
        judge_kwargs.pop('dual_eval', None)

        score_file = get_intermediate_file_path(eval_file, '_score')
        tgt_file = get_intermediate_file_path(eval_file, '_rating', 'json')

        model = judge_kwargs.pop('model', 'gpt-4o-0806')
        model_name = model.split('/')[-1] if '/' in model else model
        tmp_file = get_intermediate_file_path(eval_file, f'_{model_name}', 'pkl')

        nproc = judge_kwargs.pop('nproc', 4)

        if not osp.exists(score_file):
            data = load(eval_file)
            lt = len(data)
            lines = [data.iloc[i] for i in range(len(data))]
            judge_kwargs['max_tokens'] = 4096

            model = build_judge(model=model, **judge_kwargs)
            assert model.working(), ('CreationMMBench evaluation requires a working OPENAI API\n' + DEBUG_MESSAGE)

            prompts = [build_prompt(line, self.dataset_name) for line in lines]

            indices = [line['index'] for line in lines]

            if listinstr(['Creation_MMBench'], self.dataset_name):
                # Resolve every sample's images to absolute local paths so the
                # judge can see them alongside the subjective prompt.
                no_relative_image_list = [self.dump_image(line) for idx, line in self.data.iterrows()]
                assert len(no_relative_image_list) == len(lines)
                image_list = []
                for subimage_list in no_relative_image_list:
                    sublist = []
                    for image_path in subimage_list:
                        image_path = osp.join(self.img_root, image_path)
                        assert osp.exists(image_path), f"Image not found: {image_path}"
                        sublist.append(image_path)
                    image_list.append(sublist)
            else:
                image_list = [[] for _ in range(len(lines))]
            # Argument order matches Generate_Creation_MMBench_judge(model, image_list, prompt).
            tups = [(model, image, prompt) for prompt, image in zip(prompts, image_list)]

            ans = {}
            if osp.exists(tmp_file):
                # Resume: keep cached judge answers, drop failed ones, and
                # only re-query the remaining samples.
                ans = load(tmp_file)
                ans = {k: v for k, v in ans.items() if model.fail_msg not in str(v)}
                tups = [x for x, i in zip(tups, indices) if i not in ans]
                indices = [i for i in indices if i not in ans]

            if len(indices):
                _ = track_progress_rich(
                    Generate_Creation_MMBench_judge,
                    tups,
                    nproc=nproc,
                    chunksize=nproc,
                    keys=indices,
                    save=tmp_file,
                )
                ans = load(tmp_file)
            data = Creation_MMBench_extract(ans, data, self.dataset_name)
            dump(data, score_file)

        rating = get_dimension_rating(score_file)
        dump(rating, tgt_file)

        if dual_eval:
            raw = rating['raw']
            # NOTE(review): relies on the reversed pass's rating file being
            # named '<stem>_rev_rating.json' next to '<stem>_rating.json' —
            # i.e. on get_intermediate_file_path's suffixing convention.
            rev_tgt_file = tgt_file.replace('rating.json', 'rev_rating.json')
            rev_raw = load(rev_tgt_file)['raw']
            merged_rating = merge_dual(raw, rev_raw, self.dataset_name)
            dump(merged_rating, tgt_file.replace('rating.json', 'merged_rating.json'))
            print(f"Rating:\n{rating['overall']}\n\nDual Rating:\n{merged_rating['overall']}")
            return merged_rating['overall']
        else:
            return rating['overall']
VLMEvalKit-sudoku/vlmeval/dataset/dude.py ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from typing import List
3
+
4
+ from .utils.judge_util import build_judge
5
+ from .image_base import ImageBaseDataset
6
+ from .mmlongbench import concat_images, MMLongBench_auxeval, anls_compute
7
+ from ..smp import *
8
+ from ..smp.file import get_intermediate_file_path
9
+
10
+
11
+ FAIL_MSG = 'Failed to obtain answer via API.'
12
+
13
+
14
def DUDE_acc(result_file):
    """Score every row of a DUDE result file with ANLS and summarise.

    Writes the per-sample 'score' column back into the file, then returns a
    one-row DataFrame with columns category / num / avg_score.
    """
    data = load(result_file)
    score_list = []
    total = 0.0
    for row_idx in range(len(data)):
        item = data.iloc[row_idx]
        # NaN answers denote unanswerable questions in the source data.
        if isinstance(item['answer'], float) and math.isnan(item['answer']):
            item['answer'] = 'Not answerable'
        item['answer'] = item['answer'].lower()
        item['pred'] = item['pred'].lower()
        sample_score = anls_compute(item['answer'], item['pred'])
        score_list.append(sample_score)
        total += sample_score

    data['score'] = score_list
    dump(data, result_file)

    res = dict()
    res['category'], res['num'], res['avg_score'] = ['anls'], [len(data)], [total / len(data)]
    return pd.DataFrame(res)
36
+
37
+
38
class DUDE(ImageBaseDataset):
    """DUDE: VQA over multi-page PDF documents, scored with ANLS.

    PDF pages are rasterised to images; for non-API models the pages are
    tiled into a limited number of concatenated images according to the
    per-model settings in SUPPORTED_MODELS.
    """

    TYPE = 'VQA'

    DATASET_URL = {
        'DUDE': 'https://opencompass.openxlab.space/utils/VLMEval/DUDE.tsv',
        'DUDE_MINI': 'https://opencompass.openxlab.space/utils/VLMEval/DUDE_MINI.tsv',
    }
    DATASET_MD5 = {
        'DUDE': '130d860d08206e1e407cd77150c10d88',
        'DUDE_MINI': 'e0c0d998114f0cca7516d12039d2b538',
    }

    # Per-model (concat_num, column_num): how many tiled images to build and
    # how many columns per tile; column_num == -1 yields one all-pages tile.
    SUPPORTED_MODELS = {
        'GPT4': (1, 1),
        'GPT4V': (1, 1),
        'GPT4V_HIGH': (1, 1),
        'GPT4o': (1, 1),
        'GPT4o_HIGH': (1, 1),
        'GPT4o_MINI': (1, 1),
        'XComposer2d5': (1, -1),
        'XComposer2_4KHD': (1, -1),
        'MiniCPM-Llama3-V-2_5': (1, 5),
        'InternVL-Chat-V1-5': (5, 2),
    }

    def __init__(self, dataset, **kwargs):
        # Only models with tiling settings in SUPPORTED_MODELS are accepted.
        self.model_list = list(self.SUPPORTED_MODELS.keys())
        model_name = kwargs['model']
        if not listinstr(self.model_list, model_name):
            raise AssertionError("{} doesn't support the evaluation on DUDE.".format(model_name))
        super(DUDE, self).__init__(dataset)

        # API (GPT4*) models receive raw page images; others get tiled images.
        self.is_api = True if listinstr(['GPT4'], model_name) else False
        self.max_pages = 120  # hard cap on PDF pages rendered per document
        concat_num, column_num = self.SUPPORTED_MODELS.get(model_name)
        self.concat_num = concat_num
        self.column_num = column_num

    def prepare_tsv(self, url, file_md5=None):
        """Download the dataset tsv into LMUDataRoot (if missing or corrupt) and load it."""
        data_root = LMUDataRoot()
        os.makedirs(data_root, exist_ok=True)
        file_name = url.split('/')[-1]
        data_path = osp.join(data_root, file_name)
        if osp.exists(data_path) and (file_md5 is None or md5(data_path) == file_md5):
            pass
        else:
            warnings.warn('The dataset tsv is not downloaded')
            download_file(url, data_path)
        return load(data_path)

    def dump_image(self, origin_line):
        """Render the sample's PDF to page images (cached on disk), optionally
        tile them, and return the list of image paths to feed the model."""
        os.makedirs(self.img_root, exist_ok=True)
        try:
            import fitz  # PyMuPDF, required to rasterise PDF pages
        except Exception as e:
            logging.critical(f'{type(e)}: {e}')
            logging.critical('Please use `pip install pymupdf` to parse PDF files.')

        line = origin_line.copy()
        if not isinstance(line['image_path'], List):
            line['image_path'] = [line['image_path']]
        line['image_path'] = line['image_path'][:self.max_pages]
        # Skip PDF rasterisation entirely when every page image is cached.
        skip_pdf_parse = True
        for im_name in line['image_path']:
            path = osp.join(self.img_root, im_name)
            if not read_ok(path):
                skip_pdf_parse = False
                break

        # Just for being compatible with the zipped loop: zip(line['image'], line['image_path'])
        if skip_pdf_parse:
            line['image'] = line['image_path']
        else:
            # line['image'] holds the whole PDF as base64; render each page at 144 dpi.
            pdf_data = base64.b64decode(line['image'])
            pdf_file = io.BytesIO(pdf_data)
            encoded_images = []
            with fitz.open(stream=pdf_file, filetype='pdf') as doc:
                doc = doc[:self.max_pages]
                for page in doc:
                    image = page.get_pixmap(dpi=144)
                    image_file = io.BytesIO(image.tobytes(output='png'))
                    image = Image.open(image_file)
                    encoded_image = encode_image_to_base64(image)
                    encoded_images.append(encoded_image)
            line['image'] = encoded_images
            print('process {}'.format(line['doc_id']))

        if 'image' in line:
            if isinstance(line['image'], list):
                tgt_path = []
                assert 'image_path' in line
                for img, im_name in zip(line['image'], line['image_path']):
                    path = osp.join(self.img_root, im_name)
                    if not read_ok(path):
                        decode_base64_to_image_file(img, path)
                    tgt_path.append(path)
            else:
                tgt_path = osp.join(self.img_root, f"{line['index']}.jpg")
                if not read_ok(tgt_path):
                    decode_base64_to_image_file(line['image'], tgt_path)
                tgt_path = [tgt_path]
        else:
            assert 'image_path' in line
            tgt_path = toliststr(line['image_path'])

        # Tile page images into composite images for non-API models.
        if self.concat_num > 0 and not self.is_api:
            concatenated_images = concat_images(tgt_path, max_concat=self.concat_num, column_num=self.column_num)

            old_tgt_path = tgt_path
            assert isinstance(old_tgt_path, list)
            if self.column_num != -1:
                tgt_path = [
                    '_'.join(old_tgt_path[0].split('_')[:-1]) + '_concat{}_{}.jpg'.format(self.concat_num, i)
                    for i in range(len(concatenated_images))
                ]
            else:
                tgt_path = ['_'.join(old_tgt_path[0].split('_')[:-1]) + '_concat_all.jpg']

            for path, concatenated_image in zip(tgt_path, concatenated_images):
                if not read_ok(path):
                    decode_base64_to_image_file(encode_image_to_base64(concatenated_image), path)
                    num_images, image_size = len(old_tgt_path), concatenated_image.size
                    print('concat {} images to a new one with size {}. save at {}'.format(num_images, image_size, path))
        return tgt_path

    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        # NOTE(review): declared @classmethod but the first parameter is named
        # `self`; it only calls module-level helpers so this works — confirm
        # whether the decorator is intentional.
        """Judge predictions via MMLongBench_auxeval, score with ANLS, save csv."""
        logger = get_logger('Evaluation')
        model = judge_kwargs['model']

        storage = get_intermediate_file_path(eval_file, f'_{model}')
        tmp_file = get_intermediate_file_path(eval_file, f'_{model}', 'pkl')

        if osp.exists(storage):
            logger.warning(f'GPT scoring file {storage} already exists, will reuse it in DUDE_eval. ')
        else:
            data = load(eval_file)
            model = build_judge(max_tokens=128, **judge_kwargs)
            lt = len(data)
            lines = [data.iloc[i] for i in range(lt)]
            tups = [(model, line) for line in lines]
            indices = [line['index'] for line in lines]

            ans = {}
            if osp.exists(tmp_file):
                ans = load(tmp_file)
                tups = [x for x, i in zip(tups, indices) if i not in ans]
                indices = [i for i in indices if i not in ans]

            if len(indices):
                new_results = list()
                for model, line in tqdm(tups):
                    res = MMLongBench_auxeval(model, line)
                    new_results.append(res)

                # NOTE(review): `new_results` only covers the lines evaluated
                # in THIS run, yet it is zipped against all indices below; when
                # a partial cache (`tmp_file`) was loaded, the zip truncates and
                # cached entries in `ans` are never merged in — confirm before
                # relying on resume-from-cache here.
                log_map, res_map, pred_map = {}, {}, {}
                all_inds = [line['index'] for line in lines]
                for k, v in zip(all_inds, new_results):
                    log_map[k] = v['log']
                    res_map[k] = v['res']
                    pred_map[k] = v['pred']
                data['res'] = [res_map[idx] for idx in data['index']]
                data['log'] = [log_map[idx] for idx in data['index']]
                data['pred'] = [pred_map[idx] for idx in data['index']]
                dump(data, storage)

        score = DUDE_acc(storage)
        score_pth = get_intermediate_file_path(storage, '_score', 'csv')

        dump(score, score_pth)
        logger.info(f'DUDE successfully finished evaluating {eval_file}, results saved in {score_pth}')
        logger.info('Score: ')
        logger.info(score)
VLMEvalKit-sudoku/vlmeval/dataset/image_mcq.py ADDED
The diff for this file is too large to render. See raw diff
 
VLMEvalKit-sudoku/vlmeval/dataset/m4bench.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ from tqdm import tqdm
4
+ import pandas as pd
5
+
6
+ from os import path as osp
7
+ from .image_base import ImageBaseDataset
8
+ from .utils import build_judge, DEBUG_MESSAGE
9
+ from ..smp import decode_base64_to_image_file, load, dump, get_intermediate_file_path
10
+ FAIL_MSG = 'Failed to obtain answer via API.'
11
+
12
+
13
class M4Bench(ImageBaseDataset):
    """
    Dataset class for M4Bench, handling single and dual image inputs.
    """
    TYPE = 'M4Bench'

    DATASET_URL = {
        "State_Invariance": "https://huggingface.co/datasets/Anonymous8976/M4Bench/resolve/main/State_Invariance.tsv",  # noqa: E501
        "State_Comparison": "https://huggingface.co/datasets/Anonymous8976/M4Bench/resolve/main/State_Comparison.tsv",  # noqa: E501
        "Spatial_Perception": "https://huggingface.co/datasets/Anonymous8976/M4Bench/resolve/main/Spatial_Perception.tsv",  # noqa: E501
        "Instance_Comparison": "https://huggingface.co/datasets/Anonymous8976/M4Bench/resolve/main/Instance_Comparison.tsv",  # noqa: E501
        "Detailed_Difference": "https://huggingface.co/datasets/Anonymous8976/M4Bench/resolve/main/Detailed_Difference.tsv"  # noqa: E501
    }

    DATASET_MD5 = {
        "State_Invariance": "ad9723d478d4696dfc3b18bcaeca89b6",
        "State_Comparison": "41999997360a88e6e388b9a5438a45eb",
        "Spatial_Perception": "7059e29d15ad4379b6f0c0f1801dafe5",
        "Instance_Comparison": "9a7f282d0a092b617147a36693df3461",
        "Detailed_Difference": "f1cd60c1c1144768cd978efce5ba93a8"
    }

    def build_prompt(self, line):
        """
        Builds a multimodal prompt (two images + query text) for the given data line.

        Images are preferentially decoded from base64 into local files under
        img_root; when only URLs are present they are passed through directly.
        """
        HF_HEADER = "https://huggingface.co/datasets/Anonymous8976/M4Bench/resolve/main/data/"  # noqa: E501

        if isinstance(line, int):
            line = self.data.iloc[line]

        image1_base64 = line.get('image1', '')
        image2_base64 = line.get('image2', '')
        image1_url = line.get('image1_path', '')
        image2_url = line.get('image2_path', '')

        msgs = []

        if image1_base64 and image2_base64 and image1_url and image2_url:
            # Derive a local cache path by stripping the HF URL prefix.
            image1_base_path = image1_url.replace(HF_HEADER, '')
            image1_local_path = osp.join(self.img_root, image1_base_path)

            image2_base_path = image2_url.replace(HF_HEADER, '')
            image2_local_path = osp.join(self.img_root, image2_base_path)

            if not osp.exists(image1_local_path) or not osp.exists(image2_local_path):
                decode_base64_to_image_file(image1_base64, image1_local_path)
                decode_base64_to_image_file(image2_base64, image2_local_path)

            # If both images are in base64 format
            msgs = [
                dict(type='image', value=image1_local_path),
                dict(type='image', value=image2_local_path)
            ]
        elif image1_url and image2_url:
            # If both images are URLs
            msgs = [
                dict(type='image', value=image1_url),
                dict(type='image', value=image2_url)
            ]
        else:
            raise ValueError("Both images must be provided either as base64 or URLs.")  # noqa: E501

        query = line['query']

        msgs.append(dict(type='text', value=query))
        return msgs

    def evaluate(self, eval_file, **judge_kwargs):
        """
        Evaluates the model predictions against the ground truth.

        Predictions are mapped to option letters — by an LLM judge when
        judge_kwargs are given (falling back to regex parsing on error),
        otherwise by regex parsing alone — and compared to the 'response'
        column.  Returns {'acc': accuracy_percent, 'details': details_file}.
        """
        results_df = load(eval_file)

        # Infer which M4Bench split this file belongs to from its path.
        dataset_name = None
        for name in self.DATASET_URL:
            if name in eval_file:
                dataset_name = name
                break

        if dataset_name is None:
            raise ValueError(
                f"Could not determine dataset name from eval_file path: {eval_file}")  # noqa: E501

        # # Load ground truth data
        # gt_file = get_cache_path(self.DATASET_URL[dataset_name])
        # gt_df = pd.read_csv(gt_file, sep='\t', on_bad_lines='warn')

        # # Merge predictions with ground truth
        df = results_df.copy()

        def get_ans(s):
            # Extract an option letter: either a leading '(X)' or a bare
            # leading uppercase letter; None when neither form matches.
            s = str(s)
            match = re.search(r'^\s*\(([A-Z])\)', s)
            if match:
                return match.group(1)

            options = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
            for op in options:
                if s.startswith(op):
                    return op
            return None

        if judge_kwargs:
            try:
                # Use LLM as a judge to parse the prediction
                judge = build_judge(**judge_kwargs)

                # Prepare data for the judge
                def extract_question(q):
                    return q.split('\n(')[0]

                def extract_options(q):
                    parts = q.split('\n(')
                    return '\n('.join(parts[1:]) if len(parts) > 1 else ''

                df['question_text'] = df['query'].apply(extract_question)
                df['options_text'] = df['query'].apply(extract_options)

                prompt_tmpl = (
                    'You are an AI assistant who will help me to match '
                    'an answer with several options of a single-choice question. '  # noqa: E501
                    'You are provided with a question, several options, and an answer, '  # noqa: E501
                    'and you need to find which option is most similar to the answer. '  # noqa: E501
                    'If the meaning of all options are significantly different from the answer, output Z. '  # noqa: E501
                    'Your should output a single uppercase character in A, B, C, D (if they are valid options), and Z. \n'  # noqa: E501
                    'Example 1: \n'
                    'Question: What is the main object in image?\nOptions: A. teddy bear B. rabbit C. cat D. dog\n'  # noqa: E501
                    'Answer: a cute teddy bear\nYour output: A\n'
                    'Example 2: \n'
                    'Question: What is the main object in image?\nOptions: A. teddy bear B. rabbit C. cat D. dog\n'  # noqa: E501
                    'Answer: Spider\nYour output: Z\n'
                    'Example 3: \n'
                    'Question: {question}\nOptions: {options}\nAnswer: {prediction}\nYour output: '  # noqa: E501
                )

                prompts = [
                    prompt_tmpl.format(
                        question=row['question_text'],
                        options=row['options_text'],
                        prediction=row['prediction']
                    )
                    for _, row in tqdm(df.iterrows(), total=len(df), desc="Processing rows")
                ]
                parsed_pred = []

                for prompt in tqdm(prompts, desc="Calling judge"):
                    input_msg = [
                        {
                            "role": "user",
                            "content": [
                                {"type": "text", "value": prompt}
                            ]
                        }
                    ]

                    # generate_inner returns (ret_code, answer, raw_response);
                    # only the answer text is parsed here.
                    _, judge_output, res = judge.generate_inner(input_msg)
                    judge_ans = get_ans(judge_output)
                    parsed_pred.append(judge_ans)
                df['parsed_pred'] = pd.Series(parsed_pred)

            except Exception as e:
                # Judge unavailable or failed: fall back to regex parsing.
                print(f"Error during judge evaluation: {e}")
                print(DEBUG_MESSAGE)
                df['parsed_pred'] = df['prediction'].apply(get_ans)
        else:
            # Fallback to simple parsing if no judge is provided
            df['parsed_pred'] = df['prediction'].apply(get_ans)

        # Calculate score: exact match against the ground-truth letter.
        df['score'] = (df['parsed_pred'] == df['response'])

        # Save detailed results
        details_file = get_intermediate_file_path(eval_file, '_details')
        dump(df, details_file)

        # Calculate and return accuracy
        acc = df['score'].mean() * 100
        results = {'acc': acc, 'details': details_file}

        return results
VLMEvalKit-sudoku/vlmeval/dataset/miabench.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+
4
+ import pandas as pd
5
+
6
+ from .image_base import ImageBaseDataset
7
+ from ..smp import *
8
+ from .utils import build_judge, DEBUG_MESSAGE
9
+ from ..utils import track_progress_rich
10
+
11
+
12
def generate_prompt(d):
    """Build the GPT judging prompt for one MIA-Bench record.

    Args:
        d: A mapping (e.g. a pandas row) with keys 'question',
            'component_weight' (python-literal string of an int list),
            'components' (python-literal string of a str list),
            'num_of_component' (1-5) and 'prediction' (model response).

    Returns:
        The grading prompt string sent to the judge model.

    Raises:
        ValueError: If ``num_of_component`` is outside the supported 1-5
            range. (Previously the branches fell through silently and the
            function crashed later with a confusing NameError on the
            unbound ``components``/``score`` locals.)
    """
    question = d['question']
    # The TSV stores lists as python-literal strings; eval() restores them.
    weights = eval(d['component_weight'])
    components = eval(d['components'])
    num_of_component = int(d['num_of_component'])
    response = d['prediction']

    # The per-count phrasing is intentionally irregular (kept verbatim so the
    # judge prompts stay identical to the published benchmark).
    if num_of_component == 1:
        components = f"The first component is: '{components[0]}'. "
        score = f"The first component is worth: {weights[0]} scores. "
    elif num_of_component == 2:
        components = f"The first component is: '{components[0]}', and the second component is '{components[1]}'. "
        score = f"The first and second component is each worth {weights[0]} and {weights[1]} scores. "
    elif num_of_component == 3:
        components = (
            f"The first component is: '{components[0]}', and the second component is '{components[1]}', "
            f"and the third component is '{components[2]}'. "
        )
        score = (
            "The first, second, and third component is each worth "
            f"{weights[0]}, {weights[1]}, and {weights[2]} scores."
        )
    elif num_of_component == 4:
        components = (
            f"The first component is: '{components[0]}', and the second component is '{components[1]}', "
            f"and the third component is '{components[2]}', and the fourth component is '{components[3]}'. "
        )
        score = (
            "The first, second, third, and fourth component is each worth "
            f"{weights[0]}, {weights[1]}, {weights[2]}, and {weights[3]} scores."
        )
    elif num_of_component == 5:
        components = (
            f"The first component is: '{components[0]}', and the second component is '{components[1]}', "
            f"and the third component is '{components[2]}', and the fourth component is '{components[3]}', "
            f"and the fifth component is '{components[4]}'. "
        )
        score = (
            "The first, second, third, fourth, and fifth component is each worth "
            f"{weights[0]}, {weights[1]}, {weights[2]}, {weights[3]}, and {weights[4]} scores."
        )
    else:
        raise ValueError(
            f"Unsupported num_of_component: {num_of_component} (expected 1-5)"
        )

    return (
        "Here is an instruction for a multimodal LLM: '"
        f"{question}"
        "'. You need to grade if the response from the model follows each component of the instruction. "
        f"{components}"
        "The response is: '"
        f"{response}"
        "'. You need to score the response and be strict. The total score ranges from 0 to 10, "
        "depending on if the response follows the instruction. "
        f"{score}"
        "List scores of each component, and the total score in one sentence in this format: "
        "score of component 1: x/2, score of component 2: y/8, total score: z/10. Then explain your reasons."
    )
67
+
68
+
69
def process_rawscore(component_type, raw_score):
    """Parse the judge's summary sentence into per-component scores.

    The judge is instructed to end with a sentence of the form
    ``score of component 1: x/2, score of component 2: y/8, total score: z/10``.
    Only the first sentence (text before the first '.') is inspected; its last
    comma-separated segment is always the total score.

    Args:
        component_type: Component names, aligned with the summary order.
        raw_score: Raw judge response text.

    Returns:
        Dict mapping each component name to its fractional score, plus a
        'total_score' entry.
    """
    segments = raw_score.split('.')[0].split(',')
    score_dict = {}
    # Every segment except the last is an individual component score.
    for name, segment in zip(component_type, segments[:-1]):
        numerator, denominator = segment.split(':')[1][1:].split('/')
        score_dict[name] = int(numerator) / int(denominator)
    # The final segment is the total score.  The original code indexed it via
    # the leaked loop variable (`first_sentence[i + 1]`), which raised a
    # NameError whenever only the total-score segment was present.
    total_ = segments[-1].split(':')[1][1:].split('/')
    score_dict['total_score'] = int(total_[0]) / int(total_[1])
    return score_dict
80
+
81
+
82
def get_score_dict(data, score_raw):
    """Aggregate per-component judge scores over all evaluated rows.

    Args:
        data: Table-like object whose 'component_type' column holds
            python-literal list strings such as ``"['a', 'b']"``.
        score_raw: Sequence of raw judge responses, aligned with ``data``.

    Returns:
        Dict mapping each component name (and 'total_score') to its mean
        score over the rows whose judge output parsed successfully.
    """
    cat_score_dict = {}
    for i in range(len(data)):
        try:
            # Strip the literal "['" / "']" wrappers, then split on "', '".
            cmp = data['component_type'][i][2:-2]
            cmp_list = cmp.split('\', \'')
            score_dict = process_rawscore(cmp_list, score_raw[i])
            for key, val in score_dict.items():
                cat_score_dict.setdefault(key, []).append(val)
        except Exception:
            # Unparseable judge output: skip the row; means are computed over
            # successfully parsed rows only.  (A bare `except:` here
            # previously also swallowed KeyboardInterrupt/SystemExit.)
            pass
    return {key: sum(val) / len(val) for key, val in cat_score_dict.items()}
100
+
101
+
102
class MIABench(ImageBaseDataset):
    """MIA-Bench: instruction-following VQA benchmark scored by a judge LLM."""

    TYPE = 'VQA'

    DATASET_URL = {
        'MIA-Bench': 'https://opencompass.openxlab.space/utils/VLMEval/Mia-Bench.tsv',
    }
    DATASET_MD5 = {
        'MIA-Bench': '0b9de595f4dd40af18a69b94d89aba82',
    }

    # NOTE(review): declared @classmethod but the first parameter is named
    # `self` — it actually receives the class object; confirm before renaming.
    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Score predictions in ``eval_file`` with a judge model.

        Sends each (prompt, image) pair to the judge, caches raw responses in
        a pickle so interrupted runs resume, then averages per-component
        scores via get_score_dict().  Returns the dict of mean scores.
        """
        judge_name = judge_kwargs.pop('model', 'gpt-4o')

        model = build_judge(model=judge_name, **judge_kwargs)

        # Intermediate artifacts: `storage` holds the judged table, `tmp_file`
        # caches raw judge responses keyed by sample index.
        storage = get_intermediate_file_path(eval_file, f'_{judge_name}')
        tmp_file = get_intermediate_file_path(eval_file, f'_{judge_name}', 'pkl')
        nproc = judge_kwargs.pop('nproc', 4)

        if not osp.exists(storage):
            data = load(eval_file)
            num_samples = len(data)
            lines = [data.loc[i] for i in range(num_samples)]
            prompts = [generate_prompt(line) for line in lines]
            # Images are not stored in the prediction file; fetch the base64
            # payloads from the original benchmark TSV, keyed by index.
            org_data = MIABench('MIA-Bench').data
            img_map = {x: y for x, y in zip(org_data['index'], org_data['image'])}
            image_b64 = [img_map[idx] for idx in data['index']]
            indices = list(data['index'])
            mm_messages = [
                dict(message=[
                    dict(type='text', value=prompt),
                    dict(type='image', value=f'data:image/jpeg;base64,{b64}')
                ])
                for prompt, b64 in zip(prompts, image_b64)
            ]

            # Resume from cached judge responses, if any.
            res = {}
            if osp.exists(tmp_file):
                res = load(tmp_file)

            # Only query the judge for samples without a cached response.
            jobs = {k: v for k, v in zip(indices, mm_messages) if k not in res}
            job_keys = list(jobs.keys())
            job_vals = [jobs[k] for k in job_keys]

            resps = track_progress_rich(
                model.generate,
                job_vals,
                nproc=nproc,
                chunksize=nproc,
                keys=job_keys,
                save=tmp_file,
            )
            for k, resp in zip(job_keys, resps):
                res[k] = resp
            data['score_raw'] = [res[idx] for idx in indices]
            dump(data, storage)

        goresult = load(storage)
        results = get_score_dict(goresult, goresult['score_raw'])
        result_pth = get_intermediate_file_path(storage, '_score', 'csv')
        results_pd = pd.DataFrame.from_dict(list(results.items()))
        dump(results_pd, result_pth)

        return results
VLMEvalKit-sudoku/vlmeval/dataset/mmbench_video.py ADDED
@@ -0,0 +1,257 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from huggingface_hub import snapshot_download
2
+ from ..smp import *
3
+ from ..smp.file import get_intermediate_file_path, get_file_extension
4
+ from .video_base import VideoBaseDataset
5
+ from .utils import build_judge, DEBUG_MESSAGE
6
+ from ..utils import track_progress_rich
7
+
8
+
9
FAIL_MSG = 'Failed to obtain answer via API.'  # sentinel stored when an API/judge call fails
10
+
11
+
12
def unwrap_hf_pkl(pth, suffix='.mp4'):
    """Restore video files bundled in pickle archives under ``pth``.

    Reads every pickle in ``<pth>/video_pkl/`` (each mapping video name to
    raw bytes) and writes each entry to ``<pth>/video/<name><suffix>``.
    A no-op (beyond listing the source dir) if ``<pth>/video/`` already
    exists.
    """
    src_dir = os.path.join(pth, 'video_pkl/')
    dst_dir = os.path.join(pth, 'video/')
    # Listing happens before the existence check, matching the original
    # behavior (a missing video_pkl/ dir raises even when video/ exists).
    pkl_paths = sorted(os.path.join(src_dir, name) for name in os.listdir(src_dir))

    if os.path.exists(dst_dir):
        print('The video file already exists.')
        return

    os.makedirs(dst_dir, exist_ok=True)
    for pkl_path in pkl_paths:
        with open(pkl_path, 'rb') as handle:
            bundle = pickle.load(handle)
        # Write every clip contained in this archive to its own file.
        for clip_name, clip_bytes in bundle.items():
            out_path = os.path.join(dst_dir, f'{clip_name}{suffix}')
            with open(out_path, 'wb') as out:
                out.write(clip_bytes)
    print('The video file has been restored and stored from the pickle file.')
31
+
32
+
33
class MMBenchVideo(VideoBaseDataset):
    """MMBench-Video: open-ended VQA over videos, scored by a judge LLM.

    Two prompting modes: 'pack' answers all questions of one video in a
    single JSON reply over sampled frames; 'nopack' asks one question per
    query, either on the raw video (for video LLMs) or on sampled frames.
    """

    # MD5 of the dataset TSV, checked by prepare_dataset() for cache validity.
    MD5 = '98f7df3eb1007fc375ea6fe88a98e2ff'
    SYS = 'You are an AI assistant responsible for answering questions about videos.'
    # Instruction template for 'pack' mode; '{}' receives the frame count.
    FRAMES_TMPL_PACK = """
You will be provided with {} separate frames uniformly sampled from a video, \
the frames are provided in chronological order of the video.
Please analyze these images and provide the answer / answers to the \
following question / questions about the video content.
If multiple questions are provided (with indices I1, I2, I3, ...), \
you should organize your answers in the following json format:
{{
'I1': 'Answer to Question I1',
'I2': 'Answer to Question I2',
...
}}
Otherwise, please directly reply with your response to the only question.
Even if the information in these separate frames is not enough to give an answer,
PLEASE GIVE A RESPONSE TO EACH OF THE QUESTIONS IN THE FORMAT DESCRIBED ABOVE.
"""

    # Instruction template for frame-based 'nopack' mode.
    FRAMES_TMPL_NOPACK = """
You will be provided with {} separate frames uniformly sampled from a video, \
the frames are provided in chronological order of the video.
Please analyze these images and provide the answer to the question about the video content.
Please directly reply with your response to the only question.
"""

    TYPE = 'Video-VQA'

    def __init__(self, dataset='MMBench-Video', pack=False, nframe=0, fps=-1):
        """Forward all configuration to VideoBaseDataset."""
        super().__init__(dataset=dataset, pack=pack, nframe=nframe, fps=fps)

    @classmethod
    def supported_datasets(cls):
        """Return the dataset names this class can serve."""
        return ['MMBench-Video']

    def prepare_dataset(self, dataset_name='MMBench-Video', repo_id='opencompass/MMBench-Video'):
        """Download (or reuse a cached copy of) the dataset and return its paths.

        Validates the cache via TSV md5 and per-video existence; on a miss,
        downloads from HF (or ModelScope) and unpacks the pickled videos.
        """
        def check_integrity(pth):
            # Cache is valid only if the TSV md5 matches and every referenced
            # video file exists on disk.
            data_file = osp.join(pth, f'{dataset_name}.tsv')
            if md5(data_file) != self.MD5:
                return False
            data = load(data_file)
            for video_pth in data['video_path']:
                if not osp.exists(osp.join(pth, video_pth)):
                    return False
            return True

        cache_path = get_cache_path(repo_id)
        if cache_path is not None and check_integrity(cache_path):
            dataset_path = cache_path
        else:
            if modelscope_flag_set():
                from modelscope import dataset_snapshot_download
                dataset_path = dataset_snapshot_download(dataset_id=repo_id)
            else:
                dataset_path = snapshot_download(repo_id=repo_id, repo_type='dataset')
            # Videos ship as pickles; restore them to <dataset>/video/*.mp4.
            unwrap_hf_pkl(dataset_path)
        self.video_path = osp.join(dataset_path, 'video/')
        data_file = osp.join(dataset_path, f'{dataset_name}.tsv')

        return dict(data_file=data_file, root=osp.join(dataset_path, 'video'))

    def build_prompt_pack(self, line):
        """Build a frames + all-questions message for one video ('pack' mode).

        ``line`` may be a row index, a pandas row, or a video name.
        """
        if isinstance(line, int):
            assert line < len(self)
            video = self.videos[line]
        elif isinstance(line, pd.Series):
            video = line['video']
        elif isinstance(line, str):
            video = line

        frames = self.save_video_frames(video)
        # All dataset rows belonging to this video (one per question).
        sub = self.data[self.data['video'] == video]
        sys_prompt = self.SYS + self.FRAMES_TMPL_PACK.format(len(frames))
        message = [dict(type='text', value=sys_prompt)]
        for im in frames:
            message.append(dict(type='image', value=im))
        nq = len(sub)
        prompt = 'Questions: \n{}\nAnswers: \n'
        # Questions keyed by their dataset index so answers can be mapped back.
        qs = {int(sub.iloc[i]['index']): sub.iloc[i]['question'] for i in range(nq)}
        prompt = prompt.format(json.dumps(qs))
        message.append(dict(type='text', value=prompt))
        return message

    def build_prompt_nopack(self, line, video_llm):
        """Build a single-question message; raw video if ``video_llm`` else frames."""
        if isinstance(line, int):
            assert line < len(self)
            line = self.data.iloc[line]
        if video_llm:
            question = line['question']
            # Only the basename is used; the video is resolved under self.video_path.
            prefix, video_idx_path = os.path.split(line['video_path'])
            message = [dict(type='text', value=question)]
            message.append(dict(type='video', value=os.path.join(self.video_path, video_idx_path)))
            return message
        else:
            frames = self.save_video_frames(line['video'])
            sys_prompt = self.FRAMES_TMPL_NOPACK.format(len(frames))
            message = [dict(type='text', value=sys_prompt)]
            for im in frames:
                message.append(dict(type='image', value=im))
            prompt = 'Question: {}\nAnswer: '.format(line['question'])
            message.append(dict(type='text', value=prompt))
            return message

    def build_prompt(self, line, video_llm):
        """Dispatch to pack/nopack prompt building; video LLMs never use pack."""
        if self.pack and not video_llm:
            return self.build_prompt_pack(line)
        else:
            return self.build_prompt_nopack(line, video_llm)

    @staticmethod
    def remove_side_quote(s, syms=[',', '"', "'"]):
        """Strip leading/trailing quote/comma chars; '' if s is only such chars."""
        if np.all([x in syms for x in s]):
            return ''
        while s[0] in syms:
            s = s[1:]
        while s[-1] in syms:
            s = s[:-1]
        return s

    @staticmethod
    def robust_json_load(s):
        """Best-effort parse of a single JSON object out of model output.

        Falls back to line-wise 'key: value' scraping when strict JSON
        extraction fails; returns a dict or None.
        """
        try:
            jsons = list(extract_json_objects(s))
            assert len(jsons) == 1
            return jsons[0]
        except:
            # Fallback: a single '{' suggests a malformed object; scrape
            # 'key: value' pairs line by line.
            if '{' in s and s.find('{') == s.rfind('{'):
                sub_str = s[s.find('{') + 1:].strip()
                lines = sub_str.split('\n')
                res = {}
                for l in lines:
                    l = l.strip()
                    if ': ' in l:
                        key = l.split(': ')[0].strip()
                        val = l.split(': ')[1].strip()
                        key = MMBenchVideo.remove_side_quote(key)
                        val = MMBenchVideo.remove_side_quote(val)
                        if len(key) and len(val):
                            res[key] = val
                return res
            return None

    def load_pack_answers(self, data_raw):
        """Unpack per-video JSON answers into a per-question prediction table.

        Returns (meta, vstats): meta is self.data plus a 'prediction' column
        (None where the answer was missing); vstats counts parse outcomes.
        """
        vstats = defaultdict(lambda: 0)
        data = defaultdict(lambda: {})

        for k in data_raw:
            ans = data_raw[k].strip()
            if FAIL_MSG in ans:
                vstats['GEN_FAIL'] += 1
                continue
            res = self.robust_json_load(ans)
            if res is not None:
                data[k] = res
                vstats['PARSE_OK'] += 1
            else:
                vstats['PARSE_FAIL'] += 1

        meta = cp.deepcopy(self.data)
        lt = len(meta)
        prediction = []
        for i in range(lt):
            line = meta.iloc[i]
            vid = line['video']
            idx = str(line['index'])
            prediction.append(data[vid][idx] if idx in data[vid] else None)
        meta['prediction'] = prediction
        vstats['VALIDQ'] = len([x for x in prediction if x is not None])
        vstats['INVALIDQ'] = len([x for x in prediction if x is None])
        return meta, vstats

    # NOTE(review): declared @classmethod but the first parameter is named
    # `self` — it actually receives the class object; confirm before renaming.
    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Judge predictions in ``eval_file`` and return the dimension rating dict.

        Raw judge scores are cached in a pickle keyed by question index so
        interrupted runs resume; missing/failed scores count as -1.
        """
        from .utils.mmbench_video import get_dimension_rating, system_prompt, build_prompt

        assert get_file_extension(eval_file) in ['xlsx', 'json', 'tsv'], 'data file should be an supported format (xlsx/json/tsv) file'  # noqa: E501
        # Judge model name is required; KeyError if the caller omits 'model'.
        judge = judge_kwargs['model']
        nproc = judge_kwargs.pop('nproc', 4)

        tmp_file = get_intermediate_file_path(eval_file, f'_{judge}_tmp', 'pkl')
        tgt_file = get_intermediate_file_path(eval_file, f'_{judge}_rating', 'json')
        score_file = get_intermediate_file_path(eval_file, f'_{judge}_score')

        model = build_judge(system_prompt=system_prompt, **judge_kwargs)
        assert model.working(), 'MMBench-Video evaluation requires a working OPENAI API\n' + DEBUG_MESSAGE

        if not osp.exists(score_file):
            # Resume from cached judge responses, dropping failed ones.
            res = {} if not osp.exists(tmp_file) else load(tmp_file)
            res = {k: v for k, v in res.items() if model.fail_msg not in v}

            data = load(eval_file)
            data_un = data[~data['index'].isin(res)]
            data_un = data_un[~pd.isna(data_un['prediction'])]
            lt = len(data_un)
            prompts = [build_prompt(data_un.iloc[i]) for i in range(lt)]
            indices = [data_un.iloc[i]['index'] for i in range(lt)]

            if len(prompts):
                _ = track_progress_rich(
                    model.generate,
                    prompts,
                    keys=indices,
                    save=tmp_file,
                    nproc=nproc,
                    chunksize=nproc
                )
            score_map = load(tmp_file)
            data['score'] = [score_map[idx] if idx in score_map else -1 for idx in data['index']]
            rejected = [x for x in score_map.values() if FAIL_MSG in x]
            # Non-integer judge outputs are also treated as failures (-1).
            data['score'] = [int(x) if istype(x, int) else -1 for x in data['score']]
            print(
                f'Among {len(data)} questions, failed to obtain prediction for {len(data) - len(score_map)} questions, '
                f'failed to obtain the score for another {len(rejected)} questions. '
                f'Those questions will be counted as 0 score in ALL rating, and will not be counted in VALID rating.'
            )

            dump(data, score_file)

        rating = get_dimension_rating(score_file)
        dump(rating, tgt_file)
        return rating
VLMEvalKit-sudoku/vlmeval/dataset/mmifeval.py ADDED
@@ -0,0 +1,483 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa
2
+ import re
3
+
4
+ from .image_base import ImageBaseDataset
5
+ from .utils import build_judge, DEBUG_MESSAGE
6
+ from ..smp import *
7
+ from ..smp.file import get_intermediate_file_path
8
+ from ..utils import track_progress_rich
9
+ from ..dataset.utils.mmif.function_and_compare import *
10
+
11
logger = get_logger("MMIFEval")

# Module-level shared state: populated by MMIFEval.evaluate() and read by the
# worker function judge_one_item() (which runs via track_progress_rich).
aux_data_dict = {}
judge_model = None

# <<< prompt builders >>>
18
+
19
+
20
def generate_eval_pt_c_level(constraints, prediction):
    """Build the C-Level judging prompt: grade a response against each constraint.

    Args:
        constraints: List of dicts each carrying a 'value' field with the
            constraint text (the 'direct_gpt' subset of a sample's constraints).
        prediction: The model response under evaluation.

    Returns:
        A prompt asking the judge for a 0/1 score per constraint, ending with
        a 'Score of constraint_i: x/1' summary line that is later parsed by
        extract_score_from_direct_gpt_resp().
    """
    constraints_str = "\n".join(
        [f"Constraint_{i + 1}: {constraint['value']}" for i, constraint in enumerate(constraints)]
    )
    pt = f"""\
Your task is to evaluate whether the response from an AI assistant adheres to all of the given constraints. \
Please follow the requirements below to make the judgment:
1. Be strict and consistent in your assessment.
2. You should refer to the content of image to make the judgment.
3. For each constraint, if the response fails to fully meet the constraint, \
give it a score of 0. Otherwise, give it a score of 1.

<start of response>
{prediction}
<end of response>

<start of constraint list>
{constraints_str}
<end of constraint list>

You must evaluate and provide an explanation for each constraint listed, ensuring no constraint is omitted. \
At the end, summarize the scores for all constraints in one sentence.

Your output should strictly follow the format below:
Judgement: ...
Summary: Score of constraint_1: x/1, Score of constraint_2: x/1, Score of constraint_3: x/1, ..., Score of \
constraint_n: x/1.
"""
    return pt
49
+
50
+
51
def generate_eval_pt_p_level(question, prediction, ground_truth):
    """Build the P-Level judging prompt: check ground-truth coverage.

    Args:
        question: The original question text.
        prediction: The model response under evaluation.
        ground_truth: List of strings that must all appear (as substrings)
            in the response.

    Returns:
        A prompt whose expected answer is exactly 'right' or 'wrong',
        consumed by extract_score_from_p_level_gpt_resp().
    """
    pt = f"""\
You are an expert evaluator. Your task is to extract the answer from the model output and \
compare it with the ground truth list \
to determine whether the model answer covers all the points in the ground truth list. \
The ground truth list is provided as a JSON array of strings, and the model answer is a text string. \
An answer is considered correct if every element from the ground truth list appears in the model \
answer (substring matching is acceptable). \
The order does not matter. \

Your response should only be 'right' if the model answer fully covers the ground truth, or 'wrong' if it does not. \
Do not provide any additional commentary.

Question: {question}
Response from the model: {prediction}
Ground Truth List: {ground_truth}
"""
    return pt
69
+
70
+
71
def generate_cmp_pt(constraint, pred_with_constraint, pred_without_constraint):
    """Build the cmp_gpt judging prompt: did the constraint change the answer?

    Args:
        constraint: The constraint text.
        pred_with_constraint: Model response to the constrained question.
        pred_without_constraint: Model response to the same question with
            the constraint removed (auxiliary inference pass).

    Returns:
        A prompt whose summary line is 'True' or 'False', consumed by
        extract_score_from_cmp_gpt_resp().
    """
    pt = f"""\
You are an expert in judging whether the respone follow the given constraint. \
Your task is to assess whether the model's response satisfies \
the given constraint and return True or False. I will provide you \
with the constraint and the model's response under this constraint. \
To assist with your evaluation, I will also provide you with the model's response \
to the same question without the constraint.

<start of constraint>
{constraint}
<end of constraint>

<start of response under the constraint>
{pred_with_constraint}
<end of response under the constraint>

<start of response without the constraint>
{pred_without_constraint}
<end of response without the constraint>

**Please follow the steps below to evaluate**:
Step 1. Compare the model's response under the constraint with its response without the constraint. \
If you believe these two answers \
are very similar, it means the model has not fully considered the impact of the constraint on the answer. \
Please return False.
Step 2. Compare the model's response under the constraint with the content of the constraint. If you believe the model's response \
does not meet the requirements specified in the constraint, return False. Otherwise, \
if the response effectively satisfies the constraint, return True.

Start by briefly explaining your reasoning based on the above steps. At the end, provide a one-sentence \
summary of your evaluation.

Your output must strictly follow this format:
Reasoning: ...
Summary: "True" / "False".
"""
    return pt
109
+
110
+
111
+ # <<< re >>>
112
+ # extract score from gpt_resp
113
+ # format: Score of instruction: x/1, Score of constraint_1: y/1, Score of constraint_2: z/1, ..., Score of constraint_n: w/1.
114
+ # return: score_dict {'instruction': x/1, 'constraint_1': y/1,
115
+ # 'constraint_2': z/1, ..., 'constraint_n': w/1}
116
+
117
+
118
def extract_score_from_direct_gpt_resp(raw_score):
    """Parse 'Score of <name>: x/y' fragments from a direct-GPT judgement.

    Whitespace is normalized and markdown asterisks removed before matching,
    so '**Score of Constraint_1: 1/1**' parses cleanly.

    Returns:
        Dict mapping each lower-cased component name to its fractional score.

    Raises:
        ValueError: If no score fragment can be found.
    """
    # Normalize whitespace and strip markdown emphasis before matching.
    flattened = re.sub(r"\s+", " ", raw_score).strip()
    flattened = re.sub(r"\*", "", flattened)

    fragment_re = re.compile(r"Score\s+of\s+([a-zA-Z0-9_\-]+):\s*(\d+)\s*/\s*(\d+)", re.IGNORECASE)
    fragments = fragment_re.findall(flattened)
    if not fragments:
        raise ValueError("raw_score format is incorrect, cannot parse scores")

    return {
        name.strip().lower().replace(" ", "_"): int(num) / int(den)
        for name, num, den in fragments
    }
149
+
150
+
151
+ # extract score from gpt_resp
152
+ # format: right or wrong
153
+ # return: score
154
+
155
+
156
def extract_score_from_p_level_gpt_resp(raw_score):
    """Map the P-Level judge's right/wrong verdict to 1/0.

    Exact matches are preferred; otherwise a case-insensitive search is
    performed, checking 'right' before 'wrong' (same precedence as before).

    Raises:
        ValueError: If neither verdict word appears in the response.
    """
    verdicts = (("right", 1), ("wrong", 0))
    # Fast path: the judge followed instructions exactly.
    for word, score in verdicts:
        if raw_score == word:
            return score
    # Lenient path: look for the verdict anywhere in the text.
    for word, score in verdicts:
        if re.search(word, raw_score, re.IGNORECASE):
            return score
    raise ValueError("raw_score format is incorrect, cannot parse scores")
169
+
170
+
171
+ # extract score from gpt_resp
172
+ # format: True or False
173
+ # return: score
174
+
175
+
176
def extract_score_from_cmp_gpt_resp(response_text):
    """Extract the True/False verdict after the final 'summary' marker.

    Returns:
        1 for True, 0 for False.

    Raises:
        ValueError: If no 'summary' marker exists, or no bare True/False
            token follows it.
    """
    marker = response_text.lower().rfind("summary")
    if marker < 0:
        raise ValueError("No 'summary' found in response.")

    # Everything after the last 'summary' marker, markdown included.
    tail = response_text[marker + len("summary"):]
    verdict = re.search(r"\b(true|false)\b", tail, re.IGNORECASE)
    if verdict is None:
        raise ValueError("No valid 'True' or 'False' found after 'summary'.")
    return int(verdict.group(1).lower() == "true")
192
+
193
+
194
+ # <<< gpt >>>
195
+
196
+
197
def run_once_with_image(pt, image, retry=4):
    """Query the global judge model with a text prompt plus one base64 JPEG.

    Args:
        pt: Prompt text.
        image: Base64-encoded JPEG payload (without the data-URI prefix).
        retry: Maximum number of attempts.

    Returns:
        The judge's response, or None if every attempt raised.  (Previously
        ``ans`` was unbound when the first attempt failed, so exhausting the
        retries raised UnboundLocalError instead of returning.)
    """
    global judge_model
    prefix = "data:image/jpeg;base64,"
    img = prefix + image
    messages = [dict(type="text", value=pt), dict(type="image", value=img)]
    ans = None  # fix: ensure a defined return value when all attempts fail
    while retry:
        try:
            ans = judge_model.generate(messages)
            return ans
        except Exception as e:
            logger.info(f"Error in run_once_with_image: {e}")
            retry -= 1
    return ans
210
+
211
+
212
def run_once_without_image(pt, retry=3):
    """Query the global judge model with a text-only prompt.

    Args:
        pt: Prompt text.
        retry: Maximum number of attempts.

    Returns:
        The judge's response, or None if every attempt raised.  (Previously
        ``ans`` was unbound when the first attempt failed, so exhausting the
        retries raised UnboundLocalError instead of returning.)
    """
    global judge_model
    messages = [
        dict(type="text", value=pt),
    ]
    ans = None  # fix: ensure a defined return value when all attempts fail
    while retry:
        try:
            ans = judge_model.generate(messages)
            return ans
        except Exception as e:
            logger.info(f"Error in run_once_without_image: {e}")
            retry -= 1
    return ans
225
+
226
+
227
+ # <<< score >>>
228
+
229
+
230
def judge_one_item(item, retry=3):
    """Judge one MM-IFEval sample (worker for track_progress_rich).

    Args:
        item: JSON-encoded record dict (P-Level records carry 'question',
            'prediction', 'answer'; C-Level records carry 'constraints',
            'prediction', 'image', 'id').
        retry: Max attempts when the judge output cannot be parsed.

    Returns:
        Tuple ``(ret_code, msg, score_dict)``: ret_code 0 on success with the
        parsed scores (including 'total_score'), or 1 with an empty dict when
        every retry failed.
    """
    global aux_data_dict
    item = json.loads(item)
    num_retry = 0
    while num_retry < retry:
        if item.get("tag", None) == "P-Level":
            # In the TSV, 'answer' is a JSON-encoded list of required strings.
            pt = generate_eval_pt_p_level(item["question"], item["prediction"], json.loads(item["answer"]))
            gpt_resp = run_once_without_image(pt)
            try:
                score = extract_score_from_p_level_gpt_resp(gpt_resp)
                return (
                    0,
                    "success",
                    {
                        "total_score": score,
                        "gpt_resp": gpt_resp,
                    },
                )
            except Exception as e:
                logger.error(f"\nError:\n{e}\nItem:\n{item}\ngpt_resp:\n{gpt_resp}\n")
                num_retry += 1
                continue
        else:  # C-Level sample
            # Split constraints by judging method: 'direct_gpt' constraints
            # are graded in one batched prompt; the rest one by one.
            constraint_direct_gpt = []
            constraint_other = []
            for constraint in json.loads(item["constraints"]):
                method = constraint["judge"]["method"]
                if method == "direct_gpt":
                    constraint_direct_gpt.append(constraint)
                else:
                    constraint_other.append(constraint)
            score_dict = {}
            # 1. direct_gpt: one image-grounded prompt grades all of them.
            if len(constraint_direct_gpt) > 0:
                pt_direct_gpt = generate_eval_pt_c_level(constraint_direct_gpt, item["prediction"])
                gpt_resp = run_once_with_image(pt_direct_gpt, item["image"])
                try:
                    direct_gpt_score_dict = extract_score_from_direct_gpt_resp(gpt_resp)
                    score_dict["gpt_resp_direct_gpt"] = gpt_resp
                    # Judge output is keyed constraint_1..n in prompt order.
                    for i, constraint in enumerate(constraint_direct_gpt):
                        score_dict[constraint["key"]] = direct_gpt_score_dict[f"constraint_{i + 1}"]
                except Exception as e:
                    logger.error(
                        f"\nError:\n{e}\nItem:\n{item}\npt_direct_gpt:\n{pt_direct_gpt}\ngpt_resp:\n{gpt_resp}"
                    )
                    num_retry += 1
                    continue
            # 2. rule_based: run the named verifier functions locally.
            for constraint in constraint_other:
                if constraint["judge"]["method"] == "rule_based":
                    # verify_funcs example: [{"func":
                    # "check_whether_response_paragraph_number_in_range",
                    # "params": [3, 3]}] — functions live in
                    # utils.mmif.function_and_compare (star-imported above).
                    score = 1.0
                    for func_dict in constraint["judge"]["verify_funcs"]:
                        func = globals()[func_dict["func"]]
                        # '*' unpacks the positional parameter list.
                        judge_result = func(item["prediction"], *func_dict["params"])
                        if not judge_result:  # any failing check zeroes the score
                            score = 0.0
                            break
                    score_dict[constraint["key"]] = score
            # 3. cmp_gpt: compare against the auxiliary no-constraint run.
            for constraint in constraint_other:
                if constraint["judge"]["method"] == "cmp_gpt":
                    del_cons_prediction = aux_data_dict[item["id"]][constraint["key"]]
                    pt = generate_cmp_pt(constraint["value"], item["prediction"], del_cons_prediction)
                    gpt_resp = run_once_without_image(pt)
                    try:
                        score = extract_score_from_cmp_gpt_resp(gpt_resp)
                        score_dict[constraint["key"]] = score
                        score_dict[f"gpt_resp_cmp_gpt_{constraint['key']}"] = gpt_resp
                    except Exception as e:
                        logger.error(f"\nError:\n{e}\nItem:\n{item}\ngpt_resp:\n{gpt_resp}")
                        num_retry += 1
                        # NOTE(review): this `continue` targets the inner
                        # for-loop, not the retry while-loop — a cmp_gpt parse
                        # failure bumps num_retry but still proceeds to the
                        # total-score computation below; confirm intended.
                        continue
            # Average every per-constraint score (gpt_resp_* entries excluded).
            total_score = 0.0
            cnt = 0
            for key, value in score_dict.items():
                if key.startswith("gpt_resp_"):
                    continue
                total_score += value
                cnt += 1
            score_dict["total_score"] = total_score / cnt
            logger.info(f"score_dict:\n{score_dict}")
            return 0, "success", score_dict
    return 1, "C-Level, fail in judge", {}
329
+
330
+
331
class MMIFEval(ImageBaseDataset):
    """MM-IFEval: multimodal instruction-following benchmark, judge-scored."""

    TYPE = "VQA"

    DATASET_URL = {"MM-IFEval": 'https://opencompass.openxlab.space/utils/VLMEval/MM-IFEval.tsv'}
    DATASET_MD5 = {
        "MM-IFEval": '973bb839961a449565073a5ee70ae7a6'
    }

    def build_prompt(self, line):
        """Build the multimodal message for one record: question text first,
        then the image(s). All records carry a single image."""
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = toliststr(line["image_path"])
        else:
            tgt_path = self.dump_image(line)

        question = line["question"]

        msgs = []
        if isinstance(tgt_path, list):
            msgs.extend([dict(type="image", value=p) for p in tgt_path])
        else:
            msgs = [dict(type="image", value=tgt_path)]

        # Text placed before the image(s), mirroring WildVision's convention.
        msgs = [dict(type="text", value=question)] + msgs

        return msgs

    def evaluate(self, eval_file, **judge_kwargs):
        """Judge predictions in ``eval_file``; return a one-row score DataFrame.

        Splits records into 'main' rows (judged) and 'aux_cmp_gpt' rows
        (constraint-removed reruns consumed by cmp_gpt judging), fans the
        main rows out to judge_one_item() with pickle-based resume, then
        aggregates P-Level / C-Level accuracies.
        """
        raw_bench_data = MMIFEval("MM-IFEval").data
        global aux_data_dict
        model = judge_kwargs["model"]
        storage = get_intermediate_file_path(eval_file, f"_{model}", "jsonl")
        score_file = get_intermediate_file_path(eval_file, f"_{model}_score", "csv")
        tmp_file = get_intermediate_file_path(eval_file, f"_{model}_tmp", "pkl")
        nproc = judge_kwargs.pop("nproc", 4)

        data_all = load(eval_file).to_dict(orient="records")

        main_data = []
        aux_data = []
        for i, line in enumerate(data_all):
            if line.get("infer_type", None) == "main":
                main_data.append(line)
            else:
                aux_data.append(line)

            # NOTE(review): images are attached by positional alignment —
            # assumes eval_file rows are 1:1 and in the same order as the
            # benchmark TSV; confirm upstream guarantees this.
            line["image"] = raw_bench_data.iloc[i]["image"]

        # Index the auxiliary (constraint-removed) predictions by sample id
        # and removed-constraint key, for cmp_gpt judging.
        aux_data_dict = {}
        for line in aux_data:
            assert line["infer_type"] == "aux_cmp_gpt"
            del_cons = line["del_cons"]
            if line["id"] not in aux_data_dict:
                aux_data_dict[line["id"]] = {}
            aux_data_dict[line["id"]][del_cons] = line["prediction"]

        # Serialize records so the worker can be called through the pool.
        params_all = [json.dumps(item) for item in main_data]
        indices_all = [line["id"] for line in main_data]

        # Resume: keep only successfully judged entries from the tmp pickle.
        ans = {}
        if os.path.exists(tmp_file):
            ans_tuples = load(tmp_file)
            for k, v in ans_tuples.items():
                if v[0] == 0:
                    ans[k] = {"eval_ret_code": v[0], "eval_msg": v[1], "eval_score_dict": v[2]}
            logger.info(f"Tmp file exists, loaded {len(ans)} data from {tmp_file}")

        tups = [x for x, i in zip(params_all, indices_all) if i not in ans]
        indices = [i for i in indices_all if i not in ans]

        if not osp.exists(storage):
            judge_kwargs["temperature"] = 0
            judge_kwargs["img_detail"] = "high"
            judge_kwargs["timeout"] = 300
            # Workers reach the judge through this module-level global.
            global judge_model
            judge_model = build_judge(max_tokens=4096, **judge_kwargs)

            assert judge_model.working(), "MMIFEval evaluation requires a working OPENAI API\n" + DEBUG_MESSAGE

            if len(indices):
                new_results = track_progress_rich(
                    judge_one_item,
                    tups,
                    nproc=nproc,
                    chunksize=nproc,
                    keys=indices,
                    save=tmp_file,
                )
                for k, v in zip(indices, new_results):
                    ans[k] = {"eval_ret_code": v[0], "eval_msg": v[1], "eval_score_dict": v[2]}
            else:
                # Normalize any raw tuples left over from the tmp pickle.
                for k, v in ans.items():
                    if isinstance(v, tuple):
                        ans[k] = {"eval_ret_code": v[0], "eval_msg": v[1], "eval_score_dict": v[2]}
            # Drop the bulky base64 images before persisting results.
            for item in main_data:
                item.pop("image")

            for item in main_data:
                item["eval_ret_code"] = ans[item["id"]]["eval_ret_code"]
                item["eval_msg"] = ans[item["id"]]["eval_msg"]
                item["eval_score_dict"] = ans[item["id"]]["eval_score_dict"]
            # Persist judged records as JSONL.
            with open(storage, "w") as f:
                for item in main_data:
                    f.write(json.dumps(item) + "\n")

        # NOTE(review): iteration below indexes line["tag"] — assumes load()
        # yields dict records for a .jsonl file; confirm smp.load behavior.
        eval_data = load(storage)
        p_level_score_sum = 0
        c_level_score_sum = 0
        p_level_cnt = 0
        c_level_cnt = 0
        for line in eval_data:
            if line["tag"] == "P-Level":
                p_level_score_sum += line["eval_score_dict"]["total_score"]
                p_level_cnt += 1
            elif line["tag"] == "C-Level":
                c_level_score_sum += line["eval_score_dict"]["total_score"]
                c_level_cnt += 1
        p_level_accuracy = p_level_score_sum / p_level_cnt
        c_level_accuracy = c_level_score_sum / c_level_cnt
        score_dict = {
            "p_level_accuracy": [p_level_accuracy],
            "c_level_accuracy": [c_level_accuracy],
            "p_level_cnt": [p_level_cnt],
            "c_level_cnt": [c_level_cnt],
            "overall_accuracy": [
                (p_level_accuracy * p_level_cnt + c_level_accuracy * c_level_cnt) / (p_level_cnt + c_level_cnt)
            ],
        }
        score_df = pd.DataFrame(score_dict)
        dump(score_df, score_file)

        return score_df
VLMEvalKit-sudoku/vlmeval/dataset/qbench_video.py ADDED
@@ -0,0 +1,354 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa
2
+ import huggingface_hub
3
+ from huggingface_hub import snapshot_download
4
+ from ..smp import *
5
+ from ..smp.file import get_intermediate_file_path, get_file_extension
6
+ from .video_concat_dataset import ConcatVideoDataset
7
+ from .video_base import VideoBaseDataset
8
+ from .utils import build_judge, DEBUG_MESSAGE
9
+ from ..utils import track_progress_rich
10
+ import torchvision.transforms as T
11
+ from torchvision import transforms
12
+ from torchvision.transforms.functional import InterpolationMode
13
+ import pandas as pd
14
+ import imageio
15
+ import cv2
16
+ import zipfile
17
+ import os
18
+ import glob
19
+ from .utils.qbench_video import *
20
+
21
+ FAIL_MSG = 'Failed to obtain answer via API.'
22
+
23
+
24
class QBench_Video(ConcatVideoDataset):
    """Composite Q-Bench-Video benchmark combining the MCQ and VQA subsets."""

    def __init__(self, dataset='QBench_Video', nframe=0, fps=-1):
        # Register the member sub-datasets before the parent constructor
        # materializes them from DATASET_SETS.
        self.DATASET_SETS[dataset] = ['QBench_Video_MCQ','QBench_Video_VQA']
        super().__init__(dataset=dataset, nframe=nframe, fps=fps)

    @classmethod
    def supported_datasets(cls):
        """Expose only the composite dataset name."""
        return ['QBench_Video']

    def evaluate(self, eval_file, **judge_kwargs):
        """Run the concatenated evaluation, rescale open-ended accuracy, and persist it.

        The open-ended accuracy is halved — presumably the VQA judge scores on
        a 0-2 scale; confirm against the judge prompt.
        """
        merged = super().evaluate(eval_file=eval_file, **judge_kwargs)
        merged.at['open_ended', 'acc'] /= 2
        acc_path = get_intermediate_file_path(eval_file, '_acc')
        dump(merged, acc_path)
        return merged
39
+
40
+
41
class QBench_Video_MCQ(VideoBaseDataset):
    """Single-video multiple-choice subset of the Q-Bench-Video benchmark.

    Data is fetched from the HuggingFace repo ``zhangzicheng/Q-Bench-Video``.
    Depending on the model type, a sample is presented either as the raw
    video (video LLMs) or as uniformly sampled frames.
    """

    # Expected md5 checksum of the dataset TSV; used to validate the local cache.
    MD5 = '9d6760d75fa80aa9fd5e5cf1ea274ace'

    # System prompt for the frame-based branch; '{}' is filled with the
    # number of frames actually supplied.
    FRAMES_TMPL_SYS = """
You will receive {} distinct frames that have been uniformly sampled from a video sequence, arranged in the same temporal order as they appear in the video.
Please analyze these frames and answer the question based on your observations.
"""

    # System prompt for the raw-video branch (frame count not known up front).
    FRAMES_TMPL_SYS_4VIDEO_LLM = """
You will receive several distinct frames that have been uniformly sampled from a video sequence, arranged in the same temporal order as they appear in the video.
Please analyze these frames and answer the question based on your observations.
"""

    # Appended after the question to constrain the reply to a bare option
    # letter (only used in the frame-based branch).
    POST_PROMPT = """
Please answer the question in the following format: the uppercase letter of the correct answer option itself.
Please do not add any other answers beyond this.
"""

    TYPE = 'Video-MCQ'

    def __init__(self, dataset='qbenchvideo_single_MCQ', nframe=0, fps=-1):
        # The TSV on the HF repo has a fixed name; the `dataset` argument is
        # intentionally ignored here.
        dataset_tsv_name = 'qbenchvideo_single_MCQ'
        super().__init__(dataset=dataset_tsv_name, nframe=nframe, fps=fps)

    @classmethod
    def supported_datasets(cls):
        return ['QBench_Video_MCQ']

    def prepare_dataset(self, dataset_name='qbenchvideo_single_MCQ', repo_id='zhangzicheng/Q-Bench-Video'):
        """Download (if needed) and validate the dataset; return its root and TSV path."""
        def check_integrity(pth):
            # A cached copy is valid only if the TSV exists, its md5 matches,
            # and every referenced video file is present on disk.
            data_file = osp.join(pth, f'{dataset_name}.tsv')

            if not os.path.exists(data_file):
                return False

            if md5(data_file) != self.MD5:
                return False

            data = load(data_file)
            for idx, item in data.iterrows():
                if not osp.exists(os.path.normpath(osp.join(pth, item['video_path']))):
                    return False
            return True

        cache_path = get_cache_path(repo_id)
        if cache_path is not None and check_integrity(cache_path):
            dataset_path = cache_path
        else:
            def unzip_videos(pth):
                # Extract the bundled video archive once; skip if already unpacked.
                if not osp.exists(osp.join(pth, 'video')):
                    zip_file = osp.join(pth, 'video.zip')
                    with zipfile.ZipFile(zip_file, 'r') as zip_ref:
                        zip_ref.extractall(pth)
            dataset_path = snapshot_download(repo_id=repo_id, repo_type='dataset')
            unzip_videos(dataset_path)

        data_file = osp.join(dataset_path, f'{dataset_name}.tsv')
        return dict(root=dataset_path, data_file=data_file)

    def save_video_frames(self, line):
        """Sample frames from the video of `line` and cache them as images.

        Sampling is either a fixed count (`self.nframe`) or rate-based
        (`self.fps`); the two modes are mutually exclusive.
        Returns the list of frame image paths.
        """
        video = line['video']
        vid_path = os.path.normpath(os.path.join(self.data_root, line['video_path']))
        import decord
        vid = decord.VideoReader(vid_path)
        video_info = {
            'fps': vid.get_avg_fps(),
            'n_frames': len(vid),
        }
        # NOTE(review): if neither nframe > 0 nor fps > 0, `indices` and
        # `frame_paths` are never assigned and the code below raises
        # NameError — confirm callers always set one of the two.
        if self.nframe > 0 and self.fps < 0:
            step_size = len(vid) / (self.nframe + 1)
            indices = [int(i * step_size) for i in range(1, self.nframe + 1)]
            frame_paths = self.frame_paths(video)
        elif self.fps > 0:
            # not constrained by num_frames, get frames by fps
            total_duration = video_info['n_frames'] / video_info['fps']
            required_frames = int(total_duration * self.fps)
            step_size = video_info['fps'] / self.fps
            indices = [int(i * step_size) for i in range(required_frames)]
            frame_paths = self.frame_paths_fps(video, len(indices))

        flag = np.all([osp.exists(p) for p in frame_paths])

        if not flag:
            # Guard frame extraction with a file lock so concurrent workers
            # do not decode/write the same video simultaneously.
            lock_path = osp.splitext(vid_path)[0] + '.lock'
            with portalocker.Lock(lock_path, 'w', timeout=30):
                # Re-check under the lock: another process may have finished already.
                if not np.all([osp.exists(p) for p in frame_paths]):
                    images = [vid[i].asnumpy() for i in indices]
                    images = [Image.fromarray(arr) for arr in images]
                    for im, pth in zip(images, frame_paths):
                        if not osp.exists(pth):
                            im.save(pth)

        return frame_paths

    def save_video_into_images(self, line):
        # Thin wrapper kept for interface parity with other video datasets.
        frame_paths = self.save_video_frames(line)
        return frame_paths

    def build_prompt(self, line, video_llm):
        """Build the multimodal message for one sample.

        `video_llm=True` sends the raw video; otherwise sampled frames plus
        the answer-format POST_PROMPT are sent.
        """
        if isinstance(line, int):
            assert line < len(self)
            line = self.data.iloc[line]

        # message = [dict(type='text', value=line['question'])]
        video_path = os.path.normpath(os.path.join(self.data_root, line['video_path']))
        if video_llm:
            message = [dict(type='text', value=self.FRAMES_TMPL_SYS_4VIDEO_LLM)]
            message.append(dict(type='text', value=line['question']))
            message.append(dict(type='video', value=video_path))
        else:
            img_frame_paths = self.save_video_into_images(line)
            message = [dict(type='text', value=self.FRAMES_TMPL_SYS.format(len(img_frame_paths)))]
            message.append(dict(type='text', value=line['question']))
            for im in img_frame_paths:
                message.append(dict(type='image', value=im))
            message.append(dict(type='text', value=self.POST_PROMPT))
        return message

    # NOTE(review): declared @classmethod but the first parameter is named
    # `self` — it actually receives the class object. Renaming would be
    # cosmetic; flagged for consistency with the rest of the codebase.
    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Score predictions by exact matching and return the dimension rating."""
        assert get_file_extension(eval_file) in ['xlsx', 'json', 'tsv'], 'data file should be an supported format (xlsx/json/tsv) file'

        tmp_file = get_intermediate_file_path(eval_file, '_tmp', 'pkl')
        score_file = get_intermediate_file_path(eval_file, '_score')

        if not osp.exists(score_file):
            # Only exact matching is supported for the MCQ subset.
            model = judge_kwargs.setdefault('model', 'exact_matching')
            assert model in ['exact_matching']

            # NOTE(review): `res` is loaded/filtered here but never used below —
            # likely leftover from a judge-based evaluation path.
            res = {} if not osp.exists(tmp_file) else load(tmp_file)
            res = {k: v for k, v in res.items() if FAIL_MSG not in v}

            data = load(eval_file)
            # Rows that actually have a prediction (used only for reporting).
            data_un = data[~pd.isna(data['prediction'])]

            for idx in data['index']:
                ans = data.loc[data['index'] == idx, 'answer'].values[0]
                pred = data.loc[data['index'] == idx, 'prediction'].values[0]
                # Assumes answers are formatted like "A. some text".
                correct_choice = ans.split('.')[0].strip()
                correct_answer = ans.split('.')[1].strip()

                # NOTE(review): reads use a boolean mask on the 'index' column
                # while writes use `data.loc[idx, ...]` (label-based) — this is
                # only consistent if the DataFrame index equals the 'index'
                # column; verify after `load(eval_file)`.
                if FAIL_MSG in pred:
                    data.loc[idx, 'score'] = -1
                else:
                    data.loc[idx, 'score'] = int(check_ans_mcq(
                        pred, ans, correct_choice, correct_answer
                    ))

            rejected = [x for x in data['score'] if x == -1]

            print(
                f'Among {len(data)} questions, failed to obtain prediction for {len(data) - len(data_un)} questions, '
                f'failed to obtain the score for another {len(rejected)} questions. '
                f'Those questions will be counted as -1 score in ALL rating, and will not be counted in VALID rating.'
            )

            dump(data, score_file)

        rating = get_dimension_rating(score_file)
        return rating
202
+
203
+
204
class QBench_Video_VQA(VideoBaseDataset):
    """Single-video open-ended VQA subset of the Q-Bench-Video benchmark.

    Mirrors :class:`QBench_Video_MCQ` for data preparation and prompting, but
    answers are free-form and scored by a GPT-4o judge.
    """

    # Expected md5 checksum of the dataset TSV; used to validate the local cache.
    MD5 = '49e6181b341c934d0b33ec78bdcc0a3d'

    # System prompt for the frame-based branch; '{}' is filled with the
    # number of frames actually supplied.
    FRAMES_TMPL_SYS = """
You will receive {} distinct frames that have been uniformly sampled from a video sequence, arranged in the same temporal order as they appear in the video.
Please analyze these frames and provide a detailed and accurate answer from the perspective of visual quality based on your observations.
"""

    # System prompt for the raw-video branch.
    FRAMES_TMPL_SYS_4VIDEO_LLM = """
You will receive several distinct frames that have been uniformly sampled from a video sequence, arranged in the same temporal order as they appear in the video.
Please analyze these frames and provide a detailed and accurate answer from the perspective of visual quality based on your observations.
"""

    TYPE = 'Video-VQA'

    def __init__(self, dataset='qbenchvideo_single_VQA', nframe=0, fps=-1):
        # The TSV on the HF repo has a fixed name; the `dataset` argument is
        # intentionally ignored here.
        dataset_tsv_name = 'qbenchvideo_single_VQA'
        super().__init__(dataset=dataset_tsv_name, nframe=nframe, fps=fps)

    @classmethod
    def supported_datasets(cls):
        return ['QBench_Video_VQA']

    def prepare_dataset(self, dataset_name='qbenchvideo_single_VQA', repo_id='zhangzicheng/Q-Bench-Video'):
        """Download (if needed) and validate the dataset; return its root and TSV path."""
        def check_integrity(pth):
            # A cached copy is valid only if the TSV exists, its md5 matches,
            # and every referenced video file is present on disk.
            data_file = osp.join(pth, f'{dataset_name}.tsv')

            if not os.path.exists(data_file):
                return False

            if md5(data_file) != self.MD5:
                return False

            data = load(data_file)
            for idx, item in data.iterrows():
                if not osp.exists(os.path.normpath(osp.join(pth, item['video_path']))):
                    return False
            return True

        cache_path = get_cache_path(repo_id)
        if cache_path is not None and check_integrity(cache_path):
            dataset_path = cache_path
        else:
            def unzip_videos(pth):
                # Extract the bundled video archive once; skip if already unpacked.
                if not osp.exists(osp.join(pth, 'video')):
                    zip_file = osp.join(pth, 'video.zip')
                    with zipfile.ZipFile(zip_file, 'r') as zip_ref:
                        zip_ref.extractall(pth)
            dataset_path = snapshot_download(repo_id=repo_id, repo_type='dataset')
            unzip_videos(dataset_path)

        data_file = osp.join(dataset_path, f'{dataset_name}.tsv')
        return dict(root=dataset_path, data_file=data_file)

    def save_video_frames(self, line):
        """Sample frames from the video of `line` and cache them as images.

        Sampling is either a fixed count (`self.nframe`) or rate-based
        (`self.fps`); the two modes are mutually exclusive.
        Returns the list of frame image paths.
        """
        video = line['video']
        vid_path = os.path.normpath(os.path.join(self.data_root, line['video_path']))
        import decord
        vid = decord.VideoReader(vid_path)
        video_info = {
            'fps': vid.get_avg_fps(),
            'n_frames': len(vid),
        }
        # NOTE(review): if neither nframe > 0 nor fps > 0, `indices` and
        # `frame_paths` are never assigned and the code below raises
        # NameError — confirm callers always set one of the two.
        if self.nframe > 0 and self.fps < 0:
            step_size = len(vid) / (self.nframe + 1)
            indices = [int(i * step_size) for i in range(1, self.nframe + 1)]
            frame_paths = self.frame_paths(video)
        elif self.fps > 0:
            # not constrained by num_frames, get frames by fps
            total_duration = video_info['n_frames'] / video_info['fps']
            required_frames = int(total_duration * self.fps)
            step_size = video_info['fps'] / self.fps
            indices = [int(i * step_size) for i in range(required_frames)]
            frame_paths = self.frame_paths_fps(video, len(indices))

        flag = np.all([osp.exists(p) for p in frame_paths])

        if not flag:
            # Guard frame extraction with a file lock so concurrent workers
            # do not decode/write the same video simultaneously.
            lock_path = osp.splitext(vid_path)[0] + '.lock'
            with portalocker.Lock(lock_path, 'w', timeout=30):
                # Re-check under the lock: another process may have finished already.
                if not np.all([osp.exists(p) for p in frame_paths]):
                    images = [vid[i].asnumpy() for i in indices]
                    images = [Image.fromarray(arr) for arr in images]
                    for im, pth in zip(images, frame_paths):
                        if not osp.exists(pth):
                            im.save(pth)

        return frame_paths

    def save_video_into_images(self, line):
        # Thin wrapper kept for interface parity with other video datasets.
        frame_paths = self.save_video_frames(line)
        return frame_paths

    def build_prompt(self, line, video_llm):
        """Build the multimodal message for one sample.

        `video_llm=True` sends the raw video; otherwise sampled frames are
        sent. No answer-format post-prompt here — answers are free-form.
        """
        if isinstance(line, int):
            assert line < len(self)
            line = self.data.iloc[line]

        video_path = os.path.normpath(os.path.join(self.data_root, line['video_path']))
        if video_llm:
            message = [dict(type='text', value=self.FRAMES_TMPL_SYS_4VIDEO_LLM)]
            message.append(dict(type='text', value=line['question']))
            message.append(dict(type='video', value=video_path))
        else:
            img_frame_paths = self.save_video_into_images(line)
            message = [dict(type='text', value=self.FRAMES_TMPL_SYS.format(len(img_frame_paths)))]
            message.append(dict(type='text', value=line['question']))
            for im in img_frame_paths:
                message.append(dict(type='image', value=im))
        return message

    # NOTE(review): declared @classmethod but the first parameter is named
    # `self` — it actually receives the class object.
    @classmethod
    def evaluate(self, eval_file, **judge_kwargs):
        """Score free-form predictions with a GPT-4o judge; return the dimension rating."""
        model = judge_kwargs.setdefault('model', 'gpt-4o-0806')
        assert model in ['gpt-4o-0806', 'gpt-4o']

        score_file = get_intermediate_file_path(eval_file, f'_{model}_score')
        tmp_file = get_intermediate_file_path(eval_file, f'_{model}', 'pkl')
        nproc = judge_kwargs.pop('nproc', 4)

        if not osp.exists(score_file):
            data = load(eval_file)
            # NOTE(review): `model` is rebound here from the judge-name string
            # to the judge object — the string is still baked into the file
            # paths above, so behavior is correct but the reuse is confusing.
            model = build_judge(system_prompt=VQA_JUDGE_SYS_PROMPT, **judge_kwargs)
            lt = len(data)
            lines = [data.iloc[i] for i in range(lt)]
            tups = [(model, line) for line in lines]
            indices = [line['index'] for line in lines]

            # Resume from the tmp checkpoint: skip items already judged.
            ans = {}
            if osp.exists(tmp_file):
                ans = load(tmp_file)
                tups = [x for x, i in zip(tups, indices) if i not in ans]
                indices = [i for i in indices if i not in ans]

            if len(indices):
                _ = track_progress_rich(
                    check_ans_vqa,
                    tups,
                    nproc=nproc,
                    chunksize=nproc,
                    keys=indices,
                    save=tmp_file,
                )
            ans = load(tmp_file)
            for idx in ans:
                # Judge replies are expected to look like "Score: <int>".
                data.loc[data['index'] == idx, 'score'] = int(ans[idx].replace('Score:', '').strip())
            dump(data, score_file)

        rating = get_dimension_rating(score_file)
        return rating
VLMEvalKit-sudoku/vlmeval/dataset/text_base.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import abstractmethod
2
+ from ..smp import *
3
+
4
+
5
class TextBaseDataset:
    """Base class for text-only benchmark datasets backed by a TSV file."""

    MODALITY = 'TEXT'
    DATASET_URL = {}
    DATASET_MD5 = {}

    def __init__(self, dataset='MMBench', **kwargs):
        self.dataset_name = dataset

        frame = self.load_data(dataset)

        # Normalize the index column: stringify first, then restore ints
        # when every value is integer-like.
        frame['index'] = [str(idx) for idx in frame['index']]
        if np.all([istype(idx, int) for idx in frame['index']]):
            frame['index'] = [int(idx) for idx in frame['index']]

        self.data = frame
        self.post_build(dataset)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return dict(self.data.iloc[idx])

    def prepare_tsv(self, url, file_md5=None):
        """Download the dataset TSV if absent/corrupt, localize large files, and load it."""
        root = LMUDataRoot()
        os.makedirs(root, exist_ok=True)
        tsv_path = osp.join(root, url.split('/')[-1])

        cached_ok = osp.exists(tsv_path) and (file_md5 is None or md5(tsv_path) == file_md5)
        freshly_downloaded = not cached_ok
        if freshly_downloaded:
            warnings.warn('The dataset tsv is not downloaded')
            download_file(url, tsv_path)

        # Files above 1 GB get a localized copy (remote assets materialized).
        if file_size(tsv_path, 'GB') > 1:
            local_path = tsv_path.replace('.tsv', '_local.tsv')
            if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL', None) or freshly_downloaded:
                from ..tools import LOCALIZE
                LOCALIZE(tsv_path, local_path)
            tsv_path = local_path
        return load(tsv_path)

    def dump_image(self, line):
        # Text-only datasets carry no images.
        return []

    def display(self, line):
        """Pretty-print one record (by position or as a record object)."""
        if isinstance(line, int):
            line = self.data.iloc[line]
        assert isinstance(line, pd.Series) or isinstance(line, dict)
        mmqa_display(line)

    # Return a list of dataset names that are supported by this class, can override
    @classmethod
    def supported_datasets(cls):
        return list(cls.DATASET_URL)

    # Given the dataset name, return the dataset as a pandas dataframe, can override
    def load_data(self, dataset):
        url = self.DATASET_URL[dataset]
        file_md5 = self.DATASET_MD5[dataset]
        return self.prepare_tsv(url, file_md5)

    # Post built hook, will be called after the dataset is built, can override
    def post_build(self, dataset):
        pass

    # Given one data record, return the built prompt (a multi-modal message), can override
    def build_prompt(self, line):
        if isinstance(line, int):
            line = self.data.iloc[line]

        return [dict(type='text', value=line['question'])]

    # Given the prediction file, return the evaluation results in the format of a dictionary or pandas dataframe
    @abstractmethod
    def evaluate(self, eval_file, **judge_kwargs):
        pass
VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/mlvu.cpython-310.pyc ADDED
Binary file (8.25 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/multiple_choice.cpython-310.pyc ADDED
Binary file (21.2 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/omni_verifier.cpython-310.pyc ADDED
Binary file (6.68 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/shortqa.cpython-310.pyc ADDED
Binary file (8.92 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/spatial457.cpython-310.pyc ADDED
Binary file (2.97 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/tamperbench.cpython-310.pyc ADDED
Binary file (19.8 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/tempcompass.cpython-310.pyc ADDED
Binary file (8.11 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/yorn.cpython-310.pyc ADDED
Binary file (8.76 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/README.md ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MEGA-Bench: Scaling Multimodal Evaluation to over 500 Real-World Tasks [ICLR 2025]
2
+
3
+ ![image](https://github.com/user-attachments/assets/5fd44fa9-0ec2-4298-ad0c-e883cb1edf7f)
4
+
5
+ MEGA-Bench contains 505 multimodal tasks with diverse data sources, input/output formats, and skill requirements. The taxonomy tree is derived from the application dimension, which guides and calibrates the annotation process. The benchmark is equipped with a suite of 45 evaluation metrics to handle various output formats beyond multiple-choice questions.
6
+
7
+ Following this doc, the evaluation result contains the final scores and a multi-dimensional breakdown, in a format consistent with the [MEGA-Bench Leaderboard](https://huggingface.co/spaces/TIGER-Lab/MEGA-Bench). Below is an example from evaluating `Qwen-2-VL-7B-Instruct` on the core set.
8
+
9
+
10
+ ## Step-1: Install requirements for MEGA-Bench metrics to obtain the evaluation scores and breakdown analysis
11
+
12
+ ```bash
13
+ pip install -r vlmeval/dataset/utils/megabench/requirements.txt
14
+ ```
15
+
16
+
17
+ ## Step-2: Get the model response and evaluation score files with VLMEvalKit
18
+
19
+ ```bash
20
+ # Core set (440 tasks, in 16-frame setting)
21
+ python3 run.py \
22
+ --data MEGABench_core_16frame \
23
+ --model Qwen2-VL-7B-Instruct \
24
+ --work-dir your/work/dir \
25
+
26
+ # Open-ended set (65 tasks, in 16-frame setting)
27
+ python3 run.py \
28
+ --data MEGABench_open_16frame \
29
+ --model Qwen2-VL-7B-Instruct \
30
+ --work-dir your/work/dir \
31
+ ```
32
+ Note: please set up the `OPENAI_API_KEY` in the .env file to evaluate the open set.
33
+
34
+ Then you can have 2 score files in the directory like:
35
+
36
+ ```bash
37
+ your/work/dir/Qwen-2-VL-7B-Instruct/T20250706_Gbf63ab2c/megabench_score_core.json
38
+ your/work/dir/Qwen-2-VL-7B-Instruct/T20250707_Gbf63ab2c/megabench_score_open.json
39
+ ```
40
+
41
+ ## Step-3(Optional): Run MEGA-Bench scripts to obtain the breakdown analysis
42
+
43
+ Move the 2 score files into the same directory, then run the script:
44
+
45
+ ```bash
46
+ # Run the metrics for the open-ended set
47
+ cd vlmeval/dataset/utils/megabench/tools
48
+ python3 derive_breakdown_results.py --input_dir your/dir/to/megabench_scores
49
+ ```
50
+
51
+ The results in `your/dir/to/megabench_scores/analysis` are what is used by the [MEGA-Bench leaderboard](https://huggingface.co/spaces/TIGER-Lab/MEGA-Bench). The leaderboard can be updated by putting the files in the results directory of the leaderboard's [HuggingFace space](https://huggingface.co/spaces/TIGER-Lab/MEGA-Bench/tree/main/static/eval_results/Default).
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
# Public API of the MEGA-Bench metrics package: re-export the core enums.
from .aggregation_type import AggregationType
from .metric_type import MetricType
from .response_parse_type import ResponseParseType

# __all__ must list the *names* of the public symbols as strings; listing
# the objects themselves breaks `from ... import *` (TypeError) and tooling.
__all__ = ["AggregationType", "MetricType", "ResponseParseType"]
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/evaluator.cpython-310.pyc ADDED
Binary file (9.37 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/__pycache__/response_parse_type.cpython-310.pyc ADDED
Binary file (1.94 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/aggregation/min_agg.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numbers import Number
2
+ from typing import Dict
3
+
4
+
5
class MinAggregation:
    """Take the minimum of all valid scores."""

    @staticmethod
    def aggregate(scores: Dict[str, Number], weights: Dict[str, Number]) -> Number:
        """Return the smallest non-negative score, or -1 when none is valid.

        Negative scores mark invalid/failed fields and are excluded; the
        ``weights`` argument is accepted for interface parity but unused.
        """
        valid = [value for value in scores.values() if value >= 0]
        return min(valid) if valid else -1
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/aggregation_type.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from enum import Enum
2
+
3
class AggregationType(Enum):
    """Aggregation strategy for combining per-field scores into a query score."""

    MEAN = 0

    @classmethod
    def from_string(cls, s):
        # Only weighted-mean aggregation is implemented; every config string
        # (including None) maps to MEAN.
        return cls.MEAN

    def aggregate(self, field_scores, field_weights):
        """Return the weighted mean of ``field_scores``.

        :param field_scores: mapping of field name -> score. A score may also
            be a ``(score, info)`` tuple from metrics that return diagnostics;
            only the numeric component is used.
        :param field_weights: mapping of field name -> weight; missing fields
            default to weight 1.0.
        :return: weighted mean in the scores' scale, or 0.0 when there are no
            scores or the total weight is non-positive.
        """
        if not field_scores:
            return 0.0

        total_score = 0.0
        total_weight = 0.0

        for field, score in field_scores.items():
            weight = field_weights.get(field, 1.0)
            try:
                total_score += score * weight
            except TypeError:
                # Fix: was a bare `except:` that swallowed every error.
                # Only a tuple score legitimately fails the multiplication;
                # use its numeric first component.
                total_score += score[0] * weight
            total_weight += weight

        return total_score / total_weight if total_weight > 0 else 0.0
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/evaluator.py ADDED
@@ -0,0 +1,399 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import os
4
+ from typing import Any, Dict, List
5
+ import ast
6
+ from vlmeval import load, dump
7
+
8
+
9
+ from . import MetricType, AggregationType, ResponseParseType
10
+ from .parsing.common.utils import evaluate_as_string
11
+
12
+
13
+ class MEGABenchEvaluator:
14
+ def __init__(
15
+ self,
16
+ subset_name: str,
17
+ responses_file: str,
18
+ output_file: str,
19
+ ):
20
+ """
21
+ :param hf_data_file: Path to a file containing HF dataset tasks + their metric configs
22
+ :param model_responses_file: Path to a JSON file with tasks + model responses
23
+ :param output_file: Path to store evaluated results
24
+ """
25
+ self.hf_data = self._load_hf(subset_name) # e.g. same structure used previously
26
+ self.data = self._load_json(responses_file) # The model's output
27
+ self.output_file = output_file
28
+ self.tmp_output_file = output_file.replace(".json", "_tmp.pkl")
29
+
30
+ # Build a dict of {task_name -> metric configuration} for quick lookup
31
+ self.scoring_functions = {}
32
+ for task_name, task_samples in self.hf_data.items():
33
+ self.scoring_functions[task_name] = ast.literal_eval(
34
+ task_samples[0]["metric_info"]
35
+ )
36
+
37
+ def _load_hf(self, subset_name: str) -> List[Dict[str, Any]]:
38
+ """
39
+ Load the HF dataset for the given subset name.
40
+ """
41
+ from datasets import load_dataset
42
+ dataset = load_dataset("TIGER-Lab/MEGA-Bench", subset_name)["test"]
43
+ task_dict = {}
44
+ for sample in dataset:
45
+ task_name = sample["task_name"]
46
+ if task_name not in task_dict:
47
+ task_dict[task_name] = []
48
+ task_dict[task_name].append(sample)
49
+
50
+ return task_dict
51
+
52
+ def _get_eval_context(self, task_name, query):
53
+ if "query_idx" in query:
54
+ query_idx = query["query_idx"]
55
+ eval_context = self.hf_data[task_name][query_idx]["eval_context"]
56
+ else:
57
+ global_idx = query["global_idx"]
58
+ global_idx_to_sample = {sample["id"]: sample for sample in self.hf_data[task_name]}
59
+ eval_context = global_idx_to_sample[global_idx]["eval_context"]
60
+
61
+ eval_context = ast.literal_eval(eval_context)
62
+ return eval_context
63
+
64
+ def _determine_eval_style(self, task):
65
+ metric_info = self.scoring_functions[task["task_name"]]
66
+ all_task_metrics = list(metric_info["field_score_function"].values())
67
+ eval_type = (
68
+ "rule"
69
+ if (
70
+ "gpt_4o_as_judge" not in all_task_metrics
71
+ and "ascii_art_gpt4o_judge" not in all_task_metrics
72
+ )
73
+ else "llm"
74
+ )
75
+ return eval_type
76
+
77
    def evaluate(self):
        """
        The main entry point to evaluate all tasks in self.data based on the HF dataset’s metric info.

        Scores every query of every task, checkpointing per-query results to
        ``self.tmp_output_file`` so an interrupted run can resume, then writes
        the scored data plus a macro/micro summary to ``self.output_file``.
        """
        # Resume from the checkpoint file if a previous run was interrupted.
        if os.path.exists(self.tmp_output_file):
            exist_records = load(self.tmp_output_file)
        else:
            exist_records = {}
        num_tasks = 0
        num_queries = 0
        total_query_score = 0.0
        total_task_score = 0.0

        # Evaluate each task
        for task in self.data:
            task_name = task.get("task_name", "")
            if task_name not in exist_records:
                exist_records[task_name] = {}

            # If no scoring config is found for the given task_name, skip
            score_config = self.scoring_functions.get(
                task_name,
                {
                    "field_score_function": {},
                    "aggregation": {"function": None, "field_weights": {}},
                    "response_parse_function": None,
                },
            )
            if not task.get("query_response"):
                # No queries to score
                continue

            num_tasks += 1
            task_score_sum = 0.0
            # Prepare the aggregator
            aggregator = AggregationType.from_string(score_config["aggregation"]["function"])
            field_weights = score_config["aggregation"]["field_weights"]

            # Parse the metric definitions
            field_score_functions = score_config.get("field_score_function", {})
            global_aux_metrics = score_config.get("global_aux_metrics", {})
            parser_type_str = score_config.get("response_parse_function", "dummy")
            parser = ResponseParseType.from_string(parser_type_str)

            # Extract the fields from the first correct_answer (assuming uniform)
            first_correct = task["query_response"][0]["correct_answer"]
            all_fields = list(first_correct.keys())
            # Usually, we only treat “##something” fields as metadata, so skip them:
            answer_fields = [f for f in all_fields if not f.startswith("##")]

            # For each query in the task
            for idx, query in enumerate(task["query_response"]):
                num_queries += 1
                response_text = query.get("response", "")
                correct_answer = query["correct_answer"]

                # 1) Parse the response according to the specified parser
                response_obj = self._parse_response(
                    task_name,
                    parser,
                    response_text,
                    correct_answer,
                    answer_fields,
                    query,
                    task,
                )

                if idx in exist_records[task_name]:
                    # Reuse checkpointed scores (avoids re-billing LLM judges).
                    query["scores"] = exist_records[task_name][idx]
                else:
                    # Initialize scores for this query
                    query["scores"] = {"field": {}, "info": {}}

                    # 2) Evaluate each field
                    for fld, fld_metric_name in field_score_functions.items():
                        metric = self._build_metric(fld_metric_name, score_config)
                        self._evaluate_field(
                            task_name,
                            metric,
                            fld,
                            response_obj,
                            correct_answer,
                            query
                        )

                    # Evaluate global auxiliary metrics (if any)
                    for fld, fld_metric_name in global_aux_metrics.items():
                        metric = self._build_metric(fld_metric_name, score_config)
                        # Some tasks want the entire response object to do an additional check
                        # So, pass original `response_obj` under `fld` key:
                        tmp_obj = {fld: response_obj}
                        self._evaluate_field(
                            task_name,
                            metric,
                            fld,
                            tmp_obj,
                            correct_answer,
                            query,
                            is_aux=True,
                        )

                    exist_records[task_name][idx] = query["scores"]
                    # Checkpoint every 10 queries and at the end of the task.
                    if idx % 10 == 0 or idx == len(task["query_response"]) - 1:
                        dump(exist_records, self.tmp_output_file)

                # 3) Aggregate the query-level score
                query["scores"]["query"] = aggregator.aggregate(
                    query["scores"]["field"],
                    field_weights,
                )

                # Negative query scores mark failures and are excluded from the task sum.
                if query["scores"]["query"] >= 0:
                    task_score_sum += query["scores"]["query"]

            # Calculate overall task score
            if task["query_response"]:
                mean_score = task_score_sum / len(task["query_response"])
            else:
                mean_score = 0.0
            task["task_score"] = task_score_sum
            task["mean_task_score"] = mean_score
            task["eval_type"] = self._determine_eval_style(task)

            total_query_score += task_score_sum
            total_task_score += mean_score

            print(f"[Task: {task_name}] Score = {task_score_sum} / {len(task['query_response'])}")

        # Produce overall summary stats
        # macro = mean over tasks; micro = mean over individual queries.
        summary = {}
        if num_tasks > 0:
            macro_mean_score = total_task_score / num_tasks
            summary["macro_mean_score"] = macro_mean_score
        else:
            summary["macro_mean_score"] = 0.0

        if num_queries > 0:
            micro_mean_score = total_query_score / num_queries
            summary["micro_mean_score"] = micro_mean_score
        else:
            summary["micro_mean_score"] = 0.0

        summary["num_tasks"] = num_tasks
        summary["num_queries"] = num_queries
        # print(f"\n=== Evaluation Summary ===\n{json.dumps(summary, indent=4)}\n")

        # Write back final data + summary
        output_data = {
            "data": self.data,
            "summary": summary,
        }
        self._save_results(self.output_file, output_data)
        print(f"Evaluation complete! Results saved to {self.output_file}")
230
+
231
    def _evaluate_field(
        self,
        task_name: str,
        metric: Any,
        field: str,
        response_obj: Dict[str, Any],
        correct_answer: Dict[str, Any],
        query: Dict[str, Any],
        is_aux: bool = False,
    ) -> float:
        """Compute score for a single field using the given metric.

        NOTE(review): despite the ``-> float`` annotation, most branches write
        the score into ``query["scores"]`` in place and implicitly return
        None; only the unsupported-metric branch returns a value.
        """
        eval_context = self._get_eval_context(task_name, query)

        if metric == MetricType.UNSUPPORTED:
            print(f"The metric for {field} in task {task_name} is not supported")
            return 0.0
        elif metric == MetricType.SYMBOLIC_PLANNING_TEST or metric == MetricType.PROGRAM_JUDGE:
            # These metrics validate the response against the eval context alone.
            query["scores"]["field"][field] = metric.match(
                response_obj.get(field),
                eval_context,
            )
        elif metric == MetricType.CONSTRAINED_GENERATION:
            # Takes the whole response object; returns (score, diagnostics).
            score, eval_info = metric.match(response_obj, eval_context)
            query["scores"]["field"][field] = score
            query["scores"]["info"][field] = eval_info
        elif metric == MetricType.XML_NORM_POINT_IN_BBOX:
            # Also returns (score, diagnostics), but on the single field value.
            score, eval_info = metric.match(response_obj.get(field), eval_context)
            query["scores"]["field"][field] = score
            query["scores"]["info"][field] = eval_info
        elif isinstance(metric, MetricType.VLM_AS_JUDGE.class_impl):
            # The VLM judge needs the full multimodal context (images + question).
            images = query.get("images", [])
            question = query.get("question", "")
            # Aux metrics receive the whole correct_answer dict, not one field.
            correct_val = correct_answer.get(field, "") if not is_aux else correct_answer
            response_info = (
                response_obj.get(field)
                if isinstance(response_obj, dict)
                else response_obj
            )
            query["scores"]["field"][field] = metric.match(
                response_info,
                correct_val,
                images=images,
                question=question,
                eval_context=eval_context,
            )
        else:
            # Default rule-based path: compare field value against the target.
            correct_val = correct_answer.get(field, "") if not is_aux else correct_answer
            correct_val = evaluate_as_string(correct_val)  # remove extra formatting
            predicted_val = response_obj.get(field, "")
            query["scores"]["field"][field] = metric.match(predicted_val, correct_val)
281
+
282
    def _parse_response(
        self,
        task_name: str,
        parser,
        response_text: str,
        correct_answer: Dict[str, Any],
        answer_fields: List[str],
        query: Dict[str, Any],
        task: Dict[str, Any],
    ) -> Dict[str, Any]:
        """
        Parse the raw response into a structured object, depending on the parser.

        Args:
            task_name: Task name, used only for the failure log message.
            parser: A ResponseParseType member selecting the strategy.
            response_text: Raw model output to parse.
            correct_answer: Ground-truth dict; its keys serve as fallback
                fields when JSON parsing fails.
            answer_fields: Names of the expected answer fields.
            query: Current query record (supplies question / global_idx).
            task: Current task record (supplies the task description).

        Returns:
            A dict mapping each answer field to its parsed value.
        """
        res_parsing_pass = True
        if parser.is_single_field_parser():
            # single field
            assert (
                len(answer_fields) == 1
            ), "The answer_string parse must be used when the answer has a single field"
            answer_key = answer_fields[0]

            global_description = task["task_description"]
            query_question = query["question"]
            # A single-line ground truth lets the parser drop trailing chatter
            # the model may have appended after its answer.
            is_single_line_ans = "\n" not in correct_answer[answer_key]

            response_obj = parser.parse(
                response_text,
                answer_key,
                global_description=global_description,
                query_question=query_question,
                is_single_line_ans=is_single_line_ans,
            )
            assert isinstance(response_obj[answer_key], str), "Single-field parsing results must be string"
        else:
            # Structural output (using JSON parser or other specified parsing func) or dummy parse (return all)
            response_obj = parser.parse(response_text)

            if parser == ResponseParseType.JSON and (
                not isinstance(response_obj, dict) or not response_obj
            ):
                # Expect a JSON, but parsing failed,
                # Record the failure parsing, and use the raw string for each field of the answer
                res_parsing_pass = False
                response_obj = {}
                for field in correct_answer:
                    response_obj[field] = response_text

        if not res_parsing_pass:
            print(
                f"Task:{task_name}, cannot parse query with global idx {query['global_idx']}"
            )
        return response_obj
334
+
335
+ def _build_metric(self, metric_name: str, score_config: Dict[str, Any]):
336
+ """
337
+ Given a string for the metric (e.g. 'gpt_4o_as_judge'),
338
+ return the actual MetricType or a specialized metric class.
339
+ """
340
+ metric = MetricType.from_string(metric_name)
341
+ if metric == MetricType.VLM_AS_JUDGE:
342
+ # Build the GPT4O metric using the provided config
343
+ gpt4o_configs = score_config.get("gpt4o_eval_configs", {})
344
+ metric = metric.class_impl(gpt4o_configs)
345
+ elif metric == MetricType.ASCII_ART_GPT4O_JUDGE:
346
+ # Build the ASCII Art metric using the provided config
347
+ ascii_art_configs = score_config.get("ascii_art_eval_configs", {})
348
+ metric = metric.class_impl(ascii_art_configs)
349
+ return metric
350
+
351
+ @staticmethod
352
+ def _load_json(file_path: str) -> Any:
353
+ with open(file_path, "r", encoding="utf-8") as f:
354
+ return json.load(f)
355
+
356
+ @staticmethod
357
+ def _save_results(file_path: str, data: Any) -> None:
358
+ """
359
+ Safe-write a JSON file via temp file + replace.
360
+ Since the results file is long, this avoid breaking the file in case of a crash.
361
+ """
362
+ temp_filename = f"{file_path}.tmp"
363
+ with open(temp_filename, "w", encoding="utf-8") as f:
364
+ json.dump(data, f, ensure_ascii=False, indent=4)
365
+ os.replace(temp_filename, file_path)
366
+
367
+
368
def main():
    """CLI entry point: parse arguments and run the MEGA-Bench evaluator."""
    arg_parser = argparse.ArgumentParser(description="Simple Evaluator")
    # All three flags are mandatory string options; declare them table-driven.
    for flag, help_text in (
        ("--subset_name", "The subset of MEGA-Bench to evaluate."),
        ("--submission_file", "Path to a JSON file containing model responses."),
        ("--output_file", "Where to store the evaluation results (JSON)."),
    ):
        arg_parser.add_argument(flag, type=str, required=True, help=help_text)

    cli_args = arg_parser.parse_args()
    MEGABenchEvaluator(
        subset_name=cli_args.subset_name,
        responses_file=cli_args.submission_file,
        output_file=cli_args.output_file,
    ).evaluate()
396
+
397
+
398
# Run the CLI only when this file is executed as a script, not on import.
if __name__ == "__main__":
    main()
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/metric_type.py ADDED
@@ -0,0 +1,259 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import cached_property
2
+ from enum import Enum
3
+ from .utils import lazy_import
4
+ import logging
5
+
6
+
7
class MetricType(Enum):
    """The types of metrics.

    Each member maps a config string to a scoring implementation; the actual
    scoring class is resolved lazily via :attr:`class_impl` so heavyweight
    dependencies are only imported for metrics that are actually used.
    """

    EXACT_STR_MATCH = "exact_str_match"
    SIMPLE_STR_MATCH = "simple_str_match"
    CODE_RESULT_EXACT_STR_MATCH = "code_result_exact_str_match"
    DICT_EXACT_STR_MATCH_AGG_RECALL = "dict_exact_str_match_agg_recall"
    EXACT_STR_MATCH_CASE_INSENSITIVE = "exact_str_match_case_insensitive"
    NORM_SIM_DAMERAU_LEVENSHTEIN = "normalized_similarity_damerau_levenshtein"
    NEAR_STR_MATCH = "near_str_match"
    NUMBER_RELATIVE_DIFF_RATIO = "number_rel_diff_ratio"
    SET_EQUALITY = "set_equality"
    SET_EQUALITY_CASE_INSENSITIVE = "set_equality_case_insensitive"
    DICT_SET_EQUALITY_AGG_JACCARD = "dict_set_equality_agg_jaccard"
    DICT_PRECISION = "dict_precision"
    JACCARD_INDEX = "jaccard_index"
    JACCARD_INDEX_CASE_INSENSITIVE = "jaccard_index_case_insensitive"
    DICT_JACCARD_AGG_JACCARD = "dict_jaccard_agg_jaccard"
    DICT_EQUALITY = "dict_equality"
    SET_PRECISION = "set_precision"
    POSITIVE_INT_MATCH = "positive_int_match"
    CHESS_MOVE_LIST_JACCARD_INDEX = "chess_move_list_jaccard_index"
    LONGEST_COMMON_LIST_PREFIX_RATIO = "longest_common_list_prefix_ratio"
    ASCII_ART_GPT4O_JUDGE = "ascii_art_gpt4o_judge"
    NLI_ENTAILMENT = "nli_entailment"
    BLEU = "bleu"
    GLEU_CN = "gleu_cn"
    XML_NORM_BBOX_IOU_SINGLE = "xml_nbbox_iou_single"
    LATEX_EXPR_EQUALITY = "latex_expr_equality"
    TEXT_WITH_LATEX_EXPR_EQUALITY = "text_with_latex_expr_equality"
    NORM_BBOX_IOU_TUPLE = "nbbox_iou_tuple"
    NORM_BBOX_IOU_SINGLE = "nbbox_iou_single"
    NORM_BBOX_IOU_SEQUENCE = "nbbox_iou_sequence"
    DICT_NORM_BBOX_IOU_TUPLE_AGG_JACCARD = "dict_nbbox_iou_tuple_agg_jaccard"
    XML_NORM_POINT_IN_BBOX = "xml_norm_point_in_bbox"
    XML_NORM_POINT_DISTANCE = "xml_norm_point_distance"
    GEO_PROXIMITY_LOCATION_DICT = "geo_proximity_location_dict"
    NORMALIZED_RMSE = "normalized_rmse"
    PROGRAM_JUDGE = "program_judge"
    STR_SET_EQUALITY_LINE_BREAK = "str_set_equality_line_break"
    STR_SET_EQUALITY_COMMA = "str_set_equality_comma"
    SEQUENCE_EQUALITY = "sequence_equality"
    SEQUENCE_EQUALITY_CASE_INSENSITIVE = "sequence_equality_case_insensitive"
    SEQUENCE_ACCURACY_CASE_INSENSITIVE = "sequence_accuracy_case_insensitive"
    ANGLE_SEQ_FLOAT_RMSE = "angle_seq_float_rmse"
    SYMBOLIC_PLANNING_TEST = "symbolic_planning_test"
    MULTI_REF_PHRASE_EVAL = "multi_ref_phrase"
    GENERAL_SINGLE_NUMERICAL_MATCH = "general_single_numerical_match"
    BOXED_SINGLE_NUMERICAL_MATCH = "boxed_single_numerical_match"
    SEQUENCE_COORDS_SIMILARITY = "sequence_coords_similarity"
    CONSTRAINED_GENERATION = "constrained_generation"
    VLM_AS_JUDGE = "gpt_4o_as_judge"
    UNSUPPORTED = "unsupported"

    @cached_property
    def class_impl(self):
        """Lazily resolve and return the scoring class for this metric.

        Unknown members fall back to ``UnsupportedScoring`` (and are logged),
        so resolution never raises.
        """
        lazy_imports = {
            MetricType.SIMPLE_STR_MATCH: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.simple_str_match", "SimpleStrMatch"
            ),
            MetricType.EXACT_STR_MATCH: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.exact_str_match", "ExactStrMatch"
            ),
            MetricType.CODE_RESULT_EXACT_STR_MATCH: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.exact_str_match", "CodeResultExactStrMatch"
            ),
            MetricType.DICT_EXACT_STR_MATCH_AGG_RECALL: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.dict_exact_match_agg_recall",
                "DictExactStrMatchAggRecall",
            ),
            MetricType.EXACT_STR_MATCH_CASE_INSENSITIVE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.exact_str_match_case_insensitive",
                "ExactStrMatchCaseInsensitive",
            ),
            MetricType.NORM_SIM_DAMERAU_LEVENSHTEIN: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.normalized_similarity_damerau_levenshtein",
                "NormalizedSimilarityDamerauLevenshtein",
            ),
            MetricType.NEAR_STR_MATCH: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.near_str_match", "NearStrMatch"
            ),
            MetricType.NUMBER_RELATIVE_DIFF_RATIO: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.number_rel_diff_ratio", "NumberRelDiffRatio"
            ),
            MetricType.SET_EQUALITY: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.set_equality", "SetEquality"
            ),
            MetricType.SET_EQUALITY_CASE_INSENSITIVE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.set_equality", "SetEqualityCaseInsensitive"
            ),
            MetricType.DICT_SET_EQUALITY_AGG_JACCARD: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.dict_set_equality_agg_jaccard",
                "DictSetEqualityAggJaccard",
            ),
            MetricType.DICT_EQUALITY: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.dict_equality",
                "DictEquality",
            ),
            MetricType.DICT_PRECISION: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.dict_equality",
                "DictPrecision",
            ),
            MetricType.JACCARD_INDEX: lazy_import("vlmeval.dataset.utils.megabench.scoring.jaccard", "Jaccard"),
            MetricType.JACCARD_INDEX_CASE_INSENSITIVE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.jaccard", "JaccardCaseInsensitive"
            ),
            MetricType.DICT_JACCARD_AGG_JACCARD: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.dict_jaccard_agg_jaccard", "DictJaccardAggJaccard"
            ),
            MetricType.SET_PRECISION: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.set_precision", "SetPrecision"
            ),
            MetricType.POSITIVE_INT_MATCH: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.positive_int_match", "PositiveIntMatch"
            ),
            MetricType.CHESS_MOVE_LIST_JACCARD_INDEX: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.chess_jaccard", "ChessMoveJaccard"
            ),
            MetricType.LONGEST_COMMON_LIST_PREFIX_RATIO: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.longest_common_list_prefix_ratio",
                "LongestCommonListPrefixRatio",
            ),
            MetricType.ASCII_ART_GPT4O_JUDGE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.ascii_art_gpt4o_judge",
                "AsciiArtVLMJudgeScore",
            ),
            MetricType.NLI_ENTAILMENT: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.nli_entailment", "NliEntailment"
            ),
            MetricType.BLEU: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.sacrebleu_bleu",
                "Bleu",
            ),
            MetricType.GLEU_CN: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.gleu",
                "GLEUChinese",
            ),
            MetricType.XML_NORM_BBOX_IOU_SINGLE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.xml_nbbox_iou", "XmlNbboxIouSingle"
            ),
            MetricType.BOXED_SINGLE_NUMERICAL_MATCH: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.general_numerical_match", "BoxedSingleNumericalMatch"
            ),
            MetricType.GENERAL_SINGLE_NUMERICAL_MATCH: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.general_numerical_match", "GeneralSingleNumericalMatch"
            ),
            MetricType.SEQUENCE_COORDS_SIMILARITY: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.coordinate_sequence_match", "CoordsSequenceSimilarity"
            ),
            MetricType.LATEX_EXPR_EQUALITY: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.latex_expr_equality",
                "LatexExprEquality",
            ),
            MetricType.TEXT_WITH_LATEX_EXPR_EQUALITY: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.latex_expr_equality",
                "TextLatexExprEquality",
            ),
            MetricType.NORM_BBOX_IOU_TUPLE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.nbbox_iou", "NbboxIouTuple"
            ),
            MetricType.NORM_BBOX_IOU_SINGLE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.nbbox_iou", "NbboxIouSingle"
            ),
            MetricType.NORM_BBOX_IOU_SEQUENCE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.nbbox_iou", "NbboxIouSequence"
            ),
            MetricType.DICT_NORM_BBOX_IOU_TUPLE_AGG_JACCARD: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.dict_nbbox_iou_tuple_agg_jaccard",
                "DictNbboxIouTupleAggJaccard",
            ),
            MetricType.XML_NORM_POINT_IN_BBOX: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.xml_norm_point_in_bbox",
                "XmlNormPointInBbox",
            ),
            MetricType.XML_NORM_POINT_DISTANCE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.xml_norm_point_distance",
                "XmlNormPointDistance",
            ),
            MetricType.GEO_PROXIMITY_LOCATION_DICT: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.geo_proximity", "GeoProximityLocationDict"
            ),
            MetricType.NORMALIZED_RMSE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.mse", "NormalizedRMSE"
            ),
            MetricType.PROGRAM_JUDGE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.program_judge", "ProgramJudge"
            ),
            MetricType.STR_SET_EQUALITY_LINE_BREAK: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.set_equality", "StringSetEqualityLineSplit"
            ),
            MetricType.STR_SET_EQUALITY_COMMA: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.set_equality", "StringSetEqualityCommaSplit"
            ),
            MetricType.SEQUENCE_EQUALITY: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.sequence_equality", "SequenceEquality"
            ),
            MetricType.SEQUENCE_EQUALITY_CASE_INSENSITIVE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.sequence_equality", "SequenceEqualityCaseInsensitive"
            ),
            MetricType.SEQUENCE_ACCURACY_CASE_INSENSITIVE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.sequence_equality", "SequenceAccuracyCaseInsensitive"
            ),
            MetricType.ANGLE_SEQ_FLOAT_RMSE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.mse", "AngleSeqFloatRMSE"
            ),
            MetricType.SYMBOLIC_PLANNING_TEST: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.symbolic_planning", "SymbolicPlanningMetricTest"
            ),
            MetricType.MULTI_REF_PHRASE_EVAL: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.multi_ref_phrase", "MultipleReferencePhraseEval"
            ),
            MetricType.CONSTRAINED_GENERATION: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.constrained_generation", "ConstrainedGenerationEval"
            ),
            MetricType.VLM_AS_JUDGE: lazy_import(
                "vlmeval.dataset.utils.megabench.scoring.vlm_as_judge", "VLMJudgeScore"
            ),
        }

        if self not in lazy_imports:
            logging.error(f"Metric {self} not implemented...")

        importer = lazy_imports.get(
            self,
            lazy_import("vlmeval.dataset.utils.megabench.scoring.unsupported_scoring", "UnsupportedScoring"),
        )
        return importer()

    def match(self, response: str, correct_answer: str, task_info=None):
        """Score *response* against *correct_answer* via the metric class."""
        if not task_info:
            return self.class_impl.match(response, correct_answer)
        else:
            return self.class_impl.match(response, correct_answer, task_info)

    @classmethod
    def from_string(cls, s):
        """Look up a MetricType by its config-string value.

        ``None`` maps to UNSUPPORTED; an unknown string raises ValueError.
        """
        try:
            if s is None:
                return cls("unsupported")
            return cls(s.lower())
        except ValueError as exc:
            # Fix: Enum lookup by *value* raises ValueError, not KeyError, so
            # the original ``except KeyError`` handler was dead code and this
            # message was never produced.
            raise ValueError(f"Invalid metric type: {s}") from exc

    @classmethod
    def get_all_values(cls):
        """Return all metric members in declaration order."""
        return list(cls)
253
+
254
+
255
# List all of the supported metrics:
if __name__ == "__main__":
    print("All MetricType values:")
    for member in MetricType.get_all_values():
        print(f"{member.name}: {member.value}")
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/answer_str_parse.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from .common.parsers import parse_json
3
+ from .common.utils import (
4
+ extract_code_block_content,
5
+ extract_answer_content,
6
+ evaluate_as_string,
7
+ drop_additional_text,
8
+ )
9
+
10
+ logger = logging.getLogger("errorLogger")
11
+
12
+
13
class AnswerStrParse:
    """Parse the response for the single answer field."""

    @classmethod
    def _parse(
        cls,
        response: str,
        *,
        is_ascii_art: bool = False,
        should_remove_surrounding_whitespace=True,
        global_description: str = "",
        query_question: str = "",
        is_single_line_ans: bool = None,
    ) -> dict:
        """Try to parse a single answer.

        NOTE(review): despite the ``-> dict`` annotation, every visible return
        path yields a plain string; the ``{answer_key: value}`` wrapping
        happens in :meth:`parse`.

        Args:
            response: Raw model output (``None`` is treated as empty).
            is_ascii_art: Preserve layout-sensitive ASCII-art content.
            should_remove_surrounding_whitespace: Strip the extracted answer.
            global_description: Task-level description (only used by the
                commented-out error log below).
            query_question: The query's question text (same as above).
            is_single_line_ans: If True, extra trailing lines are dropped for
                non-code, non-ASCII-art answers.
        """
        if response is None:
            response = ""

        # Extract the answer content based on "Answer: ..." format
        answer_content = extract_answer_content(
            response,
            is_ascii_art=is_ascii_art,
            should_remove_surrounding_whitespace=should_remove_surrounding_whitespace,
        )

        # Extract things from the code block if response is wrapped by a code block
        answer_content, is_code = extract_code_block_content(
            answer_content,
            is_ascii_art=is_ascii_art,
            should_remove_surrounding_whitespace=should_remove_surrounding_whitespace,
        )

        if not is_code and is_single_line_ans and not is_ascii_art:
            # Ground truth is single-line: drop any explanation the model
            # appended after its actual answer.
            answer_content = drop_additional_text(answer_content)

        # Check if the content is a potential dict or list.
        if answer_content.startswith("{") or answer_content.startswith("["):
            # Attempt to parse the content as JSON
            response_obj = parse_json(answer_content)
            if response_obj == {}:
                # Empty dict usually signals a parse failure; fall back to the
                # raw text unless the answer literally contains "{}".
                if "{}" not in answer_content:
                    return answer_content
            elif response_obj == []:
                # logger.error(
                #     f"Unexpected answer parsing error:\n{response=}\n{global_description=}\n{query_question=}\n{is_ascii_art=}"
                # )
                # Same fallback logic for an empty-list parse result.
                if "[]" not in answer_content:
                    return answer_content
            return str(response_obj)  # make sure the response to the metric is always a string
        else:
            # drop the redundant string quotes
            answer_content = evaluate_as_string(answer_content)
            return answer_content

    @classmethod
    def parse(
        cls,
        response: str,
        answer_key: str,
        *,
        global_description: str = "",
        query_question: str = "",
        is_single_line_ans: bool = None,
    ) -> dict:
        """Try to parse a single answer.

        Returns:
            ``{answer_key: parsed_string}``.
        """
        response_parsed = cls._parse(
            response,
            is_ascii_art=False,
            global_description=global_description,
            query_question=query_question,
            is_single_line_ans=is_single_line_ans,
        )
        results = {answer_key: response_parsed}
        return results
87
+
88
+
89
class AsciiAnswerStrParse(AnswerStrParse):
    """Parse the response for the single ASCII answer field."""

    @classmethod
    def parse(
        cls,
        response: str,
        answer_key: str,
        *,
        global_description: str = "",
        query_question: str = "",
        is_single_line_ans: bool = None,
    ) -> dict:
        """Parse *response* in ASCII-art mode and wrap it under *answer_key*."""
        parsed_value = cls._parse(
            response,
            is_ascii_art=True,
            global_description=global_description,
            query_question=query_question,
            is_single_line_ans=is_single_line_ans,
        )
        return {answer_key: parsed_value}
112
+
113
+
114
class VerbatimAnswerStrParse(AnswerStrParse):
    """Parse the response for a single answer field that should not have preceding or trailing whitespace removed."""

    @classmethod
    def parse(
        cls,
        response: str,
        answer_key: str,
        *,
        global_description: str = "",
        query_question: str = "",
        is_single_line_ans: bool = None,
    ) -> dict:
        """Try to parse a single answer, keeping surrounding whitespace intact.

        Returns:
            ``{answer_key: parsed_string}``.
        """
        # NOTE(review): ``is_ascii_art=True`` here looks like a copy-paste from
        # AsciiAnswerStrParse — verbatim answers are not necessarily ASCII art.
        # Confirm the flag is intentional before changing it.
        response_parsed = cls._parse(
            response,
            is_ascii_art=True,
            should_remove_surrounding_whitespace=False,
            global_description=global_description,
            query_question=query_question,
            is_single_line_ans=is_single_line_ans,
        )
        results = {answer_key: response_parsed}
        return results
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/dummy_parse.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
class DummyParse:
    """No-op parser: hands the raw response string straight to the metric."""

    @staticmethod
    def parse(response: str, *args, **kwargs) -> str:
        """Return the raw response with surrounding whitespace stripped.

        Extra positional/keyword arguments are accepted (and ignored) so this
        parser stays call-compatible with the other parser classes.
        """
        # Fix: the original annotation claimed ``-> dict`` and the docstring
        # claimed nothing was done, but a stripped string is returned.
        return response.strip()
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/parsing/json_parse.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .common.parsers import parse_json
2
+ from .common.utils import evaluate_as_string
3
+
4
+
5
class JsonParse:
    """Load the response as a JSON object."""

    @staticmethod
    def parse(response: str):
        """Parse the JSON object, including nested JSON strings.

        For dict results, every value is passed through evaluate_as_string to
        drop redundant string quotes; non-dict results are returned as-is.
        """
        parsed = parse_json(response)
        if isinstance(parsed, dict):
            return {key: evaluate_as_string(value) for key, value in parsed.items()}
        return parsed
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/requirements.txt ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ antlr4-python3-runtime==4.11.0
2
+ filelock==3.16.1
3
+ geopy==2.4.1
4
+ jieba==0.42.1
5
+ nltk==3.9.1
6
+ numpy==1.26.4
7
+ pronouncing==0.2.0
8
+ rapidfuzz==3.9.5
9
+ regex==2024.7.24
10
+ requests==2.32.3
11
+ requests_cache==1.2.1
12
+ sacrebleu==2.4.3
13
+ sympy==1.13.2
14
+ tqdm==4.66.4
15
+ Unidecode==1.3.8
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/response_parse_type.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import cached_property
2
+ from enum import Enum
3
+ from .parsing.json_parse import JsonParse
4
+ from .parsing.answer_str_parse import (
5
+ AnswerStrParse,
6
+ AsciiAnswerStrParse,
7
+ VerbatimAnswerStrParse,
8
+ )
9
+ from vlmeval.dataset.utils.megabench.parsing.dummy_parse import DummyParse
10
+
11
+
12
class ResponseParseType(Enum):
    """The strategies available for parsing a raw model response."""

    JSON = "json"
    ANSWER_STR = "answer_string"
    ASCII_ANSWER_STR = "ascii_answer_string"
    VERBATIM_ANSWER_STR = "verbatim_answer_string"
    DUMMY = "dummy"
    UNSUPPORTED = "unsupported"

    @cached_property
    def class_impl(self):
        """Return the parser class implementing this strategy.

        Anything that is not a single-field or dummy strategy falls back to
        the JSON parser (including UNSUPPORTED).
        """
        if self == ResponseParseType.ANSWER_STR:
            return AnswerStrParse
        elif self == ResponseParseType.ASCII_ANSWER_STR:
            return AsciiAnswerStrParse
        elif self == ResponseParseType.VERBATIM_ANSWER_STR:
            return VerbatimAnswerStrParse
        elif self == ResponseParseType.DUMMY:
            return DummyParse
        else:
            return JsonParse

    def is_single_field_parser(self):
        """True when this strategy parses exactly one named answer field."""
        return self in [
            ResponseParseType.ANSWER_STR,
            ResponseParseType.ASCII_ANSWER_STR,
            ResponseParseType.VERBATIM_ANSWER_STR,
        ]

    def parse(self, response: str, *args, **kwargs):
        """Parse the response."""
        return self.class_impl.parse(response, *args, **kwargs)

    @staticmethod
    def from_string(s):
        """Initialize the response parsing type from a string.

        ``None`` maps to UNSUPPORTED; an unknown string raises ValueError.
        """
        try:
            if s is None:
                return ResponseParseType("unsupported")
            return ResponseParseType(s.lower())
        except ValueError as exc:
            # Fix: Enum lookup by *value* raises ValueError, not KeyError, so
            # the original ``except KeyError`` was dead code; the old message
            # also wrongly said "metric type".
            raise ValueError(f"Invalid parsing type: {s}") from exc
VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/nli_entailment.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
from transformers import pipeline


# Pick a GPU when available; the classifier runs on CPU otherwise.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): the pipeline is instantiated at import time, so merely
# importing this module downloads/loads the DeBERTa-MNLI model — confirm this
# eager load is intended (a lazy factory would avoid the import-time cost).
pipe = pipeline(
    "text-classification", model="microsoft/deberta-large-mnli", device=device
)
9
+
10
+
11
class NliEntailment:
    """NLI entailment, where the correct answer is used as the premise."""

    @staticmethod
    def match(response, correct_answer) -> int:
        """Return 1 if the model labels (premise=answer, hypothesis=response)
        as ENTAILMENT, else 0.

        Non-string inputs cannot be scored and always yield 0.
        """
        # Fix: the original guard was
        #   `not isinstance(response, str) or isinstance(correct_answer, str)`
        # which returned 0 whenever the correct answer WAS a string — i.e. for
        # nearly every valid call. Both arguments must be strings to proceed.
        if not isinstance(response, str) or not isinstance(correct_answer, str):
            return 0
        resp = pipe(f"[CLS] {correct_answer.strip()} [SEP] {response.strip()} [SEP]")
        return 1 if resp[0]["label"] == "ENTAILMENT" else 0