Commit 7f3f41b (verified) by SitongGong
Parent: ab8d915

Upload folder: ESTP-Bench

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.

Files changed (50)
  1. .gitattributes +12 -0
  2. ESTP-Bench/.gitattributes +754 -0
  3. ESTP-Bench/.mdl +0 -0
  4. ESTP-Bench/.msc +3 -0
  5. ESTP-Bench/.mv +1 -0
  6. ESTP-Bench/estp_dataset/__pycache__/livechat.cpython-310.pyc +0 -0
  7. ESTP-Bench/estp_dataset/benchmark/__init__.py +2 -0
  8. ESTP-Bench/estp_dataset/benchmark/__pycache__/__init__.cpython-310.pyc +0 -0
  9. ESTP-Bench/estp_dataset/benchmark/__pycache__/benchmark.cpython-310.pyc +0 -0
  10. ESTP-Bench/estp_dataset/benchmark/__pycache__/estp.cpython-310.pyc +0 -0
  11. ESTP-Bench/estp_dataset/benchmark/benchmark.py +0 -0
  12. ESTP-Bench/estp_dataset/benchmark/check_error_data.ipynb +97 -0
  13. ESTP-Bench/estp_dataset/benchmark/estp.py +607 -0
  14. ESTP-Bench/estp_dataset/benchmark/eval.py +428 -0
  15. ESTP-Bench/estp_dataset/benchmark/eval_cost.py +170 -0
  16. ESTP-Bench/estp_dataset/benchmark/eval_cqa.py +347 -0
  17. ESTP-Bench/estp_dataset/benchmark/eval_findcase.py +446 -0
  18. ESTP-Bench/estp_dataset/benchmark/eval_singleQA.sh +387 -0
  19. ESTP-Bench/estp_dataset/benchmark/evalate_singleQA.py +484 -0
  20. ESTP-Bench/estp_dataset/benchmark/merge_prediction_result.ipynb +44 -0
  21. ESTP-Bench/estp_dataset/cqa_anno.json +0 -0
  22. ESTP-Bench/estp_dataset/dataset.py +1 -0
  23. ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2.json +0 -0
  24. ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json.part0 +0 -0
  25. ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json.part1 +0 -0
  26. ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json.part2 +0 -0
  27. ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json.part3 +0 -0
  28. ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json.part4 +0 -0
  29. ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json.part5 +0 -0
  30. ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json.part6 +0 -0
  31. ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json.part7 +0 -0
  32. ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2.json +0 -0
  33. ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json.part0 +0 -0
  34. ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json.part1 +0 -0
  35. ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json.part2 +0 -0
  36. ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json.part3 +0 -0
  37. ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json.part4 +0 -0
  38. ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json.part5 +0 -0
  39. ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json.part6 +0 -0
  40. ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json.part7 +0 -0
  41. ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175.json.part0 +0 -0
  42. ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175.json.part1 +0 -0
  43. ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part0 +0 -0
  44. ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part1 +0 -0
  45. ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part2 +0 -0
  46. ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part3 +0 -0
  47. ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part4 +0 -0
  48. ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part5 +0 -0
  49. ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part6 +0 -0
  50. ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part7 +0 -0
.gitattributes CHANGED
@@ -57,3 +57,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  # Video files - compressed
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
+ ESTP-Bench/.msc filter=lfs diff=lfs merge=lfs -text
+ ESTP-Bench/estp_dataset/estpCqa_ours/LivebaseStage2.5.json.part0 filter=lfs diff=lfs merge=lfs -text
+ ESTP-Bench/estp_dataset/estpCqa_ours/LivebaseStage2.5.json.part1 filter=lfs diff=lfs merge=lfs -text
+ ESTP-Bench/estp_dataset/estpCqa_ours/LivebaseStage3.5_high0.31_11.json.part0 filter=lfs diff=lfs merge=lfs -text
+ ESTP-Bench/estp_dataset/estpCqa_ours/LivebaseStage3.5_high0.31_11.json.part1 filter=lfs diff=lfs merge=lfs -text
+ ESTP-Bench/estp_dataset/estpSqa_ours/LIVE_IT0.95.json.part2 filter=lfs diff=lfs merge=lfs -text
+ ESTP-Bench/estp_dataset/estp_bench_sq_VideollmOnline0.9.json.part1 filter=lfs diff=lfs merge=lfs -text
+ ESTP-Bench/estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ ESTP-Bench/estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_5_5.json.part1 filter=lfs diff=lfs merge=lfs -text
+ ESTP-Bench/estp_dataset/eval.ipynb filter=lfs diff=lfs merge=lfs -text
+ ESTP-Bench/estp_dataset/tmp_predict_minicpmv_contextual.json filter=lfs diff=lfs merge=lfs -text
+ ESTP-Bench/estp_dataset/tmp_predict_minicpmv_v2.json filter=lfs diff=lfs merge=lfs -text
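
(Aside: every rule above uses the standard Git LFS attribute syntax, `<pattern> filter=lfs diff=lfs merge=lfs -text`, which routes matching files through the LFS filter instead of storing their contents directly in Git. Below is a minimal sketch of how such rules can be inspected programmatically; the parsing logic and hard-coded paths are illustrative assumptions, not part of this commit.)

```python
# Sketch: list which patterns in a .gitattributes file are routed through Git LFS.
# The file path and example query below are illustrative, not taken from this commit.
from fnmatch import fnmatch


def lfs_patterns(path=".gitattributes"):
    """Return the patterns that a .gitattributes file marks with filter=lfs."""
    patterns = []
    with open(path, encoding="utf-8") as fh:
        for raw in fh:
            line = raw.strip()
            if not line or line.startswith("#"):
                continue
            pattern, *attrs = line.split()
            if "filter=lfs" in attrs:
                patterns.append(pattern)
    return patterns


def is_lfs_tracked(file_path, patterns):
    # fnmatch only approximates gitattributes glob matching (e.g. "**" is not
    # handled exactly the way Git handles it), so treat this as a rough check.
    return any(fnmatch(file_path, pat) for pat in patterns)


if __name__ == "__main__":
    pats = lfs_patterns()
    print(is_lfs_tracked("ESTP-Bench/estp_dataset/eval.ipynb", pats))
```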
ESTP-Bench/.gitattributes ADDED
@@ -0,0 +1,754 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.mat filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.hdf5 filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *.tfevents* filter=lfs diff=lfs merge=lfs -text
+
+ *.ark* filter=lfs diff=lfs merge=lfs -text
+ **/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
+ **/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
+ **/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ *.wma filter=lfs diff=lfs merge=lfs -text
+
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.m4a filter=lfs diff=lfs merge=lfs -text
+ *.m3u8 filter=lfs diff=lfs merge=lfs -text
+ *.amr filter=lfs diff=lfs merge=lfs -text
+ *.audio filter=lfs diff=lfs merge=lfs -text
+
+ *.flv filter=lfs diff=lfs merge=lfs -text
+
+ *.mpg filter=lfs diff=lfs merge=lfs -text
+ *.asf filter=lfs diff=lfs merge=lfs -text
+ *.mov filter=lfs diff=lfs merge=lfs -text
+ *.mpeg filter=lfs diff=lfs merge=lfs -text
+ *.3gp filter=lfs diff=lfs merge=lfs -text
+ *.wmv filter=lfs diff=lfs merge=lfs -text
+ *.rmvb filter=lfs diff=lfs merge=lfs -text
+ *.rm filter=lfs diff=lfs merge=lfs -text
+ *.ts filter=lfs diff=lfs merge=lfs -text
+ *.mkv filter=lfs diff=lfs merge=lfs -text
+ *.flash filter=lfs diff=lfs merge=lfs -text
+ *.vob filter=lfs diff=lfs merge=lfs -text
+ *.pdf filter=lfs diff=lfs merge=lfs -text
+ *.ost filter=lfs diff=lfs merge=lfs -text
+ *.pst filter=lfs diff=lfs merge=lfs -text
+ *.doc filter=lfs diff=lfs merge=lfs -text
+ *.docx filter=lfs diff=lfs merge=lfs -text
+ *.txt filter=lfs diff=lfs merge=lfs -text
+ *.ppt filter=lfs diff=lfs merge=lfs -text
+ *.pptx filter=lfs diff=lfs merge=lfs -text
+ *.xls filter=lfs diff=lfs merge=lfs -text
+ *.xlsx filter=lfs diff=lfs merge=lfs -text
+ *.vsd filter=lfs diff=lfs merge=lfs -text
+ *.vsdx filter=lfs diff=lfs merge=lfs -text
+ *.jsonl filter=lfs diff=lfs merge=lfs -text
+
+ dataset_infos.json ignore
+ *.csv filter=lfs diff=lfs merge=lfs -text
+ *.tsv filter=lfs diff=lfs merge=lfs -text
+
+ estp_dataset/estpCqa_baseline/InternVLV28_passive_v2evaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/InternVLV28_passive_v2evaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/InternVLV28_passive_v2evaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/InternVLV28_passive_v2evaluator_deepseek_1_2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/InternVLV28_passive_v2evaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/LLaVANextVideo7B_fbf_0.175evaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/LLaVANextVideo7B_fbf_0.175.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/LLaVANextVideo7B_fbf_0.175.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/LLaVANextVideo7B_fbf_0.175evaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/LLaVANextVideo7B_fbf_0.175evaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/LLaVANextVideo7B_passive.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/LLaVANextVideo7B_fbf_0.175evaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/LLaVANextVideo7B_fbf_0.175evaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/LLaVANextVideo7B_passive_v2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/LLaVAOneVision_fbf_0.175.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/LLaVAOneVision_fbf_0.175.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/CLIP_streaming_v2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/InternVLV28_passive_v2evaluator_deepseek_1_2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/InternVLV28_passive_v2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/LLaVAOneVision_fbf_0.175evaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/LLaVAOneVision_fbf_debug0.175.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/LLaVAOneVision_passive.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/InternVLV28_passive.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/LLaVAOneVision_passive_v2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/LLaVAOneVision_passiveevaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/Lavila_streaming_v2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/Lavila_streaming_v2evaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MMDuet.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MMDuetevaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MMDuetevaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MMDuet.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MMDuet.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MMDuetevaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MMDuetevaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MMDuetevaluator_deepseek_1_2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MMDuetevaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_fbf_0.175.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_fbf_0.175.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MMDuetevaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_fbf_0.175.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MMDuetevaluator_deepseek_1_2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_fbf_0.175evaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_fbf_0.175evaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_fbf_0.175evaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_fbf_0.175evaluator_deepseek_1_2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_fbf_0.175evaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_fbf_0.175evaluator_deepseek_1_2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_fbf_0.175evaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_fbf_0.175evaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_passive.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_passive_debug.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_passive_v2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_passiveevaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_passiveevaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_passiveevaluator_deepseek_1_2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_passiveevaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_passive_v2evaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/Qwen2VL_fbf_0.175.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/Qwen2VL_fbf_0.175.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/Qwen2VL_fbf_0.175evaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/Qwen2VL_fbf_0.175evaluator_deepseek_1_2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/Qwen2VL_fbf_0.175evaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/Qwen2VL_fbf_0.175evaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/Qwen2VL_passive.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/Qwen2VL_passive_debug.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/Qwen2VL_passive_v2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVANextVideo7B_passive.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/VideollmOnline0.8.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/VideollmOnline0.8.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/InternVLV28_passive_v2evaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/VideollmOnline0.8evaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/VideollmOnline0.8evaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/VideollmOnline0.8.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/MiniCPMV_passive_v2evaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/VideollmOnline0.8evaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/VideollmOnline0.8evaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/VideollmOnline0.8evaluator_deepseek_1_2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage2.5.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage2.5.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/VideollmOnline0.8evaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVANextVideo7B_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVAOneVision_fbf_singleQA_0.175.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVAOneVision_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVAOneVision_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVAOneVision_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVAOneVision_fbf_singleQA_0.175.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVAOneVision_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVAOneVision_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVAOneVision_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVAOneVision_passive.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVAOneVision_passiveevaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVAOneVision_passiveevaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVAOneVision_passiveevaluator_deepseek_1_2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVAOneVision_passiveevaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVAOneVision_passiveevaluator_deepseek_5_5.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVAOneVision_passiveevaluator_deepseek_5_5.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/Lavila_streaming.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/Lavila_streaming_v2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/Qwen2VL_passive_evaluator_deepseek_1_2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MMDuet.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MMDuet.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MMDuet.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MMDuetevaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MMDuetevaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MMDuetevaluator_deepseek_1_2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MMDuetevaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MMDuet.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175_v2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175_v2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MMDuetevaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175_v2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MMDuetevaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175_v2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/Qwen2VL_fbf_singleQA_0.175.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/Qwen2VL_fbf_singleQA_0.175.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/Qwen2VL_fbf_singleQA_0.175.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MMDuetevaluator_deepseek_1_2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/VideollmOnline0.8evaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/Qwen2VL_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/Qwen2VL_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage2_lowevaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/Qwen2VL_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/Qwen2VL_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/Qwen2VL_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/Qwen2VL_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/Qwen2VL_passive.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/Qwen2VL_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/Qwen2VL_fbf_singleQA_0.175.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVAOneVision_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/TimeChat_passive.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/Qwen2VL_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_high.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_high.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_high.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_high.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_high0.31_11_debug.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_high0.31_11.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_high.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_highevaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_highevaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_highevaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_high.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage2_lowevaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_baseline/VideollmOnline0.8evaluator_deepseek_1_2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_high.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_high0.31_11.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_highevaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_low.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_highevaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_low.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_low.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_low.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3_high0.31_1_low.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_highevaluator_deepseek_1_2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/MMDuetevaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_lowevaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_lowevaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_lowevaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_lowevaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_lowevaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_lowevaluator_deepseek_1_2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_lowevaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_highevaluator_deepseek_1_2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_lowevaluator_deepseek_1_2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3_high0.31_1_lowevaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3.5_highevaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3_high0.31_1_lowevaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3_high0.31_1_low.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3_high0.31_1_lowevaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3_high0.31_1_lowevaluator_deepseek_1_2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3_high0.31_1_lowevaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3_high0.31_1_lowevaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3_high0.31_1_lowevaluator_deepseek_1_2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3_high0.31_1_lowevaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/CLIP_streaming.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/CLIP_streaming_v2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/CLIP_streamingevaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/CLIP_streamingevaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/CLIP_streamingevaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/EgoVLP_streaming.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/EgoVLP_streamingevaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/EgoVLP_streaming_v2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/InternVLV28_fbf_0.175.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/InternVLV28_fbf_0.175.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/InternVLV28_passiveevaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/InternVLV28_passiveevaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/InternVLV28_passiveevaluator_deepseek_5_5.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/InternVLV28_passive.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/InternVLV28_passiveevaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVANextVideo7B_fbf_singleQA.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/InternVLV28_passiveevaluator_deepseek_5_5.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVANextVideo7B_fbf_singleQA.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVANextVideo7B_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline_5cases/LLaVANextVideo7B_fbf_5cases.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVANextVideo7B_fbf_singleQA_0.175.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVANextVideo7B_fbf_singleQA_0.175.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVANextVideo7B_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVANextVideo7B_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/InternVLV28_passiveevaluator_deepseek_5_5.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVANextVideo7B_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3_high0.31_1_low.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVANextVideo7B_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline/LLaVANextVideo7B_fbf_singleQA_0.175evaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpCqa_ours/LivebaseStage3_high0.31_1_low.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline_5cases/MiniCPMV_fbf_5cases.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline_5cases/LLaVAOneVision_fbf_5cases.json filter=lfs diff=lfs merge=lfs -text
+
+ estp_dataset/estpSqa_baseline_5cases/Qwen2VL_fbf_5cases.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline_5cases/Qwen2VL_fbf_5casesevaluator_deepseek_1_1.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline_5cases/Qwen2VL_fbf_5casesevaluator_deepseek_5_5.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_5_5.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_1_2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LIVE_IT0.95.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/eval.ipynb filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_5_5.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_5_5.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_5_5.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LIVE_IT0.95.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LIVE_IT0.95.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LIVE_IT0.95.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2_debug.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2evaluator_llama_5_5.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2_v2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2_v2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2evaluator_llama_5_5.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2_v2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2_v2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2evaluator_llama_5_5.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2evaluator_llama_5_5.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estp_bench_sq_EWO_frame_by_frame_v6_fusion_dinov2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estp_bench_sq_EWO_frame_by_frame_v6_fusion_dinov2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estp_bench_sq_EWO_frame_by_frame_v6_fusion_dinov2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estp_bench_sq_EWO_frame_by_frame_v6_fusion_dinov2evaluator_llama_5_5.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estp_bench_sq_EWO_frame_by_frame_v6_fusion_dinov2evaluator_llama_5_5.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estp_bench_sq_EWO_frame_by_frame_v6_fusion_dinov2evaluator_llama_5_5.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage2evaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage2evaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage2evaluator_deepseek_1_2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage2evaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage2evaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage2evaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage2evaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high0.31_11.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high0.31_11.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high0.31_11.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high0.31_1_low.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high0.31_1_low.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high0.31_1_lowevaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high0.31_1_low.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high0.31_1_lowevaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high0.31_1_lowevaluator_deepseek_1_2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high0.31_1_lowevaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high0.31_1_lowevaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high11_.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high11_evaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high11_.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high11_evaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high11_evaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high11_evaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high11_evaluator_deepseek_1_2.json.part3 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high11_evaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high11_evaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high11_evaluator_deepseek_1_2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_5_5.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage2evaluator_deepseek_1_2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline_5cases/MiniCPMV_fbf_5casesevaluator_deepseek_5_5.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_baseline_5cases/MiniCPMV_fbf_5casesevaluator_deepseek_5_5.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estpSqa_ours/LivebaseStage3_high11_.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_EWO_frame_by_frame_v3.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_cq_v2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_cq.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_cq_v3.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_EWO_frame_by_frame.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_EWO_frame_by_frame_v2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_EWO_frame_by_frame55.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.8evaluator_deepseek_5_5.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_EWO_frame_by_frame_v4_fusion_dinov2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_EWO_frame_by_frame_v5_fusion_dinov2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_EWO_frame_by_frame_v6_fusion_dinov2.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.8.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_EWO_frame_by_frame_v6_fusion_dinov2_debug.json filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.8.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.8.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_1_2.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.8evaluator_deepseek_5_5.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_1_2.json.part5 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.8evaluator_deepseek_1_2.json.part2 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_1_2.json.part7 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_1_2.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_5_5.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_1_2.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_5_5.json.part6 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_1_2.json.part4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_5_5.json.part0 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/estp_bench_sq_VideollmOnline0.9.json.part2 filter=lfs diff=lfs merge=lfs -text
+
+ full_scale_2fps_max384/dedaa2ad-306d-47b3-a4c4-90b6b2c37e78.mp4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estp_bench_sq_MiniCPMV_passiveevaluator_llama_5_5.json.part1 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estp_bench_sq_MiniCPMV_passiveevaluator_llama_5_5.json.part0 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/b3df1e5e-0c7a-4ac5-84b6-41acde78d57d.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/b062ea19-1128-47fc-9ed2-da1644a62745.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/b493e8e3-4e18-4480-8e63-c21e25bf99f7.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/b6d7c19a-c056-40d3-9f92-7b11c8c61d26.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/b4068f40-69e9-4217-81ba-84e59c25f125.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/ab93e55b-11cb-4332-b247-b3fb2fc67f53.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/b68f87f0-29be-4516-a34a-dda69dd84a56.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/af4828ab-2ff7-4935-87e0-08881989f178.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/b739682e-97d3-468c-ab32-eff051aaf9ec.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/b5b0a768-ff32-49e4-945f-dc31b32e72cc.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/b2a1b8ca-99d6-4f26-953f-426e89649e90.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/b6b4f7a1-f3ec-425c-93eb-acc931acb2b5.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/b7682631-4cb0-4a23-81bf-597a91c90591.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/ab6f68a2-e5ba-48ce-ac24-7fb3df0584f2.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/afbab8ce-797a-4c6e-800b-5acd9bf1653e.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/bb03ea40-924a-450f-8521-9773c017395a.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/b0a30641-ad93-4d6a-92a9-f277c78f1aa5.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/bd970f5b-3fd9-4ae9-9f2b-738e8ca54c1e.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c07e1c6f-3bf8-4d6b-9272-87c4b98f8336.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/bd1d22e0-2898-424f-8ec8-d47cab2e9fcb.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/b9eed644-56a9-4a0b-b1a3-5cc0d6688297.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/bc95b190-02a0-435a-a7cf-90d20ae0e23a.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c5938442-90c1-413c-be06-186222d8274f.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/bb2297d3-91b7-4ec4-b39b-cc64694929db.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/b999ac9b-d285-4f44-944a-a0795969ac51.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c517f92a-3b49-42f9-9927-c290a61f9202.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c1344588-b8e2-409f-8c84-2d27a0fe7934.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c037be7a-e56b-4053-a48b-c593bf069ef4.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c2241ea5-5c32-488e-b8d0-09676ef304ee.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c6047868-51a3-4bb8-b833-5453c1fa563c.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c5fd91c3-07d2-4514-a693-381bca377a0b.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c745f5ed-8c4f-4736-a38c-1cd22ce5094a.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c4333895-ed19-42fe-9323-271a41bdfe4c.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c730a60b-236c-4e7b-9d7a-9967ff7ee9ac.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c2d06df7-5d3a-4116-9edb-f1c81a4f669b.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c77015b4-40e1-44f5-b411-6b962dfd3157.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c7d5d40f-840c-4be0-b79d-ab41394479a2.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c34effc7-6198-4c85-80c7-c8be85b77dd3.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c6e14005-854e-462f-a5a0-2474b759b1cc.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/cbfeb6c8-ec1f-4884-bcbf-14d7764c4a36.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c864505c-9d49-4da4-bf7e-254b4c348c03.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/989f3c2c-edd2-4f30-9925-56771e2bf7cc.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/cc575a16-64fd-4cda-9248-5d85f506fdfd.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c615d305-14fe-41cd-9099-caf420beb604.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/d651a4c5-b3ef-4f9a-b69c-5e9baebae9ad.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/d74c95ba-e072-44f8-bb2f-2631379af06d.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c628c1d2-4ce3-4f28-93a4-cc9d3dd9de11.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/acc6839e-9d6d-46db-921b-51812834d3b2.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/d2606d87-2a9b-48df-96b2-5df1a4457215.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/d7951b74-71fc-4dd7-a874-408088870e72.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/d02fe2b4-fd4a-4b8a-baac-e1c90379acb9.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/d27c5c7e-db9c-48f4-bea5-9970078d352e.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/da80cc0f-c003-4ef3-b5ca-b09d3b105a03.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c833a9f6-c59e-4f4e-816f-77f1a32c6c05.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/dddce8ac-09b0-4b13-b0fa-eb18e1f27b21.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/ce90f6f8-a6a8-47b5-bb4c-6e12698164a3.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/eb1b6e62-7197-4a75-bc39-c373e558fd97.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/e7cda49b-1c03-4d74-a759-c2eeb96465b4.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/e7742d04-2dc5-4046-9d0c-d45c6fe74d25.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/e4d961f5-176f-4dde-864a-bb94523312a1.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/e72082e8-f9e6-42ac-ac45-de30f9adee9d.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/eb628b6d-001d-48ab-90ed-730e47f299b9.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/d939955c-6db9-4406-8b7b-085494320bc7.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/e8657b65-be92-401d-a8d8-2fa32cb861c0.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/b1543ea3-e86b-4545-9195-f1bf83e02b41.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/7bb3ad7f-9bdd-4806-afd6-efb7aa7babbc.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/ec143498-9db7-4ee7-a0cf-43538717e570.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/eb2c9739-dba0-479c-81b7-9dda51c4ac20.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/de8ea8a7-6bdb-4679-9b17-9dc6a79e45e7.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/ec9f9e41-aa3d-4466-8da9-69bcf23ffba2.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/edaaa1c1-ea36-4318-86b0-becc9b7de0f4.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/ed60dcdb-b273-44e7-b5dc-f9527d7c403f.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/ee5ed7ca-667d-4a31-bdc8-13a9b8008246.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/eb5cb2b0-59e6-45da-af1b-ba86c7ab0b54.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/f1b5ae1e-742c-4802-9e61-ff8aae83e12e.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/ec41b9e1-dbf9-45c3-bf88-4b250873cad0.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/e6f1accd-e18d-43ec-b854-e7633b15cbf7.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/f20fb254-9648-40ea-a500-5493a1ad9eac.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/efd052ce-4af1-48f4-8546-c13428ee92ec.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/f26ca7c3-2dfe-4e74-be61-529635df723f.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/f442873e-6f2b-493f-a6db-718cd20a732a.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/f265c11a-5b65-49e2-b77a-8735f2aa9a48.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/ec344610-74f4-4765-9c3f-0837ef78055d.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/f5984df0-dbe0-42d6-8e74-a0e1368f48e8.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/b8592091-2f2c-4c37-a931-3c84e3eff3c7.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/eec01ded-9bf6-4da4-b61c-ac6a022e8ce4.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/c485fef8-8456-48e0-8ccd-d6cb0ba8c6f8.mp4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estp_bench_sq_VideollmOnlineevaluator_llama_5_5.json.part0 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/f7866811-1030-4be5-af61-7f1dd797f36b.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/f57c99dc-02c7-4d3a-a4dc-893e74d523b1.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/f9ce56a3-3e9b-4edd-b133-c05eed792ec0.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/8d4895eb-b2d1-43b8-90f1-e80fcc37797b.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/f89e52da-01ee-4773-95ca-e8a5431ccd65.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/fb02682a-1e0e-49ee-a0f4-0df7f5fbe98a.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/fbb89f93-2922-49ac-b127-dd20711deff2.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/f12a83a9-77ee-495c-9f11-2d41137a0b1a.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/f40e0f92-2250-46c9-98a0-8ccf23d164e0.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/fb7cc35d-3272-44a4-b8f2-15cd24fa345b.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/grp-056db3f1-f957-46c8-b16b-c8fce22e78f9.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/grp-1982af6d-8f53-43a5-b81a-cda5a8099e6e.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/d14d6c08-b80a-446a-988e-5b57a079097c.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/grp-27825332-6946-4c1c-9b4f-e4de4f13b718.mp4 filter=lfs diff=lfs merge=lfs -text
+ full_scale_2fps_max384/grp-1a9bb53f-01ae-4bd7-afa3-a92570679a7a.mp4 filter=lfs diff=lfs merge=lfs -text
+ estp_dataset/result/estp_bench_sq_VideollmOnlineevaluator_llama_5_5.json.part1 filter=lfs diff=lfs merge=lfs -text
526
+ full_scale_2fps_max384/grp-5251b41e-91b1-4b87-9a18-c631b1d17e2e.mp4 filter=lfs diff=lfs merge=lfs -text
527
+ full_scale_2fps_max384/grp-2bccee1b-0ade-47ad-8e15-ad6c00861540.mp4 filter=lfs diff=lfs merge=lfs -text
528
+ full_scale_2fps_max384/grp-304735ba-6bf5-4d39-bcb5-0dabddb11d68.mp4 filter=lfs diff=lfs merge=lfs -text
529
+ full_scale_2fps_max384/fa21a282-6099-45cd-a4a7-39daf7973ec0.mp4 filter=lfs diff=lfs merge=lfs -text
530
+ full_scale_2fps_max384/grp-47305621-3729-44cc-8f1f-814faf214c40.mp4 filter=lfs diff=lfs merge=lfs -text
531
+ full_scale_2fps_max384/d20f6551-0272-4a69-8b02-da7ffcf29e0f.mp4 filter=lfs diff=lfs merge=lfs -text
532
+ full_scale_2fps_max384/fe0d5ffc-686e-4846-bce0-1a18e513bf95.mp4 filter=lfs diff=lfs merge=lfs -text
533
+ full_scale_2fps_max384/ffb6dfc1-d2f9-45b1-8e25-2c7d0f32d635.mp4 filter=lfs diff=lfs merge=lfs -text
534
+ full_scale_2fps_max384/grp-5b24c19c-0bde-46ce-a32e-418b5ffaa8a3.mp4 filter=lfs diff=lfs merge=lfs -text
535
+ full_scale_2fps_max384/grp-67759ea1-52a2-45ee-b129-f482532d83df.mp4 filter=lfs diff=lfs merge=lfs -text
536
+ full_scale_2fps_max384/grp-9c5c9efc-608f-4fdf-9c29-2251a451c8f9.mp4 filter=lfs diff=lfs merge=lfs -text
537
+ full_scale_2fps_max384/f6a6c5d5-3fe0-42af-9ddb-fa1a63521c0d.mp4 filter=lfs diff=lfs merge=lfs -text
538
+ full_scale_2fps_max384/grp-93d50e4c-0a3b-430a-8267-01fbe5f302f4.mp4 filter=lfs diff=lfs merge=lfs -text
539
+ full_scale_2fps_max384/grp-8912a138-6c68-424d-8be5-59e0d4f5c173.mp4 filter=lfs diff=lfs merge=lfs -text
540
+ full_scale_2fps_max384/grp-88bae242-7f3a-45d4-b129-5d69b1a1e15a.mp4 filter=lfs diff=lfs merge=lfs -text
541
+ full_scale_2fps_max384/grp-7fb63b81-0a4f-4e9e-906b-60d1935d53c7.mp4 filter=lfs diff=lfs merge=lfs -text
542
+ full_scale_2fps_max384/grp-79f47a60-f1e9-4232-88b8-a1836e7dfd30.mp4 filter=lfs diff=lfs merge=lfs -text
543
+ full_scale_2fps_max384/grp-c56e7e04-8787-4df1-98c6-352076f61e53.mp4 filter=lfs diff=lfs merge=lfs -text
544
+ estp_dataset/result/tmp_predict_minicpmv_contextual_frame_by_frame.json.part0 filter=lfs diff=lfs merge=lfs -text
545
+ full_scale_2fps_max384/grp-c56fa4ea-b7e1-4d2c-b1f3-b97bb6fe0c56.mp4 filter=lfs diff=lfs merge=lfs -text
546
+ estp_dataset/result/tmp_predict_minicpmv_contextual_frame_by_frame.json.part2 filter=lfs diff=lfs merge=lfs -text
547
+ full_scale_2fps_max384/grp-a9c519a7-4776-42d6-bcf1-270f0d302843.mp4 filter=lfs diff=lfs merge=lfs -text
548
+ full_scale_2fps_max384/grp-cfe4d1f3-20c1-45d9-8bd3-5a7a8e7443f4.mp4 filter=lfs diff=lfs merge=lfs -text
549
+ full_scale_2fps_max384/grp-b93ab731-52c3-43b2-9d99-4229663ba67c.mp4 filter=lfs diff=lfs merge=lfs -text
550
+ full_scale_2fps_max384/grp-cec778f9-9b54-4b67-b013-116378fd7a85.mp4 filter=lfs diff=lfs merge=lfs -text
551
+ full_scale_2fps_max384/grp-3d7ccb44-b05d-4b67-baae-4e0f55d8307b.mp4 filter=lfs diff=lfs merge=lfs -text
552
+ full_scale_2fps_max384/grp-f248a4aa-dd14-42fb-91fc-ec9f6bc16f20.mp4 filter=lfs diff=lfs merge=lfs -text
553
+ estp_dataset/result/tmp_predict_minicpmv_contextual_frame_by_frame.json.part1 filter=lfs diff=lfs merge=lfs -text
554
+ full_scale_2fps_max384/grp-d8f0089b-cac5-41f4-af66-e11d6f54a43c.mp4 filter=lfs diff=lfs merge=lfs -text
555
+ full_scale_2fps_max384/grp-84322f50-7fce-4d49-a04b-9ecd7afd4119.mp4 filter=lfs diff=lfs merge=lfs -text
556
+ full_scale_2fps_max384/grp-719d9e89-4eb2-49ea-be14-dc2637dc303f.mp4 filter=lfs diff=lfs merge=lfs -text
557
+ estp_dataset/result/tmp_predict_minicpmv_contextual_frame_by_frameevaluator_llama_5_5.json.part0 filter=lfs diff=lfs merge=lfs -text
558
+ full_scale_2fps_max384/grp-51fc62f8-00f4-44e3-af9c-7ebb63da6c3d.mp4 filter=lfs diff=lfs merge=lfs -text
559
+ full_scale_2fps_max384/grp-e5fb613b-1c6a-4439-8735-6414a6344c76.mp4 filter=lfs diff=lfs merge=lfs -text
560
+ full_scale_2fps_max384/grp-ebce88dd-4852-4506-9dcc-5f5798ce1cbf.mp4 filter=lfs diff=lfs merge=lfs -text
561
+ estp_dataset/result/tmp_predict_minicpmv_contextual_frame_by_frameevaluator_llama_5_5.json.part1 filter=lfs diff=lfs merge=lfs -text
562
+ estp_dataset/tmp_predict_minicpmv_v3_correctness.json.part0 filter=lfs diff=lfs merge=lfs -text
563
+ full_scale_2fps_max384/f50acf72-0347-47b3-a0a7-9916311d5c3a.mp4 filter=lfs diff=lfs merge=lfs -text
564
+ estp_dataset/tmp_predict_minicpmv_v3.json filter=lfs diff=lfs merge=lfs -text
565
+ full_scale_2fps_max384/f0041c6c-06b2-4c3b-b8e6-934305b4226e.mp4 filter=lfs diff=lfs merge=lfs -text
566
+ estp_dataset/tmp_predict_minicpmv_v3.json.part0 filter=lfs diff=lfs merge=lfs -text
567
+ estp_dataset/tmp_predict_minicpmv_v2.json filter=lfs diff=lfs merge=lfs -text
568
+ full_scale_2fps_max384/f9b214ac-cb2c-4a1e-b6f1-518293398e35.mp4 filter=lfs diff=lfs merge=lfs -text
569
+ estp_dataset/tmp_predict_minicpmv_v3.json.part1 filter=lfs diff=lfs merge=lfs -text
570
+ estp_dataset/tmp_predict_minicpmv_v3_correctness.json.part1 filter=lfs diff=lfs merge=lfs -text
571
+ full_scale_2fps_max384/002c3b5c-ed86-4af3-99a1-4b497b7c8a86.mp4 filter=lfs diff=lfs merge=lfs -text
572
+ estp_dataset/train_it/estpBenchSq5Cases_fbf_EWOStage2_v1.json filter=lfs diff=lfs merge=lfs -text
573
+ full_scale_2fps_max384/070776bf-f15a-4b6e-a906-d1ef9e132592.mp4 filter=lfs diff=lfs merge=lfs -text
574
+ full_scale_2fps_max384/07309684-1f6e-4977-ab74-f3e63c361f36.mp4 filter=lfs diff=lfs merge=lfs -text
575
+ full_scale_2fps_max384/029ff9ab-ac4c-43a5-ae53-153228c52a21.mp4 filter=lfs diff=lfs merge=lfs -text
576
+ full_scale_2fps_max384/09b27569-7cb6-49e7-bf02-ec5333f04273.mp4 filter=lfs diff=lfs merge=lfs -text
577
+ full_scale_2fps_max384/023bf95e-28de-43b4-a43f-720edba667a5.mp4 filter=lfs diff=lfs merge=lfs -text
578
+ full_scale_2fps_max384/05b8b906-b9c5-48b4-be27-c91b5c28f92b.mp4 filter=lfs diff=lfs merge=lfs -text
579
+ full_scale_2fps_max384/0daaa66c-fb5f-4680-b84c-2f65f0f0bd9b.mp4 filter=lfs diff=lfs merge=lfs -text
580
+ full_scale_2fps_max384/0e6fb738-05fc-4dd5-9746-a8e10efe8c20.mp4 filter=lfs diff=lfs merge=lfs -text
581
+ full_scale_2fps_max384/055ba601-c243-4d4a-8a2c-694a5c88824c.mp4 filter=lfs diff=lfs merge=lfs -text
582
+ full_scale_2fps_max384/10a04147-ae19-4c5e-b13b-b0b7005938a2.mp4 filter=lfs diff=lfs merge=lfs -text
583
+ estp_dataset/train_it/estpBenchSq5Cases_fbf_EWOLivebaseStage2_v1.json.part0 filter=lfs diff=lfs merge=lfs -text
584
+ full_scale_2fps_max384/10bf25b0-f781-4de3-b66c-9a4d06e3643a.mp4 filter=lfs diff=lfs merge=lfs -text
585
+ full_scale_2fps_max384/106e9447-8a09-4e89-964e-61c927827a56.mp4 filter=lfs diff=lfs merge=lfs -text
586
+ full_scale_2fps_max384/0a01978c-e16d-4587-95f1-49efa3ab15d9.mp4 filter=lfs diff=lfs merge=lfs -text
587
+ estp_dataset/tmp_predict_minicpmv_contextual.json filter=lfs diff=lfs merge=lfs -text
588
+ full_scale_2fps_max384/0fcf23a0-fc53-4378-9a99-18c4f109f659.mp4 filter=lfs diff=lfs merge=lfs -text
589
+ full_scale_2fps_max384/15ce76af-7eb0-4d27-9289-c9c71863f603.mp4 filter=lfs diff=lfs merge=lfs -text
590
+ full_scale_2fps_max384/110352df-c7a3-4fec-a707-42936d101c28.mp4 filter=lfs diff=lfs merge=lfs -text
591
+ full_scale_2fps_max384/134d9865-d9ee-47e5-a121-589a26c6b3ba.mp4 filter=lfs diff=lfs merge=lfs -text
592
+ estp_dataset/train_it/estpBenchSq5Cases_fbf_EWOLivebaseStage2_v1.json.part2 filter=lfs diff=lfs merge=lfs -text
593
+ full_scale_2fps_max384/1a327b0c-b78b-4ae2-add0-49334bccddcd.mp4 filter=lfs diff=lfs merge=lfs -text
594
+ full_scale_2fps_max384/182dffe9-d2bf-4a7d-b1d8-137470f0143d.mp4 filter=lfs diff=lfs merge=lfs -text
595
+ full_scale_2fps_max384/16d55886-6e1e-4195-9918-12dc4568320e.mp4 filter=lfs diff=lfs merge=lfs -text
596
+ full_scale_2fps_max384/13c76616-f168-4af0-8d2a-fe82ce232d6a.mp4 filter=lfs diff=lfs merge=lfs -text
597
+ estp_dataset/train_it/estpBenchSq5Cases_fbf_EWOLivebaseStage2_v1.json.part1 filter=lfs diff=lfs merge=lfs -text
598
+ estp_dataset/train_it/estpBenchSq5Cases_fbf_EWOLivebaseStage2_v1.json.part3 filter=lfs diff=lfs merge=lfs -text
599
+ full_scale_2fps_max384/174bee0b-b5d6-4e22-b81d-048005ae6ba6.mp4 filter=lfs diff=lfs merge=lfs -text
600
+ full_scale_2fps_max384/214b62df-2fb1-4b0e-af9d-42b0dd37defb.mp4 filter=lfs diff=lfs merge=lfs -text
601
+ full_scale_2fps_max384/228eb02a-6c89-4aa0-9cd4-0cdab1550c83.mp4 filter=lfs diff=lfs merge=lfs -text
602
+ full_scale_2fps_max384/1b333aaf-1c1b-4043-bbea-5b5eacbfaf5e.mp4 filter=lfs diff=lfs merge=lfs -text
603
+ full_scale_2fps_max384/1b9f06a7-b26d-4c74-863e-1d4fa22bbc37.mp4 filter=lfs diff=lfs merge=lfs -text
604
+ full_scale_2fps_max384/04fe8f4d-081e-437e-a56a-2d53b6233fc9.mp4 filter=lfs diff=lfs merge=lfs -text
605
+ estp_dataset/tmp_predict_minicpmv.json filter=lfs diff=lfs merge=lfs -text
606
+ full_scale_2fps_max384/26f1c77e-814e-4609-ad67-12447d1627e1.mp4 filter=lfs diff=lfs merge=lfs -text
607
+ full_scale_2fps_max384/27b7edc0-0b05-4a4a-9dd0-893ba585faf8.mp4 filter=lfs diff=lfs merge=lfs -text
608
+ full_scale_2fps_max384/0e0d6704-1c6c-4a62-bc97-cc55658cf8ac.mp4 filter=lfs diff=lfs merge=lfs -text
609
+ full_scale_2fps_max384/26dc2fae-b3b5-4c4a-ba51-efc6bc740f52.mp4 filter=lfs diff=lfs merge=lfs -text
610
+ full_scale_2fps_max384/277e72e5-013a-446d-8463-778f1c786105.mp4 filter=lfs diff=lfs merge=lfs -text
611
+ full_scale_2fps_max384/44e3114f-8654-48ee-97c5-9506311ba188.mp4 filter=lfs diff=lfs merge=lfs -text
612
+ full_scale_2fps_max384/18e84829-901a-414d-8a2b-d1d2b3244db7.mp4 filter=lfs diff=lfs merge=lfs -text
613
+ full_scale_2fps_max384/448472cf-8cbb-4aa9-ae72-99ef72adae87.mp4 filter=lfs diff=lfs merge=lfs -text
614
+ full_scale_2fps_max384/46d00bf5-ed73-4e5f-84eb-9c880eec10d8.mp4 filter=lfs diff=lfs merge=lfs -text
615
+ full_scale_2fps_max384/1558e9f2-d7f3-4a23-9627-4240f506d7df.mp4 filter=lfs diff=lfs merge=lfs -text
616
+ full_scale_2fps_max384/45dc74e1-c8dd-443a-a7a6-ca4215144e97.mp4 filter=lfs diff=lfs merge=lfs -text
617
+ full_scale_2fps_max384/26718e04-3f7a-443d-a589-ca814f05b60e.mp4 filter=lfs diff=lfs merge=lfs -text
618
+ full_scale_2fps_max384/200a8f32-c44a-463d-ba39-0190a0bddf7a.mp4 filter=lfs diff=lfs merge=lfs -text
619
+ full_scale_2fps_max384/26b4e1c1-54c7-419f-b2d3-63ea52fd3540.mp4 filter=lfs diff=lfs merge=lfs -text
620
+ full_scale_2fps_max384/49d20f86-b516-49be-a27b-a450955c9f46.mp4 filter=lfs diff=lfs merge=lfs -text
621
+ full_scale_2fps_max384/47078bd8-6b58-458c-8c34-8d905e21079a.mp4 filter=lfs diff=lfs merge=lfs -text
622
+ full_scale_2fps_max384/52d7e473-06a6-4464-81f4-08199bf5cb6a.mp4 filter=lfs diff=lfs merge=lfs -text
623
+ full_scale_2fps_max384/54337e20-df12-4133-9e34-618e0381265e.mp4 filter=lfs diff=lfs merge=lfs -text
624
+ full_scale_2fps_max384/4be2455a-6719-45d4-85c1-86de5bebdd44.mp4 filter=lfs diff=lfs merge=lfs -text
625
+ full_scale_2fps_max384/51378d03-1fb8-481f-8e40-fb947567d25d.mp4 filter=lfs diff=lfs merge=lfs -text
626
+ full_scale_2fps_max384/434a5651-9901-4b32-b782-d9a2bb52e805.mp4 filter=lfs diff=lfs merge=lfs -text
627
+ full_scale_2fps_max384/444a032d-faef-447e-a5dd-c3c8386672b6.mp4 filter=lfs diff=lfs merge=lfs -text
628
+ full_scale_2fps_max384/5093842e-7cf9-4509-bf04-6e0ec6b75b27.mp4 filter=lfs diff=lfs merge=lfs -text
629
+ full_scale_2fps_max384/4ce9f93b-f44c-46fe-9050-3452d2f39abb.mp4 filter=lfs diff=lfs merge=lfs -text
630
+ full_scale_2fps_max384/4c45d8f4-e026-4ca3-a1e4-6d32da434469.mp4 filter=lfs diff=lfs merge=lfs -text
631
+ full_scale_2fps_max384/60209262-ab8e-4214-a8dd-4764a9a4b26d.mp4 filter=lfs diff=lfs merge=lfs -text
632
+ full_scale_2fps_max384/4a58fe42-9ebe-4dfa-a103-e577a151a3f2.mp4 filter=lfs diff=lfs merge=lfs -text
633
+ full_scale_2fps_max384/3d3cddf9-7c61-43af-8b1a-46aa78983a5a.mp4 filter=lfs diff=lfs merge=lfs -text
634
+ full_scale_2fps_max384/0ca23a40-6daf-4503-bfa2-f315a79b7317.mp4 filter=lfs diff=lfs merge=lfs -text
635
+ full_scale_2fps_max384/484d4760-a55f-4f58-be67-e98d3920f1d3.mp4 filter=lfs diff=lfs merge=lfs -text
636
+ full_scale_2fps_max384/5a10fa29-fd9d-4d5f-baf3-d1bdffb77e4a.mp4 filter=lfs diff=lfs merge=lfs -text
637
+ full_scale_2fps_max384/4d770048-cfde-411f-ad53-e4d57a7dd0d1.mp4 filter=lfs diff=lfs merge=lfs -text
638
+ full_scale_2fps_max384/4c6f009a-f0ca-4557-8723-4111761e0a9a.mp4 filter=lfs diff=lfs merge=lfs -text
639
+ full_scale_2fps_max384/5c172db2-eca2-4f9a-8963-b2111123fef2.mp4 filter=lfs diff=lfs merge=lfs -text
640
+ full_scale_2fps_max384/5825062f-4827-407c-bbd3-91ca010d25c2.mp4 filter=lfs diff=lfs merge=lfs -text
641
+ full_scale_2fps_max384/5aa8f84b-aea8-4e40-b710-82b93875e1ae.mp4 filter=lfs diff=lfs merge=lfs -text
642
+ full_scale_2fps_max384/5f9cb028-fa02-47ef-845f-3b149a527e46.mp4 filter=lfs diff=lfs merge=lfs -text
643
+ full_scale_2fps_max384/603a427f-9191-4ca4-a1b0-dd3c5e7fda70.mp4 filter=lfs diff=lfs merge=lfs -text
644
+ full_scale_2fps_max384/6322a6f2-f271-4335-bf46-d428a5a58298.mp4 filter=lfs diff=lfs merge=lfs -text
645
+ full_scale_2fps_max384/63f0f27d-645c-4efc-9162-6f0cb0353bc8.mp4 filter=lfs diff=lfs merge=lfs -text
646
+ full_scale_2fps_max384/62623424-f671-4380-b12c-1acdb04afeee.mp4 filter=lfs diff=lfs merge=lfs -text
647
+ full_scale_2fps_max384/680e61d7-86e0-47cd-a6e1-83868aaaf3f0.mp4 filter=lfs diff=lfs merge=lfs -text
648
+ full_scale_2fps_max384/6af04762-9ccf-41aa-bbfb-48a443c7cec3.mp4 filter=lfs diff=lfs merge=lfs -text
649
+ full_scale_2fps_max384/707fb659-9f76-482b-b83b-e0bc1f090cf7.mp4 filter=lfs diff=lfs merge=lfs -text
650
+ full_scale_2fps_max384/50e55c8f-b939-4b5c-86b5-e0b53d199d27.mp4 filter=lfs diff=lfs merge=lfs -text
651
+ full_scale_2fps_max384/6d3939f5-5636-43c7-94bb-86318392bd2d.mp4 filter=lfs diff=lfs merge=lfs -text
652
+ full_scale_2fps_max384/543ac6fc-9f77-4cdd-b7fe-6721cb037916.mp4 filter=lfs diff=lfs merge=lfs -text
653
+ full_scale_2fps_max384/6d20e1a8-bb8e-49ff-aa5d-ec861e7f5432.mp4 filter=lfs diff=lfs merge=lfs -text
654
+ full_scale_2fps_max384/77362fa3-e23a-4942-bd53-7d58b23b979b.mp4 filter=lfs diff=lfs merge=lfs -text
655
+ full_scale_2fps_max384/74d38b4a-6f22-43b3-b631-9234c05f27ab.mp4 filter=lfs diff=lfs merge=lfs -text
656
+ full_scale_2fps_max384/67bc312a-3b49-4847-aa79-63ad7d3e4217.mp4 filter=lfs diff=lfs merge=lfs -text
657
+ full_scale_2fps_max384/74e37c3b-9b5d-41ef-a806-61000cb6857c.mp4 filter=lfs diff=lfs merge=lfs -text
658
+ full_scale_2fps_max384/74dbf8d0-b10d-4993-831e-7618801d2d95.mp4 filter=lfs diff=lfs merge=lfs -text
659
+ full_scale_2fps_max384/50c1df1c-e016-472a-b0ac-d297b0c82e87.mp4 filter=lfs diff=lfs merge=lfs -text
660
+ full_scale_2fps_max384/623bc0af-7d4f-4b85-9263-2d04e016283d.mp4 filter=lfs diff=lfs merge=lfs -text
661
+ full_scale_2fps_max384/478d7fa4-b174-4266-b0ff-bd180ba0b806.mp4 filter=lfs diff=lfs merge=lfs -text
662
+ full_scale_2fps_max384/28bc1ee7-b0c1-4f30-934a-0ab665779d90.mp4 filter=lfs diff=lfs merge=lfs -text
663
+ full_scale_2fps_max384/04ac4c40-22fd-42aa-a7f0-ee597ffb7058.mp4 filter=lfs diff=lfs merge=lfs -text
664
+ full_scale_2fps_max384/2876b375-e848-412c-8a6f-0664cbab6a33.mp4 filter=lfs diff=lfs merge=lfs -text
665
+ full_scale_2fps_max384/2c0c6508-397f-4c48-aeb7-abc7a3cae8d1.mp4 filter=lfs diff=lfs merge=lfs -text
666
+ full_scale_2fps_max384/1c2da42f-8b03-4643-bbd2-f29e2fc86bf6.mp4 filter=lfs diff=lfs merge=lfs -text
667
+ full_scale_2fps_max384/5e64c89d-34ce-49a3-949a-dc90385cdbcc.mp4 filter=lfs diff=lfs merge=lfs -text
668
+ full_scale_2fps_max384/775acd8e-086f-48cf-adf3-c154f0d0bd2d.mp4 filter=lfs diff=lfs merge=lfs -text
669
+ full_scale_2fps_max384/2c6e772c-4681-44a1-966c-dd84c65183ad.mp4 filter=lfs diff=lfs merge=lfs -text
670
+ full_scale_2fps_max384/2c56528c-5499-4307-bfd1-cbb2d26b1d8b.mp4 filter=lfs diff=lfs merge=lfs -text
671
+ full_scale_2fps_max384/2a2ff7db-5460-4296-a8a7-946ba628226d.mp4 filter=lfs diff=lfs merge=lfs -text
672
+ full_scale_2fps_max384/053ae0a6-937d-4bdc-8001-d3dfe7899811.mp4 filter=lfs diff=lfs merge=lfs -text
673
+ full_scale_2fps_max384/3bd5bf35-d6ac-43b2-ab75-1558a37c8550.mp4 filter=lfs diff=lfs merge=lfs -text
674
+ full_scale_2fps_max384/7af460ca-08ba-4a4e-92d2-5f3e612e3642.mp4 filter=lfs diff=lfs merge=lfs -text
675
+ full_scale_2fps_max384/7bca86ab-7327-4a38-8d2f-0a668da52f5d.mp4 filter=lfs diff=lfs merge=lfs -text
676
+ full_scale_2fps_max384/892a25e2-df0c-4577-af57-b720e73654f6.mp4 filter=lfs diff=lfs merge=lfs -text
677
+ full_scale_2fps_max384/7ed8365c-37f5-4033-978a-2d3c1faa0e33.mp4 filter=lfs diff=lfs merge=lfs -text
678
+ full_scale_2fps_max384/2dbdd409-0be0-447c-8ae8-3f107fe9af80.mp4 filter=lfs diff=lfs merge=lfs -text
679
+ full_scale_2fps_max384/2d442f12-3595-4b2d-b8c5-583b65c0c49f.mp4 filter=lfs diff=lfs merge=lfs -text
680
+ full_scale_2fps_max384/2f46d1e6-2a85-4d46-b955-10c2eded661c.mp4 filter=lfs diff=lfs merge=lfs -text
681
+ full_scale_2fps_max384/2f23b607-f2e6-4f58-85d3-004c840bead2.mp4 filter=lfs diff=lfs merge=lfs -text
682
+ full_scale_2fps_max384/2c365418-7598-48e2-a2c7-6c1885a351f4.mp4 filter=lfs diff=lfs merge=lfs -text
683
+ full_scale_2fps_max384/ae2cd423-6d11-4b18-959c-9e6ad872249f.mp4 filter=lfs diff=lfs merge=lfs -text
684
+ full_scale_2fps_max384/82935e33-3b6e-4a9c-91be-9887ac0beab4.mp4 filter=lfs diff=lfs merge=lfs -text
685
+ full_scale_2fps_max384/8a6a3316-d682-4a76-81db-b244081765c9.mp4 filter=lfs diff=lfs merge=lfs -text
686
+ full_scale_2fps_max384/2d1bf98e-a133-4f63-bc1d-91f718078a52.mp4 filter=lfs diff=lfs merge=lfs -text
687
+ full_scale_2fps_max384/33fced0b-4bf6-4989-9174-cda7bb51ecef.mp4 filter=lfs diff=lfs merge=lfs -text
688
+ full_scale_2fps_max384/36c093d9-dbff-4fdd-bbc4-5302ae13415a.mp4 filter=lfs diff=lfs merge=lfs -text
689
+ full_scale_2fps_max384/7fc72743-4b55-48de-b988-4c690bda912f.mp4 filter=lfs diff=lfs merge=lfs -text
690
+ full_scale_2fps_max384/89af735b-7c13-4366-befe-efd6cd5402ff.mp4 filter=lfs diff=lfs merge=lfs -text
691
+ full_scale_2fps_max384/80111886-6bab-4b16-aac6-1dfa42357d8b.mp4 filter=lfs diff=lfs merge=lfs -text
692
+ full_scale_2fps_max384/81fc547e-75af-48cf-91c0-9f77ee633e1d.mp4 filter=lfs diff=lfs merge=lfs -text
693
+ full_scale_2fps_max384/3817b065-3d46-44ff-b6a6-8c4d646165f2.mp4 filter=lfs diff=lfs merge=lfs -text
694
+ full_scale_2fps_max384/89da8b4f-0b3a-4fba-8c77-58633d8b699b.mp4 filter=lfs diff=lfs merge=lfs -text
695
+ full_scale_2fps_max384/28151b38-7249-4c73-80b2-55c4b2046eba.mp4 filter=lfs diff=lfs merge=lfs -text
696
+ full_scale_2fps_max384/84ccc9f7-8e02-4bfd-bd44-1658814df7cc.mp4 filter=lfs diff=lfs merge=lfs -text
697
+ full_scale_2fps_max384/368e4468-1f75-4cdb-a9d0-9b8982a3cbef.mp4 filter=lfs diff=lfs merge=lfs -text
698
+ full_scale_2fps_max384/89a95cd8-ca13-4edf-807e-d62138fd30e0.mp4 filter=lfs diff=lfs merge=lfs -text
699
+ full_scale_2fps_max384/272e8bcd-32a8-48ce-a9c4-a5f0a4f15145.mp4 filter=lfs diff=lfs merge=lfs -text
700
+ full_scale_2fps_max384/8cda2388-6f45-46d4-8ded-4ba2e23b309f.mp4 filter=lfs diff=lfs merge=lfs -text
701
+ full_scale_2fps_max384/2e04b2bb-e5d3-4f66-a16e-f7756e14e749.mp4 filter=lfs diff=lfs merge=lfs -text
702
+ full_scale_2fps_max384/85665a8d-6683-4aa0-bff8-3cd5e752c8f2.mp4 filter=lfs diff=lfs merge=lfs -text
703
+ full_scale_2fps_max384/3837916b-c0e8-4359-8a73-524d40660731.mp4 filter=lfs diff=lfs merge=lfs -text
704
+ full_scale_2fps_max384/864fa3d8-9b18-44cb-a8e9-9b40765e2d0c.mp4 filter=lfs diff=lfs merge=lfs -text
705
+ full_scale_2fps_max384/78d7751d-36b9-473b-a1fb-9c14debbf61e.mp4 filter=lfs diff=lfs merge=lfs -text
706
+ full_scale_2fps_max384/7ddbf8a2-5b3e-44cd-b9dc-db17ec06831b.mp4 filter=lfs diff=lfs merge=lfs -text
707
+ full_scale_2fps_max384/8b82316c-c293-49a7-b401-a3cc026ab1d0.mp4 filter=lfs diff=lfs merge=lfs -text
708
+ full_scale_2fps_max384/5315437a-2530-47e5-a0e4-4ac656c7cfa4.mp4 filter=lfs diff=lfs merge=lfs -text
709
+ full_scale_2fps_max384/7913c8a8-1f99-4c2d-8a5d-9a5793162447.mp4 filter=lfs diff=lfs merge=lfs -text
710
+ full_scale_2fps_max384/9531cee8-0f60-4a68-8243-397ab7670a75.mp4 filter=lfs diff=lfs merge=lfs -text
711
+ full_scale_2fps_max384/3a481a46-928f-46fd-87e3-7725c5bcbbc9.mp4 filter=lfs diff=lfs merge=lfs -text
712
+ full_scale_2fps_max384/8e84c762-b0f9-4a7d-982e-010f27999435.mp4 filter=lfs diff=lfs merge=lfs -text
713
+ full_scale_2fps_max384/3af4cfae-ba88-42f8-ba7e-e832034dccba.mp4 filter=lfs diff=lfs merge=lfs -text
714
+ full_scale_2fps_max384/712ef11e-dc19-4fe1-ae66-0d9ec934196e.mp4 filter=lfs diff=lfs merge=lfs -text
715
+ full_scale_2fps_max384/3dd00666-a34d-4b45-bb4c-7a4c2342e475.mp4 filter=lfs diff=lfs merge=lfs -text
716
+ full_scale_2fps_max384/9089e01e-c28e-4767-9478-ffd7daba9b79.mp4 filter=lfs diff=lfs merge=lfs -text
717
+ full_scale_2fps_max384/869a2290-5509-49d0-8cc6-e0a0230790d3.mp4 filter=lfs diff=lfs merge=lfs -text
718
+ full_scale_2fps_max384/972f660f-27ad-49ae-bf00-8da9d6d8d708.mp4 filter=lfs diff=lfs merge=lfs -text
719
+ full_scale_2fps_max384/95836c4a-e856-4c04-9c68-800b154b6bc6.mp4 filter=lfs diff=lfs merge=lfs -text
720
+ full_scale_2fps_max384/9adc0c59-1167-4311-abf3-7b57a0743b1d.mp4 filter=lfs diff=lfs merge=lfs -text
721
+ full_scale_2fps_max384/7c969dc3-aea2-49d4-bd0e-48191b4e5a51.mp4 filter=lfs diff=lfs merge=lfs -text
722
+ full_scale_2fps_max384/90e4acc5-28a2-4bd4-972c-c6f7e18e1cde.mp4 filter=lfs diff=lfs merge=lfs -text
723
+ full_scale_2fps_max384/9b9364bc-a51c-43a1-a82e-628c4078d5c2.mp4 filter=lfs diff=lfs merge=lfs -text
724
+ full_scale_2fps_max384/2b21d0f3-cb6b-4b5e-a418-fbe2dd69e023.mp4 filter=lfs diff=lfs merge=lfs -text
725
+ full_scale_2fps_max384/8fe9742a-355f-4503-9652-0a9ebaf0ed42.mp4 filter=lfs diff=lfs merge=lfs -text
726
+ full_scale_2fps_max384/98dc31a1-edbb-42d6-8d1c-0b43fe6f9c28.mp4 filter=lfs diff=lfs merge=lfs -text
727
+ full_scale_2fps_max384/41309ada-0a81-47b2-bdd1-453a178db18a.mp4 filter=lfs diff=lfs merge=lfs -text
728
+ full_scale_2fps_max384/9e7fa594-5774-4681-a034-50a5c83cff8f.mp4 filter=lfs diff=lfs merge=lfs -text
729
+ full_scale_2fps_max384/a13a145f-920a-44ec-8aef-b489c097f4a7.mp4 filter=lfs diff=lfs merge=lfs -text
730
+ full_scale_2fps_max384/4220db3a-4708-4fd1-ab67-0ef1586906cd.mp4 filter=lfs diff=lfs merge=lfs -text
731
+ full_scale_2fps_max384/960b820b-1807-4219-8e74-94c4d00abc41.mp4 filter=lfs diff=lfs merge=lfs -text
732
+ full_scale_2fps_max384/984fadaf-0d6e-4814-ac10-00f37c71322f.mp4 filter=lfs diff=lfs merge=lfs -text
733
+ full_scale_2fps_max384/35b10335-0485-4d1f-8fa2-d4aeab8a022c.mp4 filter=lfs diff=lfs merge=lfs -text
734
+ full_scale_2fps_max384/a4ec18ff-d6b4-4002-a5e7-83980534dc0e.mp4 filter=lfs diff=lfs merge=lfs -text
735
+ full_scale_2fps_max384/4335b0cc-e10c-45df-aad3-a1fa66b7a7a8.mp4 filter=lfs diff=lfs merge=lfs -text
736
+ full_scale_2fps_max384/863011c8-ebf7-4900-87d1-3c7930f95dab.mp4 filter=lfs diff=lfs merge=lfs -text
737
+ full_scale_2fps_max384/a51026e3-4163-46ba-8d54-766fa8a17a7a.mp4 filter=lfs diff=lfs merge=lfs -text
738
+ full_scale_2fps_max384/a5bf18b5-15d4-4843-8269-3f31154e3736.mp4 filter=lfs diff=lfs merge=lfs -text
739
+ full_scale_2fps_max384/42476ba0-f357-4bc2-ae65-8d2d92fd3faa.mp4 filter=lfs diff=lfs merge=lfs -text
740
+ full_scale_2fps_max384/a7a31f23-8348-405e-a01a-cad63010bcc7.mp4 filter=lfs diff=lfs merge=lfs -text
741
+ full_scale_2fps_max384/aa14c20d-cadf-4393-935f-52175427539d.mp4 filter=lfs diff=lfs merge=lfs -text
742
+ full_scale_2fps_max384/97c1c805-291f-4b2f-bef9-0495d9d2fc92.mp4 filter=lfs diff=lfs merge=lfs -text
743
+ full_scale_2fps_max384/9e08a7b1-eb04-4d11-bb0b-f4cd42f0a819.mp4 filter=lfs diff=lfs merge=lfs -text
744
+ full_scale_2fps_max384/4194c6fa-04c4-4206-a7a5-f3f9d6cb2cec.mp4 filter=lfs diff=lfs merge=lfs -text
745
+ full_scale_2fps_max384/a9463083-1f17-40e2-bb71-cf4f16c4a54d.mp4 filter=lfs diff=lfs merge=lfs -text
746
+ full_scale_2fps_max384/ab865129-78fa-47d4-8a50-ff8c5533246f.mp4 filter=lfs diff=lfs merge=lfs -text
747
+ full_scale_2fps_max384/97eaf9ae-9b92-493d-a555-4fcdb7c039a1.mp4 filter=lfs diff=lfs merge=lfs -text
748
+ full_scale_2fps_max384/a8486c50-4576-41e3-a9ad-ee809e40c648.mp4 filter=lfs diff=lfs merge=lfs -text
749
+ full_scale_2fps_max384/39011e23-5fe4-41cd-b146-f3f9e3f3941a.mp4 filter=lfs diff=lfs merge=lfs -text
750
+ full_scale_2fps_max384/aa6d234e-174f-4521-a404-03335c89f06b.mp4 filter=lfs diff=lfs merge=lfs -text
751
+ full_scale_2fps_max384/ae11b3ee-0ef5-4ff8-90f6-c5fd3fef8072.mp4 filter=lfs diff=lfs merge=lfs -text
752
+ full_scale_2fps_max384/a6213dda-1620-4d74-a2bf-5576b0c95e41.mp4 filter=lfs diff=lfs merge=lfs -text
753
+ full_scale_2fps_max384/grp-0ee8d0ef-c5ba-42c5-9c40-8718faa081ec.mp4 filter=lfs diff=lfs merge=lfs -text
754
+ full_scale_2fps_max384/3a158275-c4c2-4bde-a186-788341e43ed4.mp4 filter=lfs diff=lfs merge=lfs -text
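Note: besides the 2 fps videos, the LFS rules above cover several large JSON result files that are stored as split shards (*.json.part0, *.json.part1, ...). A minimal sketch of stitching such shards back together before loading, assuming the parts are plain byte-level splits that only need to be concatenated in ascending part order (the repository does not document the exact split scheme; merge_json_parts and the output path below are hypothetical):

import glob
import re

def merge_json_parts(prefix: str, output_path: str) -> None:
    # Collect <prefix>.part0, <prefix>.part1, ... and sort numerically so that
    # part10 comes after part9 rather than after part1.
    parts = glob.glob(prefix + ".part*")
    parts.sort(key=lambda p: int(re.search(r"part(\d+)$", p).group(1)))
    with open(output_path, "wb") as out:
        for part in parts:
            with open(part, "rb") as f:
                out.write(f.read())

# Hypothetical usage with one of the shard prefixes tracked above:
# merge_json_parts(
#     "estp_dataset/result/estp_bench_sq_VideollmOnlineevaluator_llama_5_5.json",
#     "merged/estp_bench_sq_VideollmOnline.json",
# )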
ESTP-Bench/.mdl ADDED
Binary file (42 Bytes). View file
 
ESTP-Bench/.msc ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8c837d173da95685529158bc8b355b67d862a0f005f1e4fde528e532a7c0297
3
+ size 125749
ESTP-Bench/.mv ADDED
@@ -0,0 +1 @@
1
+ master
ESTP-Bench/estp_dataset/__pycache__/livechat.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
ESTP-Bench/estp_dataset/benchmark/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ from .benchmark import *
2
+ from .estp import *
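These two star-imports re-export everything defined in benchmark.py and estp.py (both added later in this commit) at the package level. A hedged usage sketch, assuming the ESTP-Bench folder is on sys.path so that estp_dataset resolves as a package (the import path is not confirmed by this commit):

# Hypothetical import; the benchmark classes are defined in estp.py below.
from estp_dataset.benchmark import ESTP_singleQ_benchmark, ESTP_contextualQ_benchmark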
ESTP-Bench/estp_dataset/benchmark/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (196 Bytes). View file
 
ESTP-Bench/estp_dataset/benchmark/__pycache__/benchmark.cpython-310.pyc ADDED
Binary file (155 Bytes). View file
 
ESTP-Bench/estp_dataset/benchmark/__pycache__/estp.cpython-310.pyc ADDED
Binary file (14.7 kB). View file
 
ESTP-Bench/estp_dataset/benchmark/benchmark.py ADDED
File without changes
ESTP-Bench/estp_dataset/benchmark/check_error_data.ipynb ADDED
@@ -0,0 +1,97 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 6,
6
+ "metadata": {},
7
+ "outputs": [
8
+ {
9
+ "name": "stdout",
10
+ "output_type": "stream",
11
+ "text": [
12
+ "{'video_uid': 'd20f6551-0272-4a69-8b02-da7ffcf29e0f', 'conversation': [{'role': 'user', 'content': 'What should I do next to continue making a recipe after completing the earlier tasks: slice leek, heating a skillet, prepare soapy mixture in a bowl, peel garlic? Please provide clear instructions.', 'time': 434.8484}, {'role': 'assistant', 'content': 'First, add more water to the soapy mixture in the bowl to ensure it’s ready for cleaning.', 'time': 435.57229, 'start_time': 434.8484, 'end_time': 435.57229, 'stamp_time': 435.210345}, {'role': 'assistant', 'content': 'Next, dice the peeled garlic finely for better flavor distribution in your dish.', 'time': 445.22206, 'start_time': 435.57229, 'end_time': 445.22206, 'stamp_time': 440.397175}, {'role': 'assistant', 'content': 'Take a moment to check the recipe in the manual to confirm the next steps.', 'time': 839.977, 'start_time': 449.299, 'end_time': 839.977, 'stamp_time': 644.6379999999999}, {'role': 'assistant', 'content': 'Before proceeding, wash your hands thoroughly to maintain hygiene.', 'time': 860.10599, 'start_time': 840.78309, 'end_time': 860.10599, 'stamp_time': 850.44454}, {'role': 'assistant', 'content': 'Now, prepare the stock that will be used as a base for your soup.', 'time': 875.37843, 'start_time': 860.42941, 'end_time': 875.37843, 'stamp_time': 867.90392}, {'role': 'assistant', 'content': 'Add the prepared stock to the sliced leek in the skillet to start cooking the soup.', 'time': 987.484, 'start_time': 875.74605, 'end_time': 987.484, 'stamp_time': 931.6150250000001}, {'role': 'assistant', 'content': 'Clear the countertop to create more workspace and keep the area tidy.', 'time': 996.95592, 'start_time': 988.81578, 'end_time': 996.95592, 'stamp_time': 992.88585}, {'role': 'assistant', 'content': 'Wash the knife in the soapy mixture to clean it after use.', 'time': 1033.83901, 'start_time': 1006.96205, 'end_time': 1033.83901, 'stamp_time': 1020.4005299999999}, {'role': 'assistant', 'content': 'Arrange the plates into the cupboard to keep them ready for serving.', 'time': 1088.19204, 'start_time': 1068.89759, 'end_time': 1088.19204, 'stamp_time': 1078.544815}, {'role': 'assistant', 'content': 'Wash the kitchen utensils in the soapy mixture to ensure they’re clean and ready for use.', 'time': 1108.97613, 'start_time': 1095.386, 'end_time': 1108.97613, 'stamp_time': 1102.181065}, {'role': 'assistant', 'content': 'Finally, stir the potato leek soup in the skillet to combine all the ingredients evenly.', 'time': 1289.00491, 'start_time': 1109.72215, 'end_time': 1289.00491, 'stamp_time': 1199.36353}], 'duration': 867.9392953125, 'start_time': 434.8484, 'end_time': 1302.7876953125, 'Task Type': 'Action Reasoning', 'evaluator_output_text': [[], [], [], [], [], [], [], [], [], [], []], 'evaluator_output_reponse': [[], [], [], [], [], [], [], [], [], [], []]}\n",
13
+ "{'video_uid': 'grp-a9c519a7-4776-42d6-bcf1-270f0d302843', 'conversation': [{'role': 'user', 'content': 'What is the next thing I need to do to make pancakes after the steps already completed: gathers utensils on countertop, pours flour in bowl, stores flour in cabinet, removes ingredients from fridge, prepares flour mixture in bowl, turns on cooking gas, prepares flour mixture in bowl, inspects flour pack, heats butter in pan, prepares pancake in pan? Include any important details.', 'time': 381.307}, {'role': 'assistant', 'content': 'Next, fetch water in a bowl to add to your pancake mixture. This will help achieve the right consistency for the batter.', 'time': 382.23681, 'start_time': 381.307, 'end_time': 382.23681, 'stamp_time': 381.771905}, {'role': 'assistant', 'content': 'Now, inspect the pancake in the pan to ensure it’s cooking evenly and not burning. Adjust the heat if necessary.', 'time': 391.83298, 'start_time': 382.23681, 'end_time': 391.83298, 'stamp_time': 387.034895}, {'role': 'assistant', 'content': 'Remove any sauce or toppings from the fridge that you plan to serve with the pancakes.', 'time': 413.87677999999994, 'start_time': 393.33784, 'end_time': 413.87677999999994, 'stamp_time': 403.60731}, {'role': 'assistant', 'content': 'Declutter the kitchen countertop to create more space for plating and serving the pancakes.', 'time': 422.56539499999997, 'start_time': 414.07678, 'end_time': 422.56539499999997, 'stamp_time': 418.3210875}, {'role': 'assistant', 'content': 'Inspect the pancake in the pan again to check if it’s ready to flip or remove from the heat.', 'time': 446.946, 'start_time': 422.765395, 'end_time': 446.946, 'stamp_time': 434.8556975}, {'role': 'assistant', 'content': 'Clean a plate with a cloth to ensure it’s ready for serving the pancakes.', 'time': 503.40671499999996, 'start_time': 447.71982, 'end_time': 503.40671499999996, 'stamp_time': 475.5632675}, {'role': 'assistant', 'content': 'Prepare the next pancake in the pan, ensuring the batter is evenly spread for consistent cooking.', 'time': 508.35909, 'start_time': 503.606715, 'end_time': 508.35909, 'stamp_time': 505.9829025}, {'role': 'assistant', 'content': 'Wash any used utensils in the sink to keep your workspace clean and organized.', 'time': 592.967, 'start_time': 509.93854, 'end_time': 592.967, 'stamp_time': 551.45277}, {'role': 'assistant', 'content': 'Clean another plate with a cloth if needed, ensuring all serving dishes are ready.', 'time': 651.607, 'start_time': 592.96796, 'end_time': 651.607, 'stamp_time': 622.28748}, {'role': 'assistant', 'content': 'Prepare the next pancake in the pan, maintaining a steady heat for even cooking.', 'time': 659.0125, 'start_time': 654.49488, 'end_time': 659.0125, 'stamp_time': 656.75369}, {'role': 'assistant', 'content': 'Serve the pancake on a plate, arranging it neatly for presentation.', 'time': 742.45, 'start_time': 663.88021, 'end_time': 742.45, 'stamp_time': 703.165105}, {'role': 'assistant', 'content': 'Turn off the cooking gas to ensure safety once all pancakes are prepared.', 'time': 749.7833850000001, 'start_time': 742.5897, 'end_time': 749.7833850000001, 'stamp_time': 746.1865425000001}, {'role': 'assistant', 'content': 'Prepare the final pancake on the plate, adding any toppings or sauces as desired.', 'time': 752.07794, 'start_time': 749.9833850000001, 'end_time': 752.07794, 'stamp_time': 751.0306625000001}, {'role': 'assistant', 'content': 'Store any unused ingredients back in the fridge to keep them fresh for later use.', 'time': 855.817, 
'start_time': 761.51526, 'end_time': 855.817, 'stamp_time': 808.6661300000001}, {'role': 'assistant', 'content': 'Cover the pancake with a lid to keep it warm while you finish up other tasks.', 'time': 869.93716, 'start_time': 857.18827, 'end_time': 869.93716, 'stamp_time': 863.562715}, {'role': 'assistant', 'content': 'Declutter the kitchen countertop once more to ensure a clean and tidy workspace.', 'time': 885.138505, 'start_time': 881.68724, 'end_time': 885.138505, 'stamp_time': 883.4128725}, {'role': 'assistant', 'content': 'Check the pancake on the plate to ensure it’s ready to serve and looks appetizing.', 'time': 1196.997, 'start_time': 885.338505, 'end_time': 1196.997, 'stamp_time': 1041.1677525}], 'duration': 831.6016588541665, 'start_time': 381.307, 'end_time': 1212.9086588541666, 'Task Type': 'Action Reasoning', 'evaluator_output_text': [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []], 'evaluator_output_reponse': [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []]}\n",
14
+ "{'video_uid': 'eb1b6e62-7197-4a75-bc39-c373e558fd97', 'conversation': [{'role': 'user', 'content': 'What are the sequential actions I need to take to accomplish my goal, like making dough balls?', 'time': 0.0}, {'role': 'assistant', 'content': 'To start, shape the dough into a ball by gently rolling it between your hands.', 'time': 29.20309, 'start_time': 0.0, 'end_time': 29.20309, 'stamp_time': 14.601545}, {'role': 'assistant', 'content': 'Continue shaping the dough into a smooth, round ball to ensure even cooking and texture.', 'time': 671.09652, 'start_time': 29.20309, 'end_time': 671.09652, 'stamp_time': 350.149805}], 'duration': 848.4666666666667, 'start_time': 0.0, 'end_time': 848.4666666666667, 'Task Type': 'Task Understanding', 'evaluator_output_text': [[], []], 'evaluator_output_reponse': [[], []]}\n",
15
+ "1212\n",
16
+ "1212\n"
17
+ ]
18
+ }
19
+ ],
20
+ "source": [
21
+ "import json,os\n",
22
+ "import numpy as np\n",
23
+ "\n",
24
+ "def load_multiple_json(file_path):\n",
25
+ " \"\"\"读取包含多个 JSON 对象的文件,并将每个 JSON 对象解析成 Python 对象,存放在列表中。\"\"\"\n",
26
+ " with open(file_path, 'r', encoding='utf-8') as f:\n",
27
+ " content = f.read()\n",
28
+ " \n",
29
+ " decoder = json.JSONDecoder()\n",
30
+ " pos = 0\n",
31
+ " results = []\n",
32
+ " content_length = len(content)\n",
33
+ " \n",
34
+ " while pos < content_length:\n",
35
+ " # 跳过空白字符\n",
36
+ " while pos < content_length and content[pos].isspace():\n",
37
+ " pos += 1\n",
38
+ " if pos >= content_length:\n",
39
+ " break\n",
40
+ " try:\n",
41
+ " obj, new_pos = decoder.raw_decode(content, pos)\n",
42
+ " results.append(obj)\n",
43
+ " pos = new_pos\n",
44
+ " except json.JSONDecodeError as e:\n",
45
+ " # 出现解析错误则退出循环\n",
46
+ " print(f\"JSON 解析错误: {e}\")\n",
47
+ " break\n",
48
+ " return results\n",
49
+ "\n",
50
+ "\n",
51
+ "eval_file1 = '/root/videollm-online/data/estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2evaluator_llama_5_5.json'\n",
52
+ "\n",
53
+ "parent_dir = os.path.dirname(eval_file1)\n",
54
+ "eval_files = [os.path.join(parent_dir, f) for f in os.listdir(parent_dir) if f.startswith(eval_file1.split('/')[-1])]\n",
55
+ "eval_result1 = []\n",
56
+ "for eval_file in eval_files:\n",
57
+ " eval_result1 += load_multiple_json(eval_file)\n",
58
+ " \n",
59
+ " \n",
60
+ "eval_file2 = '/root/videollm-online/data/estp_dataset/result/estp_bench_sq_VideollmOnlineevaluator_llama_5_5.json'\n",
61
+ "\n",
62
+ "parent_dir = os.path.dirname(eval_file2)\n",
63
+ "eval_files = [os.path.join(parent_dir, f) for f in os.listdir(parent_dir) if f.startswith(eval_file2.split('/')[-1])]\n",
64
+ "eval_result2 = []\n",
65
+ "for eval_file in eval_files:\n",
66
+ " eval_result2 += load_multiple_json(eval_file)\n",
67
+ " \n",
68
+ "for result in eval_result1:\n",
69
+ " if 'EWO' not in result:\n",
70
+ " print(result)\n",
71
+ "print(len(eval_result1))\n",
72
+ "print(len(eval_result2))\n"
73
+ ]
74
+ }
75
+ ],
76
+ "metadata": {
77
+ "kernelspec": {
78
+ "display_name": "videollm",
79
+ "language": "python",
80
+ "name": "python3"
81
+ },
82
+ "language_info": {
83
+ "codemirror_mode": {
84
+ "name": "ipython",
85
+ "version": 3
86
+ },
87
+ "file_extension": ".py",
88
+ "mimetype": "text/x-python",
89
+ "name": "python",
90
+ "nbconvert_exporter": "python",
91
+ "pygments_lexer": "ipython3",
92
+ "version": "3.10.14"
93
+ }
94
+ },
95
+ "nbformat": 4,
96
+ "nbformat_minor": 2
97
+ }
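For context, load_multiple_json in the notebook above relies on json.JSONDecoder.raw_decode to walk through a file that holds several JSON objects written back to back (the format of the evaluator result shards); a plain json.load would raise on such a file. A minimal self-contained illustration of that loop, using a made-up blob:

import json

blob = '{"video_uid": "a"} {"video_uid": "b"}\n{"video_uid": "c"}'

decoder = json.JSONDecoder()
pos, objects = 0, []
while pos < len(blob):
    # Skip any whitespace between consecutive JSON objects.
    while pos < len(blob) and blob[pos].isspace():
        pos += 1
    if pos >= len(blob):
        break
    obj, pos = decoder.raw_decode(blob, pos)
    objects.append(obj)

print(len(objects))  # -> 3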
ESTP-Bench/estp_dataset/benchmark/estp.py ADDED
@@ -0,0 +1,607 @@
1
+ import tqdm
2
+ import os
3
+ import time
4
+ import json
5
+
6
+
7
+ def list_user_query(conversation):
8
+ user_query = []
9
+ query_time = []
10
+ for i in range(len(conversation)):
11
+ if conversation[i]['role'].lower() == 'user':
12
+ user_query.append(conversation[i]['content'])
13
+ query_time.append(conversation[i]['time'])
14
+ # Sort user queries and query times by time
15
+ if len(user_query) > 1:
16
+ # Create pairs of (query, time) and sort by time
17
+ query_time_pairs = list(zip(user_query, query_time))
18
+ query_time_pairs.sort(key=lambda x: x[1])
19
+
20
+ # Unpack the sorted pairs back into separate lists
21
+ user_query = [pair[0] for pair in query_time_pairs]
22
+ query_time = [pair[1] for pair in query_time_pairs]
23
+
24
+ return user_query, query_time
25
+
26
+ def print_json(json_data):
27
+ print(json.dumps(json_data, indent=4))
28
+
29
+ PROMPT_TEMPLATE_PROACTIVE = '''You are an advanced image question-answering AI assistant. You have been provided with images and a question related to the images. Your task is to carefully analyze the images and provide the answer to the question. You need to carefully confirm whether the image content meets the conditions of the question, and then output the correct content.
30
+
31
+ Question: {}
32
+
33
+ The answer is:
34
+ '''
35
+
36
+ PROMPT_TEMPLATE_PASSIVE = '''You are an advanced video AI assistant. Given a video and a question, carefully analyze each frame of the video, identify all relevant moments that help answer the question, and provide the corresponding frame numbers along with the answer.
37
+ The format should be: '[frame idx] answer'. For example, [6] The object is a cup.
38
+ [60] The object is a cup.
39
+ [100] The object is a yellow cup.
40
+
41
+ Question: {}
42
+
43
+ The answer is:
44
+ '''
45
+
46
+ PROMPT_TEMPLATE_PASSIVE_GROUNDING = '''
47
+ Question: {}
48
+
49
+ '''
50
+
51
+ ONLINE_MODEL = ['VideollmOnline', 'MMDuet', 'EWO']
52
+ GROUNDING_MODEL = ['TimeChat']
53
+ STREAMING_MODEL = ['EgoVLP', 'CLIP', 'Lavila']
54
+
55
+ class ESTP_singleQ_benchmark:
56
+ def __init__(self, data, config=None):
57
+ self.data = data
58
+ task2number = {
59
+ "Object State Change Recognition": 0,
60
+ "Ego Object State Change Recognition": 0,
61
+ "Object Localization": 0,
62
+ "Action Recognition": 0,
63
+ "Action Reasoning": 0,
64
+ "Object Recognition": 0,
65
+ "Ego Object Localization": 0,
66
+ "Object Function": 0,
67
+ "Task Understanding": 0,
68
+ "Attribute Perception": 0,
69
+ "Information Function": 0,
70
+ "Text-Rich Understanding": 0
71
+ }
72
+
73
+ for k,v in data.items():
74
+ for kk,vv in v.items():
75
+ for ll in vv:
76
+ task2number[ll['Task Type'].strip()] += 1
77
+
78
+ print_json(task2number)
79
+
80
+ self.config = config
81
+
82
+ def eval(self, data, model, output_path, eval_mode):
83
+ """
84
+ Evaluate the model on the given data and update the data with the model responses.
85
+ data: data input
86
+ model: model to evaluate
87
+ """
88
+ video_root = self.config.video_root
89
+ # Create output directory if it doesn't exist
90
+ os.makedirs(os.path.dirname(output_path), exist_ok=True)
91
+
92
+ for k,v in tqdm.tqdm(list(data.items())):
93
+ for kk,vv in v.items():
94
+ for q_idx, qa in enumerate(vv):
95
+ if model.name() in qa.keys():
96
+ continue
97
+ video_path = os.path.join(video_root, k + '.mp4')
98
+
99
+ for conv in qa['conversation']:
100
+ if 'time' not in conv.keys():
101
+ conv['time'] = conv['start_time'] + (conv['end_time'] - conv['start_time']) / 2
102
+ qa['conversation'] = sorted(qa['conversation'], key=lambda x: x['time'])
103
+
104
+ start_time = qa['clip_start_time'] if 'clip_start_time' in qa.keys() else qa['start_time']
105
+ end_time = qa['clip_end_time'] if 'clip_end_time' in qa.keys() else qa['end_time']
106
+ max_time = qa['conversation'][-1]['end_time']
107
+
108
+ if 'question' in qa.keys():
109
+ inp = qa['question']
110
+ query_time = qa['question_time'] if 'question_time' in qa.keys() else start_time # start_time, end_time and question_time are all on the full-video time scale
111
+ else:
112
+ inp, query_time = list_user_query(qa['conversation'])
113
+ inp = inp[0]
114
+ query_time = query_time[0]
115
+
116
+ start_time = min(start_time, query_time)
117
+ end_time = min(end_time, max_time)
118
+
119
+ # if eval_mode == "frame_by_frame":
120
+ # dialog_history = eval_fbf(model, video_path, inp, start_time, end_time, query_time)
121
+ # elif eval_mode == "passive_inference":
122
+ # dialog_history = eval_passive_inference(model, video_path, inp, start_time, end_time, query_time)
123
+ # else:
124
+ # raise ValueError(f"Invalid eval mode: {eval_mode}")
125
+
126
+ try:
127
+ if eval_mode == "frame_by_frame":
128
+ dialog_history = eval_fbf(model, video_path, inp, start_time, end_time, query_time)
129
+ elif eval_mode == "passive_inference":
130
+ dialog_history = eval_passive_inference(model, video_path, inp, start_time, end_time, query_time)
131
+ else:
132
+ raise ValueError(f"Invalid eval mode: {eval_mode}")
133
+ except Exception as e:
134
+ print(f"Error {e} in {k} {kk} {q_idx}")
135
+ continue
136
+ qa[model.name()] = dialog_history
137
+
138
+ json.dump(data, open(output_path, 'w'), indent=4)
139
+
140
+ def eval_passive_inference(model, video_path, question, start_time, end_time, query_time=0):
141
+ """
142
+ Evaluate the model by first feeding all video frames and then getting the response
143
+
144
+ Args:
145
+ model: The model to be evaluated
146
+ video_path: The path to the video file
147
+ question: The input question
148
+ start_time: The start time of the evaluation
149
+ end_time: The end time of the evaluation
150
+ query_time: The time when the question is asked
151
+
152
+ Returns:
153
+ dialog_history: List of conversation turns with timestamps
154
+ """
155
+
156
+ if model.name() in ONLINE_MODEL:
157
+ dialog_history = model.Run(video_path, question, start_time, end_time, query_time)
158
+ elif model.name() in STREAMING_MODEL:
159
+ dialog_history = model.Run(video_path, question, start_time, end_time, query_time)
160
+ elif model.name() in GROUNDING_MODEL:
161
+ raise NotImplementedError(f"Grounding model {model.name()} is not implemented")
162
+ else:
163
+ query = question
164
+ inp = PROMPT_TEMPLATE_PASSIVE.format(query)
165
+
166
+ real_start_time = time.time()
167
+ response, frame_number = model.Run(video_path, inp, start_time, end_time)
168
+ real_end_time = time.time()
169
+
170
+ frame_fps = frame_number / (end_time - start_time)
171
+
172
+ # 2. parse response
173
+ response_list = response.split('\n')
174
+ response_list = [i.strip() for i in response_list if i.strip()]
175
+ answer_pairs = []
176
+
177
+ for response in response_list:
178
+ if '[' in response and ']' in response:
179
+ frame_idx = response.split('[')[1].split(']')[0]
180
+ if ',' in frame_idx:
181
+ frame_idxs = [int(x.strip()) for x in frame_idx.split(',')]
182
+ elif '-' in frame_idx:
183
+ # Handle range format like "1-7"
184
+ start, end = map(int, frame_idx.split('-'))
185
+ frame_idxs = [int((start+end) / 2)]
186
+ else:
187
+ try:
188
+ frame_idxs = [int(frame_idx)]
189
+ except:
190
+ continue
191
+ answer = response.split('[')[1].split(']')[1].strip()
192
+ for frame_idx in frame_idxs:
193
+ answer_time = start_time + frame_idx / frame_fps
194
+ answer_pairs.append((answer_time, answer))
195
+
196
+ # 3. generate dialog history
197
+ dialog_history = [{
198
+ 'role': 'user',
199
+ 'content': question,
200
+ 'time': query_time,
201
+ 'fps': frame_number / (real_end_time - real_start_time),
202
+ 'cost': real_end_time - real_start_time
203
+ }]
204
+
205
+ for answer_time, answer in answer_pairs:
206
+ dialog_history.append({
207
+ 'role': 'assistant',
208
+ 'content': answer,
209
+ 'time': answer_time,
210
+ 'fps': frame_number / (real_end_time - real_start_time),
211
+ 'cost': real_end_time - real_start_time
212
+ })
213
+ return dialog_history
214
+
215
+ def eval_streaming(model, video_path, question, start_time, end_time, query_time=0):
216
+ """
217
+ Evaluate the model on the data in streaming mode
218
+ """
219
+ # TODO: implement this
220
+ dialog_history = model.Run(video_path, question, start_time, end_time, query_time)
221
+ return dialog_history
222
+
223
+ def eval_fbf(model, video_path, question, start_time, end_time, query_time=0):
224
+ """
225
+ Evaluate the model on the data frame by frame
226
+
227
+ Args:
228
+ model: The model to be evaluated
229
+ video_path: The path to the video file
230
+ question: The input question
231
+ start_time: The start time of the evaluation
232
+ end_time: The end time of the evaluation
233
+
234
+ Returns:
235
+ dialog_history: List of conversation turns with timestamps
236
+ """
237
+
238
+ if model.name() in ONLINE_MODEL:
239
+ dialog_history = model.Run(video_path, question, start_time, end_time, query_time)
240
+ elif model.name() in STREAMING_MODEL:
241
+ raise NotImplementedError(f"Streaming model {model.name()} is not implemented")
242
+ elif model.name() in GROUNDING_MODEL:
243
+ raise NotImplementedError(f"Grounding model {model.name()} is not implemented")
244
+ else:
245
+ # query = f"Is it the right time to answer the question \"{inp}\"? You need to answer yes or no first, and if yes, please answer the question."
246
+ query = f"Is it the right time to answer the question \"{question}\"? You need to answer yes or no."
247
+ first_inp = PROMPT_TEMPLATE_PROACTIVE.format(query)
248
+
249
+ yes_query = f"Please answer the question: \"{question}\""
250
+ yes_inp = PROMPT_TEMPLATE_PROACTIVE.format(yes_query)
251
+
252
+
253
+ dialog_history = []
254
+ frame_fps = model.frame_fps # fps at which the model is queried (one ask per turn)
255
+
256
+ current_time = min(start_time + 1 / frame_fps, end_time)
257
+ timecosts = []
258
+
259
+ while current_time <= end_time:
260
+ real_start_time = time.time()
261
+ response, frame_number = model.Run(video_path, first_inp, start_time, current_time)
262
+ real_end_time = time.time()
263
+ timecosts.append(real_end_time - real_start_time)
264
+
265
+ if 'yes' in response.lower():
266
+ real_start_time = time.time()
267
+ response, frame_number = model.Run(video_path, yes_inp, start_time, current_time)
268
+ real_end_time = time.time()
269
+ timecosts.append(real_end_time - real_start_time)
270
+
271
+ fps = (current_time-start_time) * frame_fps / sum(timecosts)
272
+
273
+ dialog_history.append({
274
+ 'role': 'user', 'content': query, 'time': current_time, 'fps': fps, 'cost': timecosts[-1]
275
+ })
276
+ dialog_history.append({
277
+ 'role': 'assistant', 'content': response, 'time': current_time, 'fps': fps, 'cost': timecosts[-1]
278
+ })
279
+ current_time += 1 / frame_fps
280
+ dialog_history.append({
281
+ 'role': 'fps', 'content': (end_time - start_time) / sum(timecosts),
282
+ })
283
+ return dialog_history
284
+
285
+
286
+ PROMPT_TEMPLATE_C_PROACTIVE = '''You are an advanced image question-answering AI assistant. You have been provided with images and a question related to the images. Your task is to carefully analyze the images and the provided context, and provide the answer to the question. You need to carefully confirm whether the image content meets the conditions of the question, and then output the correct content.
287
+
288
+ {}
289
+
290
+ Here is the question. Answer it and don't confuse it with the previous conversation.
291
+ Question: {}
292
+
293
+
294
+ The answer is:
295
+ '''
296
+
297
+ PROMPT_TEMPLATE_C_PASSIVE = '''You are an advanced video question-answering AI assistant. Given a video and a question related to the video, carefully analyze each frame of the video and the provided context, identify all relevant moments that help answer the question, and provide the corresponding frame numbers along with the answer.
298
+ The format should be: '[frame idx] answer'. For example, [6] The object is a cup.
299
+ [60] The object is a cup.
300
+ [100] The object is a yellow cup.
301
+
302
+ {}
303
+
304
+ Here is the question. Answer it and don't confuse it with the previous conversation.
305
+ Question: {}
306
+
307
+ '''
308
+
309
+ PROMPT_TEMPLATE_C_PASSIVE_GROUNDING = '''
310
+
311
+ {}
312
+
313
+ Here is the question. Answer it and don't confuse it with the previous conversation.
314
+ Question: {}
315
+
316
+ The answer is:
317
+ '''
318
+
319
+ class ESTP_contextualQ_benchmark:
320
+ def __init__(self, data, config=None):
321
+ self.data = data
322
+
323
+ task2number = {
324
+ "Object Relative Context": 0,
325
+ "Task Relative Context": 0,
326
+ }
327
+
328
+ for k,v in data.items():
329
+ for kk,vv in v.items():
330
+ for ll in vv:
331
+ task2number[ll['Task Type']] += 1
332
+
333
+ print_json(task2number)
334
+
335
+ self.config = config
336
+
337
+ def eval(self, data, model, output_path, eval_mode):
338
+ """
339
+ Evaluate the model on the given data and update the data with the model responses.
340
+ data: data input
341
+ model: model to evaluate
342
+ """
343
+ video_root = self.config.video_root
344
+ os.makedirs(os.path.dirname(output_path), exist_ok=True)
345
+
346
+ for k,v in tqdm.tqdm(data.items()):
347
+ for kk,vv in v.items():
348
+ for qa in vv:
349
+ if model.name() in qa.keys():
350
+ continue
351
+ video_path = os.path.join(video_root, k + '.mp4')
352
+
353
+
354
+ start_time = qa['clip_start_time'] if 'clip_start_time' in qa.keys() else qa['start_time']
355
+ end_time = qa['clip_end_time'] if 'clip_end_time' in qa.keys() else qa['end_time']
356
+
357
+ for conv in qa['conversation']:
358
+ if 'time' not in conv.keys():
359
+ conv['time'] = conv['start_time'] + (conv['end_time'] - conv['start_time']) / 2
360
+ qa['conversation'] = sorted(qa['conversation'], key=lambda x: x['time'])
361
+
362
+ max_time = qa['conversation'][-1]['end_time']
363
+
364
+ user_query, query_time = list_user_query(qa['conversation'])
365
+
366
+ start_time = min([start_time]+query_time)
367
+ end_time = min([end_time,max_time])
368
+
369
+ # if eval_mode == "frame_by_frame":
370
+ # dialog_history = eval_fbf_contextual(model, video_path, user_query, start_time, max_time, query_time)
371
+ # elif eval_mode == "passive_inference":
372
+ # dialog_history = eval_passive_inference_contextual(model, video_path, user_query, start_time, end_time, query_time)
373
+ # else:
374
+ # raise ValueError(f"Invalid eval mode: {eval_mode}")
375
+
376
+ try:
377
+ if eval_mode == "frame_by_frame":
378
+ dialog_history = eval_fbf_contextual(model, video_path, user_query, start_time, max_time, query_time)
379
+ elif eval_mode == "passive_inference":
380
+ dialog_history = eval_passive_inference_contextual(model, video_path, user_query, start_time, end_time, query_time)
381
+ else:
382
+ raise ValueError(f"Invalid eval mode: {eval_mode}")
383
+ except Exception as e:
384
+ print(e)
385
+ continue
386
+
387
+ qa[model.name()] = dialog_history
388
+
389
+ json.dump(data, open(output_path, 'w'), indent=4)
390
+
391
+
392
+ def dialog_history_to_context(dialog_history):
393
+ context = ""
394
+ for i in range(len(dialog_history)):
395
+ if dialog_history[i]['role'] == 'user':
396
+ context += f"At timestamp {dialog_history[i]['time']}, the following question occurred: {dialog_history[i]['content']} \n "
397
+ elif dialog_history[i]['role'] == 'assistant':
398
+ context += f"At timestamp {dialog_history[i]['time']}, the following answer occurred: {dialog_history[i]['content']} \n "
399
+ return context
400
+
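+ # Illustrative sketch (hypothetical values): given a dialog history such as
+ #   [{'role': 'user', 'content': 'Where is the cup?', 'time': 12.0},
+ #    {'role': 'assistant', 'content': 'On the table.', 'time': 13.5}]
+ # dialog_history_to_context would return
+ #   "At timestamp 12.0, the following question occurred: Where is the cup? \n At timestamp 13.5, the following answer occurred: On the table. \n "
+ # and this string is later prepended to prompts as conversational context.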
401
+ def eval_fbf_contextual(model, video_path, questions, start_time, end_time, query_times=[]):
402
+ """
403
+ Evaluate the model on the data frame by frame
404
+
405
+ Args:
406
+ model: The model to be evaluated
407
+ video_path: The path to the video file
408
+ questions: The list of user questions
409
+ start_time: The start time of the evaluation
410
+ end_time: The end time of the evaluation
411
+
412
+ Returns:
413
+ dialog_history: The list of dialog turns produced during evaluation
414
+ """
415
+ if model.name() in ONLINE_MODEL:
416
+ dialog_history = model.Run(video_path, questions, start_time, end_time, query_times)
417
+ elif model.name() in STREAMING_MODEL:
418
+ raise NotImplementedError(f"Streaming model {model.name()} is not implemented")
419
+ elif model.name() in GROUNDING_MODEL:
420
+ raise NotImplementedError(f"Grounding model {model.name()} is not implemented")
421
+ else:
422
+ assert isinstance(questions, list), "questions must be a list"
423
+
424
+ dialog_history = []
425
+ frame_fps = model.frame_fps
426
+ timecosts = []
427
+ context = ""
428
+
429
+ start_times = []
430
+ for question, query_time in zip(questions, query_times):
431
+ start_times.append(query_time)
432
+ end_times = start_times[1:] + [end_time]
433
+
434
+ for question, query_time, start_time, end_time in zip(questions, query_times, start_times, end_times):
435
+ # query = f"Is it the right time to answer the question \"{inp}\"? You need to answer yes or no first, and if yes, please answer the question."
436
+ query = f"Is it the right time to answer the question \"{question}\"? You need to answer yes or no."
437
+ first_inp = PROMPT_TEMPLATE_C_PROACTIVE.format(context, query)
438
+
439
+ yes_query = f"Please answer the question: \"{question}\""
440
+ yes_inp = PROMPT_TEMPLATE_C_PROACTIVE.format(context, yes_query)
441
+
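+ # Sketch of the frame-by-frame loop below: at each simulated frame step the model is first asked
+ # whether now is the right time to answer; only when it replies 'yes' is the actual question sent,
+ # and both calls are timed so an effective processing fps can be attached to every recorded turn.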
442
+ current_time = min(start_time + 1 / frame_fps, end_time)
443
+
444
+ while current_time <= end_time:
445
+ real_start_time = time.time()
446
+ response, frame_number = model.Run(video_path, first_inp, start_time, current_time)
447
+ real_end_time = time.time()
448
+ timecosts.append(real_end_time - real_start_time)
449
+
450
+
451
+ if 'yes' in response.lower():
452
+ real_start_time = time.time()
453
+ response, frame_number = model.Run(video_path, yes_inp, start_time, current_time)
454
+ real_end_time = time.time()
455
+ timecosts.append(real_end_time - real_start_time)
456
+
457
+ fps = (current_time-start_time) * frame_fps / sum(timecosts)
458
+
459
+ dialog_history.append({
460
+ 'role': 'user', 'content': yes_query, 'time': current_time, 'fps': fps, 'cost': timecosts[-1]
461
+ })
462
+ dialog_history.append({
463
+ 'role': 'assistant', 'content': response, 'time': current_time, 'fps': fps, 'cost': timecosts[-1]
464
+ })
465
+ current_time += 1 / frame_fps
466
+
467
+ context = "Here are the contextual information related to the video. Please answer the questions based on the contextual information: "
468
+ context += dialog_history_to_context(dialog_history)
469
+ try:
470
+ dialog_history.append({
471
+ 'role': 'fps', 'content': (end_times[-1] - start_times[0]) / sum(timecosts),
472
+ })
473
+ except:
474
+ print(f"Error in {model.name()}")
475
+ print(f"start_times: {start_times}")
476
+ print(f"end_times: {end_times}")
477
+ print(f"timecosts: {timecosts}")
478
+ print(f"video_path: {video_path}")
479
+
480
+ return dialog_history
481
+
482
+ def eval_streaming_contextual(model, video_path, questions, start_time, end_time, query_times=[]):
483
+ """
484
+ Evaluate the model in streaming mode
485
+ """
486
+ # TODO: implement this
487
+ dialog_history = model.Run(video_path, questions, start_time, end_time, query_times)
488
+ return dialog_history
489
+
490
+
491
+ def eval_passive_inference_contextual(model, video_path, questions, start_time, end_time, query_times=[]):
492
+ """
493
+ Evaluate the model by first feeding all video frames and then getting response
494
+
495
+ Args:
496
+ model: The model to be evaluated
497
+ video_path: The path to the video file
498
+ questions: The list of user questions
499
+ start_time: The start time of the evaluation
500
+ end_time: The end time of the evaluation
501
+ query_times: The times when the questions are asked
502
+
503
+ Returns:
504
+ dialog_history: The list of dialog turns produced during evaluation
505
+ """
506
+ if model.name() in ONLINE_MODEL:
507
+ dialog_history = model.Run(video_path, questions, start_time, end_time, query_times)
508
+ elif model.name() in STREAMING_MODEL:
509
+ assert isinstance(questions, list), "questions must be a list"
510
+
511
+ # 1. traverse each question and get the response
512
+ dialog_history = []
513
+ frame_fps = model.frame_fps
514
+
515
+ start_times = []
516
+ for question, query_time in zip(questions, query_times):
517
+ start_times.append(query_time)
518
+ end_times = start_times[1:] + [end_time]
519
+
520
+ for question, query_time, start_time, end_time in zip(questions, query_times, start_times, end_times):
521
+
522
+ # 1.1 get response
523
+ conversation = model.Run(video_path, question, start_time, end_time, query_time)
524
+ # 3. generate dialog history
525
+ dialog_history.extend(conversation)
526
+
527
+ elif model.name() in GROUNDING_MODEL:
528
+ raise NotImplementedError(f"Grounding model {model.name()} is not implemented")
529
+ else:
530
+ assert isinstance(questions, list), "questions must be a list"
531
+
532
+ # 1. traverse each question and get the response
533
+ dialog_history = []
534
+ frame_fps = model.frame_fps
535
+ context = ""
536
+
537
+ start_times = []
538
+ for question, query_time in zip(questions, query_times):
539
+ start_times.append(query_time)
540
+ end_times = start_times[1:] + [end_time]
541
+
542
+
543
+ for question, query_time, start_time, end_time in zip(questions, query_times, start_times, end_times):
544
+
545
+ if end_time <= start_time:
546
+ breakpoint()
547
+
548
+
549
+ query = question
550
+ inp = PROMPT_TEMPLATE_C_PASSIVE.format(context,query)
551
+
552
+ real_start_time = time.time()
553
+ response, frame_number = model.Run(video_path, inp, start_time, end_time)
554
+ real_end_time = time.time()
555
+
556
+ frame_fps = frame_number / (end_time - start_time)
557
+
558
+ # 2. parse response
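+ # The parser below assumes the reply follows the prompt's '[frame idx] answer' convention,
+ # e.g. "[60] The object is a cup."; comma-separated indices ("[3, 7] ...") are expanded and a
+ # range such as "[1-7] ..." is collapsed to its midpoint. Each frame index is mapped back to a
+ # timestamp via answer_time = start_time + frame_idx / frame_fps; non-matching lines are skipped.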
559
+ response_list = response.split('\n')
560
+ response_list = [i.strip() for i in response_list if i.strip()]
561
+ answer_pairs = []
562
+
563
+ for response in response_list:
564
+ if '[' in response and ']' in response:
565
+ frame_idx = response.split('[')[1].split(']')[0]
566
+
567
+ try:
568
+ if ',' in frame_idx:
569
+ frame_idxs = [int(x.strip()) for x in frame_idx.split(',')]
570
+ elif '-' in frame_idx:
571
+ # Handle range format like "1-7"
572
+ start, end = map(int, frame_idx.split('-'))
573
+ frame_idxs = [float((start+end) / 2)]
574
+ else:
575
+ frame_idxs = [float(frame_idx)]
576
+ answer = response.split('[')[1].split(']')[1].strip()
577
+ for frame_idx in frame_idxs:
578
+ answer_time = start_time + frame_idx / frame_fps
579
+ answer_pairs.append((answer_time, answer))
580
+ except:
581
+ continue
582
+
583
+ # 3. generate dialog history
584
+ dialog_history.append({
585
+ 'role': 'user',
586
+ 'content': question,
587
+ 'time': query_time,
588
+ 'fps': frame_number / (real_end_time - real_start_time),
589
+ 'cost': real_end_time - real_start_time
590
+ })
591
+
592
+ for answer_time, answer in answer_pairs:
593
+ dialog_history.append({
594
+ 'role': 'assistant',
595
+ 'content': answer,
596
+ 'time': answer_time,
597
+ 'fps': frame_number / (real_end_time - real_start_time),
598
+ 'cost': real_end_time - real_start_time
599
+ })
600
+
601
+
602
+ context = "Here are the contextual information related to the video. Please answer the questions based on the contextual information: "
603
+ context += dialog_history_to_context(dialog_history)
604
+
605
+
606
+ return dialog_history
607
+
ESTP-Bench/estp_dataset/benchmark/eval.py ADDED
@@ -0,0 +1,428 @@
1
+ import json,os
2
+ import numpy as np
3
+
4
+ def load_multiple_json(file_path):
5
+ """读取包含多个 JSON 对象的文件,并将每个 JSON 对象解析成 Python 对象,存放在列表中。"""
6
+ with open(file_path, 'r', encoding='utf-8') as f:
7
+ content = f.read()
8
+
9
+ decoder = json.JSONDecoder()
10
+ pos = 0
11
+ results = []
12
+ content_length = len(content)
13
+
14
+ while pos < content_length:
15
+ # skip whitespace characters
16
+ while pos < content_length and content[pos].isspace():
17
+ pos += 1
18
+ if pos >= content_length:
19
+ break
20
+ try:
21
+ obj, new_pos = decoder.raw_decode(content, pos)
22
+ results.append(obj)
23
+ pos = new_pos
24
+ except json.JSONDecodeError as e:
25
+ # stop the loop on a parsing error
26
+ print(f"JSON 解析错误: {e}")
27
+ break
28
+ return results
29
+
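+ # Usage sketch (hypothetical file contents): a result file may contain several JSON objects written
+ # back to back, e.g. '{"a": 1}\n{"b": 2}', which plain json.load() cannot parse; load_multiple_json
+ # returns them as the list [{"a": 1}, {"b": 2}].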
30
+
31
+ import argparse
32
+
33
+ def parse_args():
34
+ parser = argparse.ArgumentParser()
35
+ parser.add_argument('--eval_file', type=str, default='')
36
+ parser.add_argument('--eval_mode', type=str, default='all')
37
+ return parser.parse_args()
38
+
39
+ args = parse_args()
40
+
41
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estp_bench_sq_MiniCPMV_passiveevaluator_llama_5_5.json' # tmp_predict_VideollmOnline_v2_correctness
42
+ eval_file = '/root/videollm-online/data/estp_dataset/result/estp_bench_sq_MiniCPMV_passiveevaluator_deepseek_1_2.json'
43
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline_5cases/MiniCPMV_fbf_5casesevaluator_deepseek_5_5.json'
44
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline_5cases/MiniCPMV_fbf_5cases_0.175evaluator_deepseek_5_5.json'
45
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline_5cases/MiniCPMV_fbf_5cases_0.175evaluator_deepseek_1_2.json'
46
+ eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175_v2evaluator_deepseek_1_2.json'
47
+ eval_model = 'MiniCPMV'
48
+
49
+ # eval_file = '/root/videollm-online/data/estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_1_2.json'
50
+ # eval_file = '/root/videollm-online/data/estp_dataset/estp_bench_sq_VideollmOnline0.8evaluator_deepseek_1_2.json'
51
+ # # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours/LIVE_IT0.95.json'
52
+ # eval_model = 'VideollmOnline' # VideollmOnline MiniCPMV
53
+
54
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/MMDuetevaluator_deepseek_1_2.json'
55
+ # eval_model = 'MMDuet'
56
+
57
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estp_bench_sq_EWO_frame_by_frame_v6_fusion_dinov2evaluator_llama_5_5.json'
58
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estp_bench_sq_EWO_frame_by_frame_fitVal_5_cases_v2evaluator_llama_5_5.json'
59
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3Firstevaluator_llama_5_5.json'
60
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOInDomainITstage2evaluator_llama_5_5.json'
61
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2evaluator_llama_5_5.json'
62
+
63
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3First0.6evaluator_llama_5_5.json'
64
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3First0.7evaluator_llama_5_5.json'
65
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3highv2evaluator_llama_5_5.json'
66
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3firstv2evaluator_llama_5_5.json'
67
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3HighRegion_evaluator_llama_5_5.json'
68
+ # eval_file = '/root/videollm-online/data/estp_dataset/train_/estpBenchSq5Cases_fbf_beaconlivel_h_stage2_v2evaluator_deepseek_5_5.json'
69
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours_5cases/LivebaseStage3_high11_evaluator_deepseek_1_2.json'
70
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours_5cases/LivebaseStage2evaluator_deepseek_5_5.json'
71
+ eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours_5cases/LivebaseStage3_high0.31_11evaluator_deepseek_1_2.json'
72
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours_5cases/LivebaseStage3_high11_evaluator_deepseek_1_2.json'
73
+
74
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours/LivebaseStage3_high11_evaluator_deepseek_1_2.json'
75
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours/LivebaseStage2evaluator_deepseek_1_2.json'
76
+ eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours/LivebaseStage3_high0.31_1_lowevaluator_deepseek_1_2.json'
77
+ eval_model = 'EWO'
78
+
79
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVAOneVision_passiveevaluator_deepseek_1_2.json'
80
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVAOneVision_fbf_singleQA_0.175evaluator_deepseek_1_2.json'
81
+
82
+ # eval_model = 'LLaVAOneVision'
83
+
84
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVANextVideo7B_passiveevaluator_deepseek_1_2.json'
85
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVANextVideo7B_fbf_singleQA_0.175evaluator_deepseek_1_2.json'
86
+ # eval_model = 'LLaVANextVideo7B'
87
+
88
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline_5cases/Qwen2VL_fbf_5casesevaluator_deepseek_5_5.json'
89
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/Qwen2VL_fbf_singleQA_0.175evaluator_deepseek_1_2.json'
90
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/Qwen2VL_passiveevaluator_deepseek_1_2.json'
91
+ # eval_model = 'Qwen2VL'
92
+
93
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json'
94
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/InternVLV28_passiveevaluator_deepseek_1_2.json'
95
+ # eval_model = 'InternVLV28'
96
+
97
+
98
+ # eval_model = 'Lavila'
99
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/Lavila_streaming_v2evaluator_deepseek_1_2.json'
100
+
101
+ # eval_model = 'EgoVLP'
102
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json'
103
+
104
+ # eval_model = 'CLIP'
105
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json'
106
+
107
+
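+ # The evaluator output for one run may be split across several files that share the eval_file name
+ # as a prefix; the listing below collects every such shard and concatenates their JSON objects.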
108
+ parent_dir = os.path.dirname(eval_file)
109
+ eval_files = [os.path.join(parent_dir, f) for f in os.listdir(parent_dir) if f.startswith(eval_file.split('/')[-1])]
110
+ eval_result = []
111
+ for eval_file in eval_files:
112
+ eval_result += load_multiple_json(eval_file)
113
+
114
+
115
+ task2number = {
116
+ "Object Recognition": 0,
117
+ "Attribute Perception": 0,
118
+ "Text-Rich Understanding": 0,
119
+ "Object Localization": 0,
120
+ "Object State Change Recognition": 0,
121
+ "Ego Object Localization": 0,
122
+ "Ego Object State Change Recognition": 0,
123
+ "Action Recognition": 0,
124
+ "Object Function": 0,
125
+ "Information Function": 0,
126
+ "Action Reasoning": 0,
127
+ "Task Understanding": 0,
128
+
129
+ }
130
+
131
+ task2score = {
132
+ "Object Recognition": 0,
133
+ "Attribute Perception": 0,
134
+ "Text-Rich Understanding": 0,
135
+ "Object Localization": 0,
136
+ "Object State Change Recognition": 0,
137
+ "Ego Object Localization": 0,
138
+ "Ego Object State Change Recognition": 0,
139
+ "Action Recognition": 0,
140
+ "Object Function": 0,
141
+ "Information Function": 0,
142
+ "Action Reasoning": 0,
143
+ "Task Understanding": 0,
144
+
145
+
146
+ }
147
+
148
+ task2recall = {
149
+ "Object Recognition": 0,
150
+ "Attribute Perception": 0,
151
+ "Text-Rich Understanding": 0,
152
+ "Object Localization": 0,
153
+ "Object State Change Recognition": 0,
154
+ "Ego Object Localization": 0,
155
+ "Ego Object State Change Recognition": 0,
156
+ "Action Recognition": 0,
157
+ "Object Function": 0,
158
+ "Information Function": 0,
159
+ "Action Reasoning": 0,
160
+ "Task Understanding": 0,
161
+
162
+
163
+ }
164
+
165
+ task2recall_score = {
166
+ "Object Recognition": 0,
167
+ "Attribute Perception": 0,
168
+ "Text-Rich Understanding": 0,
169
+ "Object Localization": 0,
170
+ "Object State Change Recognition": 0,
171
+ "Ego Object Localization": 0,
172
+ "Ego Object State Change Recognition": 0,
173
+ "Action Recognition": 0,
174
+ "Object Function": 0,
175
+ "Information Function": 0,
176
+ "Action Reasoning": 0,
177
+ "Task Understanding": 0,
178
+
179
+
180
+ }
181
+
182
+ task2nonrecall_pred = {
183
+ "Object Recognition": 0,
184
+ "Attribute Perception": 0,
185
+ "Text-Rich Understanding": 0,
186
+ "Object Localization": 0,
187
+ "Object State Change Recognition": 0,
188
+ "Ego Object Localization": 0,
189
+ "Ego Object State Change Recognition": 0,
190
+ "Action Recognition": 0,
191
+ "Object Function": 0,
192
+ "Information Function": 0,
193
+ "Action Reasoning": 0,
194
+ "Task Understanding": 0,
195
+
196
+ }
197
+
198
+ task2precision = {
199
+ "Object Recognition": 0,
200
+ "Attribute Perception": 0,
201
+ "Text-Rich Understanding": 0,
202
+ "Object Localization": 0,
203
+ "Object State Change Recognition": 0,
204
+ "Ego Object Localization": 0,
205
+ "Ego Object State Change Recognition": 0,
206
+ "Action Recognition": 0,
207
+ "Object Function": 0,
208
+ "Information Function": 0,
209
+ "Action Reasoning": 0,
210
+ "Task Understanding": 0,
211
+ }
212
+
213
+ def scoreMean(score):
214
+ score = score.max(axis=0)
215
+ return score.mean()
216
+
217
+
218
+ def validScoreMean(score):
219
+ # FP
220
+ nonrecall_pred = (score.max(axis=0) == 0).sum()
221
+
222
+ # valid_score
223
+ valid_score = np.zeros(score.shape[0])
224
+ valid_recall = np.zeros(score.shape[0])
225
+ valid_recall_score = np.zeros(score.shape[0])
226
+ for i,s in enumerate(score):
227
+ valid_s = s[s > 0]
228
+ if len(valid_s) > 0:
229
+ valid_score[i] = valid_s.sum() / (len(valid_s) + (nonrecall_pred / score.shape[0]))
230
+ valid_score[i] = valid_s.sum() / (valid_s.sum() + (nonrecall_pred / score.shape[0]))
231
+ # valid_score[i] = valid_s.max() / (1 + (nonrecall_pred / score.shape[0]))
232
+ # valid_score[i] = valid_s.max() * len(valid_s) / (len(valid_s) + (nonrecall_pred / score.shape[0]))
233
+
234
+ valid_recall[i] = 1
235
+ valid_recall_score[i] = valid_s.sum() / len(valid_s)
236
+ return valid_score.mean(), valid_recall.mean(), valid_recall_score.mean(), nonrecall_pred
237
+
238
+ BETA = 1
239
+ def validScoreF1(score):
240
+
241
+ if score.shape[1] == 0:
242
+ return 0, 0, 0, 0, 0
243
+ # FP
244
+ FP = (score.max(axis=0) == 0).sum()
245
+ # TP
246
+ TP = 0
247
+
248
+ # valid_score
249
+ valid_score = np.zeros(score.shape[0])
250
+ valid_recall = np.zeros(score.shape[0])
251
+ valid_recall_score = np.zeros(score.shape[0])
252
+ for i,s in enumerate(score):
253
+ valid_s = s[s > 0]
254
+ if len(valid_s) > 0:
255
+ # four type compute text-time precision
256
+ valid_score[i] = valid_s.sum() / len(valid_s)
257
+ # valid_score[i] = valid_s.max()
258
+ # valid_score[i] = valid_s.sum()
259
+
260
+ valid_recall[i] = 1
261
+ valid_recall_score[i] = valid_s.sum() / len(valid_s)
262
+ # valid_recall_score[i] = valid_s.max()
263
+ # valid_score[i] = valid_s.sum()
264
+
265
+ TP = valid_score.sum()
266
+ precision = TP / (TP + FP)
267
+ recall = valid_recall_score.mean()
268
+ # breakpoint()
269
+ if precision == 0 or recall == 0:
270
+ F1 = 0
271
+ else:
272
+ F1 = (1 + BETA**2) * precision * recall / ((BETA**2 * precision) + recall)
273
+
274
+ F1 = 2*TP / (2*TP + FP + score.shape[0] - valid_recall.sum())
275
+ return F1, valid_recall.mean(), valid_recall_score.mean(), FP, precision
276
+
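+ # Worked sketch of validScoreF1 with illustrative numbers: for a (num_gt x num_pred) score matrix
+ # [[0.8, 0.0], [0.0, 0.0]], the second prediction matches nothing (FP = 1), the first ground-truth
+ # answer is recalled with score 0.8 and the second is missed, so TP = 0.8 and the returned
+ # F1 = 2*0.8 / (2*0.8 + 1 + 1) is roughly 0.44, with recall 0.5 and precision 0.8 / 1.8, also roughly 0.44.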
277
+ def topkValidScoreMean(score, k=10):
278
+ score = score.max(axis=1)
279
+ score = score[:k]
280
+ valid_score = score[score > 0]
281
+ return valid_score.mean()
282
+
283
+ total_fps = 0
284
+ total_kv_cache_size = 0
285
+ total_response_number = 0
286
+ total_answer_number = 0
287
+ total_precision = 0
288
+ for ll in eval_result:
289
+ if eval_model in ll.keys():
290
+ total_answer_number += len(ll['conversation'])
291
+ this_turn_response_number = 0
292
+ for response in ll[eval_model]:
293
+ if response['role'].lower() == 'assistant':
294
+ if 'fps' in response:
295
+ total_fps+=response['fps']
296
+ total_response_number += 1
297
+ this_turn_response_number += 1
298
+ if 'kv_cache_size' in response:
299
+ total_kv_cache_size += response['kv_cache_size']
300
+ task2number[ll['Task Type'].strip()] += 1
301
+ # breakpoint()
302
+ text_score = np.array(ll['evaluator_output_text']) / 10
303
+ response_score = np.array(ll['evaluator_output_reponse']) / 10  # key spelling follows the stored result files
+ # if ll['Task Type'] == 'Ego Object State Change Recognition':
+ if args.eval_mode == 'all' and eval_model not in ['EgoVLP', 'CLIP', 'Lavila']:
+ score = (text_score+response_score)
+ elif args.eval_mode == 'text':
+ score = text_score
+ elif args.eval_mode == 'response' or eval_model in ['EgoVLP', 'CLIP', 'Lavila']:
+ score = response_score
311
+ score_mean, recall_mean, recall_score_mean, nonrecall_pred, precision = validScoreF1(score)
312
+ task2score[ll['Task Type'].strip()] += score_mean
313
+ task2recall[ll['Task Type'].strip()] += recall_mean
314
+ task2recall_score[ll['Task Type'].strip()] += recall_score_mean
315
+ task2nonrecall_pred[ll['Task Type'].strip()] += nonrecall_pred
316
+ task2precision[ll['Task Type'].strip()] += precision
317
+ total_precision += nonrecall_pred / this_turn_response_number if this_turn_response_number > 0 else 0
318
+
319
+
320
+
321
+ for k,v in task2score.items():
322
+ if task2number[k] == 0:
323
+ task2score[k] = 0
324
+ task2recall[k] = 0
325
+ task2recall_score[k] = 0
326
+ task2nonrecall_pred[k] = 0
327
+ task2precision[k] = 0
328
+ else:
329
+ task2score[k] = v / task2number[k] * 100
330
+ task2recall[k] = task2recall[k] / task2number[k] * 100
331
+ task2recall_score[k] = task2recall_score[k] / task2number[k] * 100
332
+ task2nonrecall_pred[k] = task2nonrecall_pred[k] / task2number[k]
333
+ task2precision[k] = task2precision[k] / task2number[k] * 100
334
+
335
+ print(json.dumps(task2number, indent=4))
336
+ print(json.dumps({k: round(v, 2) for k,v in task2score.items()}, indent=4))
337
+ print(json.dumps(task2recall, indent=4))
338
+ # print(json.dumps(task2recall_score, indent=4))
339
+ # print(json.dumps(task2nonrecall_pred, indent=4))
340
+
341
+ print("Total question number: ", sum(task2number.values()))
342
+
343
+ print(f"Average Score: {sum(task2score.values())/len(task2number.values())}")
344
+
345
+ # Calculate average for first 8 task types
346
+ first_8_tasks = list(task2number.keys())[:8]
347
+ first_8_scores = [task2score[task] for task in first_8_tasks]
348
+ first_8_valid_scores = [score for score in first_8_scores if score > 0]
349
+ first_8_recall_scores = [task2recall[task] for task in first_8_tasks]
350
+ try:
351
+ print(f"Average Score (First 8 Tasks): {sum(first_8_valid_scores)/len(first_8_valid_scores):.2f}")
352
+ except:
353
+ print(f"Average Score (First 8 Tasks): 0.0")
354
+
355
+ # Calculate average for last 4 task types
356
+ last_4_tasks = list(task2number.keys())[-4:]
357
+ last_4_scores = [task2score[task] for task in last_4_tasks]
358
+ last_4_valid_scores = [score for score in last_4_scores if score > 0]
359
+ last_4_recall_scores = [task2recall[task] for task in last_4_tasks]
360
+ try:
361
+ print(f"Average Score (Last 4 Tasks): {sum(last_4_valid_scores)/len(last_4_valid_scores):.2f}")
362
+ except:
363
+ print(f"Average Score (Last 4 Tasks): 0.0")
364
+
365
+
366
+ print(f"Average Recall: {sum(task2recall.values())/len(task2number.values())}")
367
+ print(f"Average Recall Score: {sum(task2recall_score.values())/len(task2number.values())}")
368
+ print(f"Average Precision: {sum(task2precision.values())/len(task2number.values())}")
369
+
370
+
371
+ print(f"Total Question Number: {sum(task2number.values())}")
372
+ print(f"Average FPS: {total_fps/total_response_number}")
373
+ print(f"Average KV Cache: {total_kv_cache_size/total_response_number}")
374
+ print(f"Average Non-Recall Pred: {sum(task2nonrecall_pred.values())/sum(task2number.values())}")
375
+ print(f"Average Response Number: {total_response_number/sum(task2number.values())}")
376
+ print(f"Average Precision: {total_precision/sum(task2number.values()) * 100}")
377
+
378
+
379
+ print(f"[{sum(task2recall.values())/len(task2number.values()):.1f}, {sum(task2precision.values())/len(task2number.values()):.1f}]")
380
+
381
+ def generate_latex_table_row():
382
+ # Generate LaTeX format table row
383
+ print("\n# LaTeX format table row")
384
+ print(f"& {eval_model} ", end="")
385
+
386
+ # First 8 tasks
387
+ for task in list(task2number.keys())[:8]:
388
+ print(f"& {task2score[task]:.1f} ", end="")
389
+
390
+ # Average of first 8 tasks
391
+ print(f"& {sum(first_8_valid_scores)/len(first_8_valid_scores):.1f} ", end="")
392
+
393
+ # Last 4 tasks
394
+ for task in list(task2number.keys())[-4:]:
395
+ print(f"& {task2score[task]:.1f} ", end="")
396
+
397
+ # Average of last 4 tasks
398
+ print(f"& {sum(last_4_valid_scores)/len(last_4_valid_scores):.1f} ", end="")
399
+
400
+ # Overall average
401
+ # print(f"& {sum(task2score.values())/len(task2number.values()):.1f} \\\\")
402
+
403
+ generate_latex_table_row()
404
+
405
+
406
+ # def generate_latex_table_row():
407
+ # # Generate LaTeX format table row
408
+ # print("\n# LaTeX format table row")
409
+ # print(f"& {eval_model} ", end="")
410
+
411
+ # # First 8 tasks
412
+ # for task in list(task2number.keys())[:8]:
413
+ # print(f"& {task2recall[task]:.1f} ", end="")
414
+
415
+ # # Average of first 8 tasks
416
+ # print(f"& {sum(first_8_recall_scores)/len(first_8_recall_scores):.1f} ", end="")
417
+
418
+ # # Last 4 tasks
419
+ # for task in list(task2number.keys())[-4:]:
420
+ # print(f"& {task2recall[task]:.1f} ", end="")
421
+
422
+ # # Average of last 4 tasks
423
+ # print(f"& {sum(last_4_recall_scores)/len(last_4_recall_scores):.1f} ", end="")
424
+
425
+ # # Overall average
426
+ # # print(f"& {sum(task2score.values())/len(task2number.values()):.1f} \\\\")
427
+
428
+ # generate_latex_table_row()
ESTP-Bench/estp_dataset/benchmark/eval_cost.py ADDED
@@ -0,0 +1,170 @@
1
+ import json,os
2
+ import numpy as np
3
+
4
+ def load_multiple_json(file_path):
5
+ """读取包含多个 JSON 对象的文件,并将每个 JSON 对象解析成 Python 对象,存放在列表中。"""
6
+ with open(file_path, 'r', encoding='utf-8') as f:
7
+ content = f.read()
8
+
9
+ decoder = json.JSONDecoder()
10
+ pos = 0
11
+ results = []
12
+ content_length = len(content)
13
+
14
+ while pos < content_length:
15
+ # skip whitespace characters
16
+ while pos < content_length and content[pos].isspace():
17
+ pos += 1
18
+ if pos >= content_length:
19
+ break
20
+ try:
21
+ obj, new_pos = decoder.raw_decode(content, pos)
22
+ results.append(obj)
23
+ pos = new_pos
24
+ except json.JSONDecodeError as e:
25
+ # stop the loop on a parsing error
26
+ print(f"JSON 解析错误: {e}")
27
+ break
28
+ return results
29
+
30
+
31
+
32
+ eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours/LIVE_IT0.95.json'
33
+ eval_model = 'VideollmOnline'
34
+
35
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours_5cases/LivebaseStage2.json'
36
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours/LivebaseStage3_high11_.json'
37
+ # eval_model = 'EWO'
38
+
39
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpCqa_baseline/MiniCPMV_fbf_0.175.json'
40
+ # eval_model = 'MiniCPMV'
41
+
42
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVAOneVision_passiveevaluator_deepseek_5_5.json'
43
+ # eval_model = 'LLaVAOneVision'
44
+
45
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVANextVideo7B_passiveevaluator_deepseek_5_5.json'
46
+ # eval_model = 'LLaVANextVideo7B'
47
+
48
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline_5cases/Qwen2VL_fbf_5casesevaluator_deepseek_5_5.json'
49
+ # eval_model = 'Qwen2VL'
50
+
51
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/InternVLV28_passiveevaluator_deepseek_5_5.json'
52
+ # eval_model = 'InternVLV28'
53
+
54
+
55
+ parent_dir = os.path.dirname(eval_file)
56
+ eval_files = [os.path.join(parent_dir, f) for f in os.listdir(parent_dir) if f.startswith(eval_file.split('/')[-1])]
57
+ eval_result = {}
58
+ for eval_file in eval_files:
59
+ eval_result.update(json.load(open(eval_file)))
60
+
61
+ task2number = {
62
+ "Object Recognition": 0,
63
+ "Attribute Perception": 0,
64
+ "Text-Rich Understanding": 0,
65
+ "Object Localization": 0,
66
+ "Object State Change Recognition": 0,
67
+ "Ego Object Localization": 0,
68
+ "Ego Object State Change Recognition": 0,
69
+ "Action Recognition": 0,
70
+ "Object Function": 0,
71
+ "Information Function": 0,
72
+ "Action Reasoning": 0,
73
+ "Task Understanding": 0,
74
+
75
+ }
76
+
77
+ task2score = {
78
+ "Object Recognition": 0,
79
+ "Attribute Perception": 0,
80
+ "Text-Rich Understanding": 0,
81
+ "Object Localization": 0,
82
+ "Object State Change Recognition": 0,
83
+ "Ego Object Localization": 0,
84
+ "Ego Object State Change Recognition": 0,
85
+ "Action Recognition": 0,
86
+ "Object Function": 0,
87
+ "Information Function": 0,
88
+ "Action Reasoning": 0,
89
+ "Task Understanding": 0,
90
+
91
+
92
+ }
93
+
94
+ task2recall = {
95
+ "Object Recognition": 0,
96
+ "Attribute Perception": 0,
97
+ "Text-Rich Understanding": 0,
98
+ "Object Localization": 0,
99
+ "Object State Change Recognition": 0,
100
+ "Ego Object Localization": 0,
101
+ "Ego Object State Change Recognition": 0,
102
+ "Action Recognition": 0,
103
+ "Object Function": 0,
104
+ "Information Function": 0,
105
+ "Action Reasoning": 0,
106
+ "Task Understanding": 0,
107
+
108
+
109
+ }
110
+
111
+ task2recall_score = {
112
+ "Object Recognition": 0,
113
+ "Attribute Perception": 0,
114
+ "Text-Rich Understanding": 0,
115
+ "Object Localization": 0,
116
+ "Object State Change Recognition": 0,
117
+ "Ego Object Localization": 0,
118
+ "Ego Object State Change Recognition": 0,
119
+ "Action Recognition": 0,
120
+ "Object Function": 0,
121
+ "Information Function": 0,
122
+ "Action Reasoning": 0,
123
+ "Task Understanding": 0,
124
+
125
+
126
+ }
127
+
128
+ task2nonrecall_pred = {
129
+ "Object Recognition": 0,
130
+ "Attribute Perception": 0,
131
+ "Text-Rich Understanding": 0,
132
+ "Object Localization": 0,
133
+ "Object State Change Recognition": 0,
134
+ "Ego Object Localization": 0,
135
+ "Ego Object State Change Recognition": 0,
136
+ "Action Recognition": 0,
137
+ "Object Function": 0,
138
+ "Information Function": 0,
139
+ "Action Reasoning": 0,
140
+ "Task Understanding": 0,
141
+
142
+ }
143
+
144
+
145
+
146
+
147
+ total_fps_last = 0
148
+ total_fps = 0
149
+ total_response_number = 0
150
+ total_kv_cache_size = 0
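+ # Aggregation sketch: each assistant turn may carry per-turn 'fps' and 'kv_cache_size' fields, and a
+ # trailing pseudo-turn with role 'fps' stores the per-question throughput; the loop below averages
+ # these quantities over all responses and questions.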
151
+ for k,v in eval_result.items():
152
+ for kk,vv in v.items():
153
+ for ll in vv:
154
+ if eval_model in ll.keys():
155
+ for response in ll[eval_model]:
156
+ if response['role'] == 'fps':
157
+ total_fps_last+=response['content']
158
+ if response['role'].lower() == 'assistant':
159
+ if 'fps' in response:
160
+ total_fps+=response['fps']
161
+ total_response_number += 1
162
+ if 'kv_cache_size' in response:
163
+ total_kv_cache_size+=response['kv_cache_size']
164
+ task2number[ll['Task Type'].strip()] += 1
165
+
166
+ print('total_qa: ', sum(task2number.values()))
167
+ print(f"Average FPS: {total_fps/total_response_number}")
168
+ print(f"Average KV Cache: {total_kv_cache_size/total_response_number}")
169
+ print("total_fps_last_mean: ", total_fps_last / sum(task2number.values()))
170
+ print(f"Average Response Number: {total_response_number/sum(task2number.values())}")
ESTP-Bench/estp_dataset/benchmark/eval_cqa.py ADDED
@@ -0,0 +1,347 @@
1
+ import json,os
2
+ import numpy as np
3
+
4
+ def load_multiple_json(file_path):
5
+ """读取包含多个 JSON 对象的文件,并将每个 JSON 对象解析成 Python 对象,存放在列表中。"""
6
+ with open(file_path, 'r', encoding='utf-8') as f:
7
+ content = f.read()
8
+
9
+ decoder = json.JSONDecoder()
10
+ pos = 0
11
+ results = []
12
+ content_length = len(content)
13
+
14
+ while pos < content_length:
15
+ # skip whitespace characters
16
+ while pos < content_length and content[pos].isspace():
17
+ pos += 1
18
+ if pos >= content_length:
19
+ break
20
+ try:
21
+ obj, new_pos = decoder.raw_decode(content, pos)
22
+ results.append(obj)
23
+ pos = new_pos
24
+ except json.JSONDecodeError as e:
25
+ # stop the loop on a parsing error
26
+ print(f"JSON 解析错误: {e}")
27
+ break
28
+ return results
29
+
30
+ # import argparse
31
+
32
+ # def parse_args():
33
+ # parser = argparse.ArgumentParser()
34
+ # parser.add_argument('--eval_model', type=str, default='InternVLV28', help='Model to evaluate')
35
+ # parser.add_argument('--inference_mode', type=str, default='default', help='Evaluation mode: passive, fbf, streaming')
36
+ # return parser.parse_args()
37
+
38
+ # args = parse_args()
39
+
40
+ # # Use the command-line arguments or the default values
41
+ # eval_model = args.eval_model
42
+
43
+ # # Set the evaluation file path according to the selected model
44
+ # model_file_mapping = {
45
+ # 'MiniCPMV': {
46
+ # 'passive': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/MiniCPMV_passive_v2evaluator_deepseek_1_2.json',
47
+ # 'fbf': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/MiniCPMV_fbf_0.175evaluator_deepseek_1_2.json'
48
+ # },
49
+ # 'Qwen2VL': {
50
+ # 'passive': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/Qwen2VL_passive_v2evaluator_deepseek_1_2.json',
51
+ # 'fbf': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/Qwen2VL_fbf_0.175evaluator_deepseek_1_2.json'
52
+ # },
53
+ # 'LLaVAOneVision': {
54
+ # 'passive': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/LLaVAOneVision_passive_v2evaluator_deepseek_1_2.json',
55
+ # 'fbf': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/LLaVAOneVision_fbf_0.175evaluator_deepseek_1_2.json'
56
+ # },
57
+ # 'Lavila': {
58
+ # 'passive': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/Lavila_streaming_v2evaluator_deepseek_1_2.json'
59
+ # },
60
+ # 'EgoVLP': {
61
+ # 'passive': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json'
62
+ # },
63
+ # 'CLIP': {
64
+ # 'passive': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json'
65
+ # },
66
+ # 'MMDuet': {
67
+ # 'default': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/MMDuetevaluator_deepseek_1_2.json'
68
+ # },
69
+ # 'LLaVANextVideo7B': {
70
+ # 'passive': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/LLaVANextVideo7B_passive_v2evaluator_deepseek_1_2.json',
71
+ # 'fbf': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/LLaVANextVideo7B_fbf_0.175evaluator_deepseek_1_2.json'
72
+ # },
73
+ # 'InternVLV28': {
74
+ # 'passive': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/InternVLV28_passive_v2evaluator_deepseek_1_2.json',
75
+ # 'fbf': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json'
76
+ # },
77
+ # 'VideollmOnline': {
78
+ # 'default': '/root/videollm-online/data/estp_dataset/estpCqa_baseline/VideollmOnline0.8evaluator_deepseek_1_2.json'
79
+ # }
80
+ # }
81
+
82
+ # # Prefer the fbf variant by default; fall back to passive, then to default/streaming
83
+ # if eval_model in model_file_mapping:
84
+ # # Show the available inference_mode options
85
+ # available_modes = list(model_file_mapping[eval_model].keys())
86
+ # print(f"可用的inference_mode选项:")
87
+ # for i, mode in enumerate(available_modes, 1):
88
+ # print(f"{i}. {mode}")
89
+
90
+ # # If the provided inference_mode is not available, ask the user to choose one
91
+ # if args.inference_mode not in available_modes:
92
+ # choice = input(f"Please choose an inference_mode (1-{len(available_modes)}): ")
93
+ # try:
94
+ # choice_idx = int(choice) - 1
95
+ # if 0 <= choice_idx < len(available_modes):
96
+ # args.inference_mode = available_modes[choice_idx]
97
+ # else:
98
+ # args.inference_mode = available_modes[0] # default to the first option
99
+ # print(f"无效选择,使用默认模式: {args.inference_mode}")
100
+ # except ValueError:
101
+ # args.inference_mode = available_modes[0] # default to the first option
102
+ # print(f"无效输入,使用默认模式: {args.inference_mode}")
103
+
104
+ # eval_file = model_file_mapping[eval_model][args.inference_mode]
105
+ # else:
106
+ # raise ValueError(f"No evaluation file found for model {eval_model}")
107
+
108
+ # print(f"评估模型: {eval_model}")
109
+ # print(f"评估文件: {eval_file}")
110
+
111
+
112
+ eval_file = '/root/videollm-online/data/estp_dataset/estpCqa_baseline/MiniCPMV_passiveevaluator_deepseek_1_2.json' # tmp_predict_VideollmOnline_v2_correctness
113
+ eval_model = 'MiniCPMV'
114
+
115
+ eval_file = '/root/videollm-online/data/estp_dataset/estpCqa_baseline/Qwen2VL_passiveevaluator_deepseek_1_2.json' # tmp_predict_VideollmOnline_v2_correctness
116
+ eval_model = 'Qwen2VL'
117
+
118
+ eval_file = '/2022233235/videollm-online/data/estp_dataset/estpCqa_baseline/VideollmOnline0.9evaluator_deepseek_1_2.json'
119
+ # eval_file = '/2022233235/videollm-online/data/estp_dataset/estpCqa_ours/LIVE_IT0.95evaluator_deepseek_1_2.json'
120
+ # eval_file = '/2022233235/videollm-online/data/estp_dataset/estpCqa_ours/LIVE_IT_smoothing_v2evaluator_deepseek_1_2.json'
121
+ eval_model = 'VideollmOnline' # VideollmOnline MiniCPMV
122
+
123
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpCqa_baseline/LLaVAOneVision_passiveevaluator_deepseek_1_2.json' # tmp_predict_VideollmOnline_v2_correctness
124
+ # eval_model = 'LLaVAOneVision'
125
+ # eval_file = '/2022233235/videollm-online/data/estp_dataset/estpCqa_ours/LivebaseStage2_v3evaluator_deepseek_1_2.json'
126
+ # eval_file = '/2022233235/videollm-online/data/estp_dataset/estpCqa_ours/LivebaseStage2.5evaluator_deepseek_1_2.json'
127
+ eval_file = '/2022233235/videollm-online/data/estp_dataset/estpCqa_ours/LivebaseStage3.5_high0.31_11evaluator_deepseek_1_2.json'
128
+ # eval_file = '/2022233235/videollm-online/data/estp_dataset/estpCqa_ours/LivebaseStage3_v3evaluator_deepseek_1_2.json'
129
+ # eval_file = '/2022233235/videollm-online/data/estp_dataset/estpCqa_ours/LivebaseStage3.5_v3evaluator_deepseek_1_2.json'
130
+ eval_file = '/root/videollm-online/data/estp_dataset/estpCqa_ours/LivebaseStage2_lowevaluator_deepseek_1_2.json'
131
+ eval_model = 'EWO'
132
+
133
+ parent_dir = os.path.dirname(eval_file)
134
+ eval_files = [os.path.join(parent_dir, f) for f in os.listdir(parent_dir) if f.startswith(eval_file.split('/')[-1])]
135
+ eval_result = []
136
+ for eval_file in eval_files:
137
+ eval_result += load_multiple_json(eval_file)
138
+
139
+
140
+ task2number = {
141
+ "Object Relative Context": 0,
142
+ "Task Relative Context": 0,
143
+ }
144
+
145
+ task2recall = {
146
+ "Object Relative Context": 0,
147
+ "Task Relative Context": 0,
148
+ }
149
+
150
+ task2recall_score = {
151
+ "Object Relative Context": 0,
152
+ "Task Relative Context": 0,
153
+ }
154
+
155
+ task2score = {
156
+ "Object Relative Context": 0,
157
+ "Task Relative Context": 0,
158
+ }
159
+
160
+ task2nonrecall_pred = {
161
+ "Object Relative Context": 0,
162
+ "Task Relative Context": 0,
163
+ }
164
+
165
+ task2precision = {
166
+ "Object Relative Context": 0,
167
+ "Task Relative Context": 0,
168
+ }
169
+
170
+ task2bug = {
171
+ "Object Relative Context": 0,
172
+ "Task Relative Context": 0,
173
+ }
174
+
175
+ def scoreMean(score):
176
+ score = score.max(axis=0)
177
+ return score.mean()
178
+
179
+
180
+ def validScoreMean(score):
181
+ # FP
182
+ if len(score) == 0:
183
+ return 0, 0, 0, 0
184
+ nonrecall_pred = (score.max(axis=0) == 0).sum()
185
+
186
+ # valid_score
187
+ valid_score = np.zeros(score.shape[0])
188
+ valid_recall = np.zeros(score.shape[0])
189
+ valid_recall_score = np.zeros(score.shape[0])
190
+ for i,s in enumerate(score):
191
+ valid_s = s[s > 0]
192
+ if len(valid_s) > 0:
193
+ valid_score[i] = valid_s.sum() / (len(valid_s) + (nonrecall_pred / score.shape[0]))
194
+ valid_score[i] = valid_s.sum() / (valid_s.sum() + (nonrecall_pred / score.shape[0]))
195
+ # valid_score[i] = valid_s.max() / (1 + (nonrecall_pred / score.shape[0]))
196
+ # valid_score[i] = valid_s.max() * len(valid_s) / (len(valid_s) + (nonrecall_pred / score.shape[0]))
197
+
198
+ valid_recall[i] = 1
199
+ valid_recall_score[i] = valid_s.sum() / len(valid_s)
200
+ return valid_score.mean(), valid_recall.mean(), valid_recall_score.mean(), nonrecall_pred
201
+
202
+ BETA = 1
203
+ def validScoreF1(score):
204
+
205
+ if len(score) == 0:
206
+ return 0, 0, 0, 0, 0
207
+
208
+ # FP
209
+ FP = (score.max(axis=0) == 0).sum()
210
+ # TP
211
+ TP = 0
212
+
213
+ # valid_score
214
+ valid_score = np.zeros(score.shape[0])
215
+ valid_recall = np.zeros(score.shape[0])
216
+ valid_recall_score = np.zeros(score.shape[0])
217
+ for i,s in enumerate(score):
218
+ valid_s = s[s > 0]
219
+ if len(valid_s) > 0:
220
+ # four type compute text-time precision
221
+ valid_score[i] = valid_s.sum() / len(valid_s)
222
+ # valid_score[i] = valid_s.max()
223
+
224
+ valid_recall[i] = 1
225
+ valid_recall_score[i] = valid_s.sum() / len(valid_s)
226
+ # valid_recall_score[i] = valid_s.max()
227
+
228
+ TP = valid_score.sum()
229
+ precision = TP / (TP + FP)
230
+ if np.isnan(precision):
231
+ precision = 0
232
+ recall = valid_recall_score.mean()
233
+ if precision == 0 or recall == 0:
234
+ F1 = 0
235
+ else:
236
+ F1 = (1 + BETA**2) * precision * recall / ((BETA**2 * precision) + recall)
237
+
238
+ # breakpoint()
239
+ F1 = 2*TP / (2*TP + FP + score.shape[0] - valid_recall.sum())
240
+
241
+ return F1, valid_recall.mean(), valid_recall_score.mean(), FP, precision
242
+
243
+ def topkValidScoreMean(score, k=10):
244
+ score = score.max(axis=1)
245
+ score = score[:k]
246
+ valid_score = score[score > 0]
247
+ return valid_score.mean()
248
+
249
+ total_fps = 0
250
+ total_kv_cache_size = 0
251
+ total_response_number = 0
252
+ total_answer_number = 0
253
+ for ll in eval_result:
254
+ if eval_model in ll.keys():
255
+ total_answer_number += len(ll['conversation'])
256
+ for response in ll[eval_model]:
257
+ if response['role'].lower() == 'assistant':
258
+ if 'fps' in response:
259
+ total_fps+=response['fps']
260
+ total_response_number += 1
261
+ if 'kv_cache_size' in response:
262
+ total_kv_cache_size += response['kv_cache_size']
263
+ task2number[ll['Task Type'].strip()] += 1
264
+ # breakpoint()
265
+ text_score = np.array(ll['evaluator_output_text']) / 10
266
+ response_score = np.array(ll['evaluator_output_reponse']) / 10  # key spelling follows the stored result files
+ # if ll['Task Type'] == 'Ego Object State Change Recognition':
+ score = (text_score+response_score)
269
+ try:
270
+ score_mean, recall_mean, recall_score_mean, nonrecall_pred, precision = validScoreF1(score)
271
+ except:
272
+ breakpoint()
273
+ task2score[ll['Task Type'].strip()] += score_mean
274
+ task2recall[ll['Task Type'].strip()] += recall_mean
275
+ task2recall_score[ll['Task Type'].strip()] += recall_score_mean
276
+ task2nonrecall_pred[ll['Task Type'].strip()] += nonrecall_pred
277
+ task2precision[ll['Task Type'].strip()] += precision
278
+
279
+
280
+
281
+ for k,v in task2score.items():
282
+ if task2number[k] == 0:
283
+ task2score[k] = 0
284
+ task2recall[k] = 0
285
+ task2recall_score[k] = 0
286
+ task2nonrecall_pred[k] = 0
287
+ task2precision[k] = 0
288
+ else:
289
+ task2score[k] = v / task2number[k] * 100
290
+ task2recall[k] = task2recall[k] / task2number[k] * 100
291
+ task2recall_score[k] = task2recall_score[k] / task2number[k] * 100
292
+ # task2nonrecall_pred[k] = task2nonrecall_pred[k] / task2number[k]
293
+ task2precision[k] = task2precision[k] / task2number[k] * 100
294
+
295
+ print(json.dumps(task2number, indent=4))
296
+ print(json.dumps({k: round(v, 2) for k,v in task2score.items()}, indent=4))
297
+ print(json.dumps(task2recall, indent=4))
298
+ print(json.dumps(task2recall_score, indent=4))
299
+
300
+ print("Total question number: ", sum(task2number.values()))
301
+
302
+ print(f"Average Score: {sum(task2score.values())/len(task2number.values())}")
303
+
304
+
305
+ print(f"Average Recall: {sum(task2recall.values())/len(task2number.values())}")
306
+ print(f"Average Recall Score: {sum(task2recall_score.values())/len(task2number.values())}")
307
+ print(f"Average Precision: {sum(task2precision.values())/len(task2number.values())}")
308
+
309
+
310
+
311
+ print(f"Total Question Number: {sum(task2number.values())}")
312
+ print(f"Average FPS: {total_fps/total_response_number}")
313
+ print(f"Average KV Cache: {total_kv_cache_size/total_response_number}")
314
+ print(f"Average Non-Recall Pred: {sum(task2nonrecall_pred.values())/sum(task2number.values())}")
315
+ print(f"Average Response Number: {total_response_number/sum(task2number.values())}")
316
+ print(f"Average Answer Number: {total_answer_number/sum(task2number.values())}")
317
+
318
+
319
+ def generate_latex_table_row():
320
+ # Generate LaTeX format table row
321
+ print("\n# LaTeX format table row")
322
+ print(f"& {eval_model} ", end="")
323
+
324
+ # First 8 tasks
325
+ for task in list(task2number.keys())[:8]:
326
+ print(f"& {task2score[task]:.1f} ", end="")
327
+
328
+
329
+ # Overall average
330
+ print(f"& {sum(task2score.values())/len(task2number.values()):.1f}")
331
+
332
+ generate_latex_table_row()
333
+
334
+ def generate_latex_table_row():
335
+ # Generate LaTeX format table row
336
+ print("\n# LaTeX format table row")
337
+ print(f"& {eval_model} ", end="")
338
+
339
+ # First 8 tasks
340
+ for task in list(task2number.keys())[:8]:
341
+ print(f"& {task2recall[task]:.1f} ", end="")
342
+
343
+
344
+ # Overall average
345
+ print(f"& {sum(task2recall.values())/len(task2number.values()):.1f}")
346
+
347
+ generate_latex_table_row()
ESTP-Bench/estp_dataset/benchmark/eval_findcase.py ADDED
@@ -0,0 +1,446 @@
1
+ import json,os
2
+ import numpy as np
3
+
4
+ def load_multiple_json(file_path):
5
+ """读取包含多个 JSON 对象的文件,并将每个 JSON 对象解析成 Python 对象,存放在列表中。"""
6
+ with open(file_path, 'r', encoding='utf-8') as f:
7
+ content = f.read()
8
+
9
+ decoder = json.JSONDecoder()
10
+ pos = 0
11
+ results = []
12
+ content_length = len(content)
13
+
14
+ while pos < content_length:
15
+ # skip whitespace characters
16
+ while pos < content_length and content[pos].isspace():
17
+ pos += 1
18
+ if pos >= content_length:
19
+ break
20
+ try:
21
+ obj, new_pos = decoder.raw_decode(content, pos)
22
+ results.append(obj)
23
+ pos = new_pos
24
+ except json.JSONDecodeError as e:
25
+ # stop the loop on a parsing error
26
+ print(f"JSON 解析错误: {e}")
27
+ break
28
+ return results
29
+
30
+
31
+ import argparse
32
+
33
+ def parse_args():
34
+ parser = argparse.ArgumentParser()
35
+ parser.add_argument('--eval_file', type=str, default='')
36
+ parser.add_argument('--eval_mode', type=str, default='all')
37
+ return parser.parse_args()
38
+
39
+ args = parse_args()
40
+
41
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estp_bench_sq_MiniCPMV_passiveevaluator_llama_5_5.json' # tmp_predict_VideollmOnline_v2_correctness
42
+ eval_file = '/root/videollm-online/data/estp_dataset/result/estp_bench_sq_MiniCPMV_passiveevaluator_deepseek_1_2.json'
43
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline_5cases/MiniCPMV_fbf_5casesevaluator_deepseek_5_5.json'
44
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline_5cases/MiniCPMV_fbf_5cases_0.175evaluator_deepseek_5_5.json'
45
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline_5cases/MiniCPMV_fbf_5cases_0.175evaluator_deepseek_1_2.json'
46
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175evaluator_deepseek_1_2.json'
47
+ # eval_model = 'MiniCPMV'
48
+
49
+ # eval_file = '/root/videollm-online/data/estp_dataset/estp_bench_sq_VideollmOnline0.9evaluator_deepseek_1_2.json'
50
+ # eval_file = '/root/videollm-online/data/estp_dataset/estp_bench_sq_VideollmOnline0.8evaluator_deepseek_1_2.json'
51
+ # # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours/LIVE_IT0.95.json'
52
+ # eval_model = 'VideollmOnline' # VideollmOnline MiniCPMV
53
+
54
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/MMDuetevaluator_deepseek_1_2.json'
55
+ # eval_model = 'MMDuet'
56
+
57
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estp_bench_sq_EWO_frame_by_frame_v6_fusion_dinov2evaluator_llama_5_5.json'
58
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estp_bench_sq_EWO_frame_by_frame_fitVal_5_cases_v2evaluator_llama_5_5.json'
59
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3Firstevaluator_llama_5_5.json'
60
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOInDomainITstage2evaluator_llama_5_5.json'
61
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq_fbf_EWOInDomainITstage2evaluator_llama_5_5.json'
62
+
63
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3First0.6evaluator_llama_5_5.json'
64
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3First0.7evaluator_llama_5_5.json'
65
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3highv2evaluator_llama_5_5.json'
66
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3firstv2evaluator_llama_5_5.json'
67
+ # eval_file = '/root/videollm-online/data/estp_dataset/result/estpBenchSq5Cases_fbf_EWOFitValStage3HighRegion_evaluator_llama_5_5.json'
68
+ # eval_file = '/root/videollm-online/data/estp_dataset/train_/estpBenchSq5Cases_fbf_beaconlivel_h_stage2_v2evaluator_deepseek_5_5.json'
69
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours_5cases/LivebaseStage3_high11_evaluator_deepseek_1_2.json'
70
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours_5cases/LivebaseStage2evaluator_deepseek_5_5.json'
71
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours_5cases/LivebaseStage3_high0.31_11evaluator_deepseek_1_2.json'
72
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours_5cases/LivebaseStage3_high11_evaluator_deepseek_1_2.json'
73
+
74
+ eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours/LivebaseStage3_high11_evaluator_deepseek_1_2.json'
75
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_ours/LivebaseStage2evaluator_deepseek_1_2.json'
76
+ eval_model = 'EWO'
77
+
78
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVAOneVision_passiveevaluator_deepseek_1_2.json'
79
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVAOneVision_fbf_singleQA_0.175evaluator_deepseek_1_2.json'
80
+
81
+ # eval_model = 'LLaVAOneVision'
82
+
83
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVANextVideo7B_passiveevaluator_deepseek_1_2.json'
84
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVANextVideo7B_fbf_singleQA_0.175evaluator_deepseek_1_2.json'
85
+ # eval_model = 'LLaVANextVideo7B'
86
+
87
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline_5cases/Qwen2VL_fbf_5casesevaluator_deepseek_5_5.json'
88
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/Qwen2VL_fbf_singleQA_0.175evaluator_deepseek_1_2.json'
89
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/Qwen2VL_passiveevaluator_deepseek_1_2.json'
90
+ # eval_model = 'Qwen2VL'
91
+
92
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json'
93
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/InternVLV28_passiveevaluator_deepseek_1_2.json'
94
+ # eval_model = 'InternVLV28'
95
+
96
+
97
+ # eval_model = 'Lavila'
98
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/Lavila_streaming_v2evaluator_deepseek_1_2.json'
99
+
100
+ # eval_model = 'EgoVLP'
101
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json'
102
+
103
+ # eval_model = 'CLIP'
104
+ # eval_file = '/root/videollm-online/data/estp_dataset/estpSqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json'
105
+
106
+
107
+ parent_dir = os.path.dirname(eval_file)
108
+ eval_files = [os.path.join(parent_dir, f) for f in os.listdir(parent_dir) if f.startswith(eval_file.split('/')[-1])]
109
+ eval_result = []
110
+ for eval_file in eval_files:
111
+ eval_result += load_multiple_json(eval_file)
112
+
113
+
114
+ task2number = {
115
+ "Object Recognition": 0,
116
+ "Attribute Perception": 0,
117
+ "Text-Rich Understanding": 0,
118
+ "Object Localization": 0,
119
+ "Object State Change Recognition": 0,
120
+ "Ego Object Localization": 0,
121
+ "Ego Object State Change Recognition": 0,
122
+ "Action Recognition": 0,
123
+ "Object Function": 0,
124
+ "Information Function": 0,
125
+ "Action Reasoning": 0,
126
+ "Task Understanding": 0,
127
+
128
+ }
129
+
130
+ task2score = {
131
+ "Object Recognition": 0,
132
+ "Attribute Perception": 0,
133
+ "Text-Rich Understanding": 0,
134
+ "Object Localization": 0,
135
+ "Object State Change Recognition": 0,
136
+ "Ego Object Localization": 0,
137
+ "Ego Object State Change Recognition": 0,
138
+ "Action Recognition": 0,
139
+ "Object Function": 0,
140
+ "Information Function": 0,
141
+ "Action Reasoning": 0,
142
+ "Task Understanding": 0,
143
+
144
+
145
+ }
146
+
147
+ task2recall = {
148
+ "Object Recognition": 0,
149
+ "Attribute Perception": 0,
150
+ "Text-Rich Understanding": 0,
151
+ "Object Localization": 0,
152
+ "Object State Change Recognition": 0,
153
+ "Ego Object Localization": 0,
154
+ "Ego Object State Change Recognition": 0,
155
+ "Action Recognition": 0,
156
+ "Object Function": 0,
157
+ "Information Function": 0,
158
+ "Action Reasoning": 0,
159
+ "Task Understanding": 0,
160
+
161
+
162
+ }
163
+
164
+ task2recall_score = {
165
+ "Object Recognition": 0,
166
+ "Attribute Perception": 0,
167
+ "Text-Rich Understanding": 0,
168
+ "Object Localization": 0,
169
+ "Object State Change Recognition": 0,
170
+ "Ego Object Localization": 0,
171
+ "Ego Object State Change Recognition": 0,
172
+ "Action Recognition": 0,
173
+ "Object Function": 0,
174
+ "Information Function": 0,
175
+ "Action Reasoning": 0,
176
+ "Task Understanding": 0,
177
+
178
+
179
+ }
180
+
181
+ task2nonrecall_pred = {
182
+ "Object Recognition": 0,
183
+ "Attribute Perception": 0,
184
+ "Text-Rich Understanding": 0,
185
+ "Object Localization": 0,
186
+ "Object State Change Recognition": 0,
187
+ "Ego Object Localization": 0,
188
+ "Ego Object State Change Recognition": 0,
189
+ "Action Recognition": 0,
190
+ "Object Function": 0,
191
+ "Information Function": 0,
192
+ "Action Reasoning": 0,
193
+ "Task Understanding": 0,
194
+
195
+ }
196
+
197
+ task2precision = {
198
+ "Object Recognition": 0,
199
+ "Attribute Perception": 0,
200
+ "Text-Rich Understanding": 0,
201
+ "Object Localization": 0,
202
+ "Object State Change Recognition": 0,
203
+ "Ego Object Localization": 0,
204
+ "Ego Object State Change Recognition": 0,
205
+ "Action Recognition": 0,
206
+ "Object Function": 0,
207
+ "Information Function": 0,
208
+ "Action Reasoning": 0,
209
+ "Task Understanding": 0,
210
+ }
211
+
212
+ def scoreMean(score):
213
+ score = score.max(axis=0)
214
+ return score.mean()
215
+
216
+
217
+ def validScoreMean(score):
218
+ # FP
219
+ nonrecall_pred = (score.max(axis=0) == 0).sum()
220
+
221
+ # valid_score
222
+ valid_score = np.zeros(score.shape[0])
223
+ valid_recall = np.zeros(score.shape[0])
224
+ valid_recall_score = np.zeros(score.shape[0])
225
+ for i,s in enumerate(score):
226
+ valid_s = s[s > 0]
227
+ if len(valid_s) > 0:
228
+ # valid_score[i] = valid_s.sum() / (len(valid_s) + (nonrecall_pred / score.shape[0]))
229
+ valid_score[i] = valid_s.sum() / (valid_s.sum() + (nonrecall_pred / score.shape[0]))
230
+ # valid_score[i] = valid_s.max() / (1 + (nonrecall_pred / score.shape[0]))
231
+ # valid_score[i] = valid_s.max() * len(valid_s) / (len(valid_s) + (nonrecall_pred / score.shape[0]))
232
+
233
+ valid_recall[i] = 1
234
+ valid_recall_score[i] = valid_s.sum() / len(valid_s)
235
+ return valid_score.mean(), valid_recall.mean(), valid_recall_score.mean(), nonrecall_pred
236
+
237
+ BETA = 1
238
+ def validScoreF1(score):
239
+
240
+ if score.shape[1] == 0:
241
+ return 0, 0, 0, 0, 0
242
+ # FP
243
+ FP = (score.max(axis=0) == 0).sum()
244
+ # TP
245
+ TP = 0
246
+
247
+ # valid_score
248
+ valid_score = np.zeros(score.shape[0])
249
+ valid_recall = np.zeros(score.shape[0])
250
+ valid_recall_score = np.zeros(score.shape[0])
251
+ for i,s in enumerate(score):
252
+ valid_s = s[s > 0]
253
+ if len(valid_s) > 0:
254
+ # alternative ways to compute the per-gold text score; the mean of the matched scores is used
255
+ valid_score[i] = valid_s.sum() / len(valid_s)
256
+ # valid_score[i] = valid_s.max()
257
+ # valid_score[i] = valid_s.sum()
258
+
259
+ valid_recall[i] = 1
260
+ valid_recall_score[i] = valid_s.sum() / len(valid_s)
261
+ # valid_recall_score[i] = valid_s.max()
262
+ # valid_score[i] = valid_s.sum()
263
+
264
+ TP = valid_score.sum()
265
+ precision = TP / (TP + FP)
266
+ recall = valid_recall_score.mean()
267
+ if precision == 0 or recall == 0:
268
+ F1 = 0
269
+ else:
270
+ F1 = (1 + BETA**2) * precision * recall / ((BETA**2 * precision) + recall)
271
+
272
+ F1 = 2*TP / (2*TP + FP + score.shape[0] - valid_recall.sum())  # soft F1 over matched gold turns; this overrides the F-beta value computed above
273
+ return F1, valid_recall.mean(), valid_recall_score.mean(), FP, precision
274
+
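+ # Illustrative example for validScoreF1 above (not used by the pipeline): for a hypothetical
+ # 2 gold x 3 pred matrix score = [[0.8, 0.0, 0.0], [0.0, 0.6, 0.0]], the third prediction matches
+ # no gold turn, so FP = 1; both gold rows are recalled with row means 0.8 and 0.6, giving
+ # TP = 1.4 and F1 = 2*1.4 / (2*1.4 + 1 + 0) = 2.8/3.8 ≈ 0.74 under the final soft-F1 formula.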
275
+ def topkValidScoreMean(score, k=10):
276
+ score = score.max(axis=1)
277
+ score = score[:k]
278
+ valid_score = score[score > 0]
279
+ return valid_score.mean()
280
+
281
+ total_fps = 0
282
+ total_kv_cache_size = 0
283
+ total_response_number = 0
284
+ total_answer_number = 0
285
+ total_precision = 0
286
+ high_performance_cases = []
287
+
288
+ for ll in eval_result:
289
+ if eval_model in ll.keys():
290
+ total_answer_number += len(ll['conversation'])
291
+ this_turn_response_number = 0
292
+ for response in ll[eval_model]:
293
+ if response['role'].lower() == 'assistant':
294
+ if 'fps' in response:
295
+ total_fps+=response['fps']
296
+ total_response_number += 1
297
+ this_turn_response_number += 1
298
+ if 'kv_cache_size' in response:
299
+ total_kv_cache_size += response['kv_cache_size']
300
+ task2number[ll['Task Type'].strip()] += 1
301
+ # breakpoint()
302
+ text_score = np.array(ll['evaluator_output_text']) / 10
303
+ reponse_score = np.array(ll['evaluator_output_reponse']) / 10
304
+ # if ll['Task Type'] == 'Ego Object State Change Recognition':
305
+ if args.eval_mode == 'all' and eval_model not in ['EgoVLP', 'CLIP', 'Lavila']:
306
+ score = (text_score+reponse_score)
307
+ elif args.eval_mode == 'text':
308
+ score = text_score
309
+ elif args.eval_mode == 'response' or eval_model in ['EgoVLP', 'CLIP', 'Lavila']:
310
+ score = reponse_score
311
+ score_mean, recall_mean, recall_score_mean, nonrecall_pred, precision = validScoreF1(score)
312
+
313
+ # save cases with high scores and more than one ground-truth answer
314
+ if (score_mean > 0.5 and ll['Task Type'].strip() not in ['Task Understanding', 'Action Reasoning']) or score_mean > 0.7:
315
+ if score.shape[0] / score.shape[1] > 0.5 and score.shape[0] > 1:
316
+ high_performance_cases.append({
317
+ 'case_id': ll.get('id', ''),
318
+ 'task_type': ll['Task Type'].strip(),
319
+ 'score_mean': float(score_mean),
320
+ 'gt_count': int(np.sum(score > 0)),
321
+ 'data': ll
322
+ })
323
+
324
+ task2score[ll['Task Type'].strip()] += score_mean
325
+ task2recall[ll['Task Type'].strip()] += recall_mean
326
+ task2recall_score[ll['Task Type'].strip()] += recall_score_mean
327
+ task2nonrecall_pred[ll['Task Type'].strip()] += nonrecall_pred
328
+ task2precision[ll['Task Type'].strip()] += precision
329
+ total_precision += nonrecall_pred / this_turn_response_number if this_turn_response_number > 0 else 0
330
+
331
+ # save the high-performance cases to a JSON file
332
+ if high_performance_cases:
333
+ output_file = os.path.join(os.path.dirname(eval_file), f'high_performance_cases_{eval_model}.json')
334
+ with open(output_file, 'w', encoding='utf-8') as f:
335
+ json.dump(high_performance_cases, f, ensure_ascii=False, indent=2)
336
+ print(f"Saved {len(high_performance_cases)} high performance cases to {output_file}")
337
+
338
+
339
+
340
+ for k,v in task2score.items():
341
+ if task2number[k] == 0:
342
+ task2score[k] = 0
343
+ task2recall[k] = 0
344
+ task2recall_score[k] = 0
345
+ task2nonrecall_pred[k] = 0
346
+ task2precision[k] = 0
347
+ else:
348
+ task2score[k] = v / task2number[k] * 100
349
+ task2recall[k] = task2recall[k] / task2number[k] * 100
350
+ task2recall_score[k] = task2recall_score[k] / task2number[k] * 100
351
+ # task2nonrecall_pred[k] = task2nonrecall_pred[k] / task2number[k]
352
+ task2precision[k] = task2precision[k] / task2number[k] * 100
353
+
354
+ # print(json.dumps(task2number, indent=4))
355
+ # print(json.dumps({k: round(v, 2) for k,v in task2score.items()}, indent=4))
356
+ # print(json.dumps(task2recall, indent=4))
357
+ # print(json.dumps(task2recall_score, indent=4))
358
+ # print(json.dumps(task2nonrecall_pred, indent=4))
359
+
360
+ print("Total question number: ", sum(task2number.values()))
361
+
362
+ print(f"Average Score: {sum(task2score.values())/len(task2number.values())}")
363
+
364
+ # Calculate average for first 8 task types
365
+ first_8_tasks = list(task2number.keys())[:8]
366
+ first_8_scores = [task2score[task] for task in first_8_tasks]
367
+ first_8_valid_scores = [score for score in first_8_scores if score > 0]
368
+ first_8_recall_scores = [task2recall[task] for task in first_8_tasks]
369
+ try:
370
+ print(f"Average Score (First 8 Tasks): {sum(first_8_valid_scores)/len(first_8_valid_scores):.2f}")
371
+ except ZeroDivisionError:
372
+ print(f"Average Score (First 8 Tasks): 0.0")
373
+
374
+ # Calculate average for last 4 task types
375
+ last_4_tasks = list(task2number.keys())[-4:]
376
+ last_4_scores = [task2score[task] for task in last_4_tasks]
377
+ last_4_valid_scores = [score for score in last_4_scores if score > 0]
378
+ last_4_recall_scores = [task2recall[task] for task in last_4_tasks]
379
+ try:
380
+ print(f"Average Score (Last 4 Tasks): {sum(last_4_valid_scores)/len(last_4_valid_scores):.2f}")
381
+ except ZeroDivisionError:
382
+ print(f"Average Score (Last 4 Tasks): 0.0")
383
+
384
+
385
+ print(f"Average Recall: {sum(task2recall.values())/len(task2number.values())}")
386
+ print(f"Average Recall Score: {sum(task2recall_score.values())/len(task2number.values())}")
387
+ print(f"Average Precision: {sum(task2precision.values())/len(task2number.values())}")
388
+
389
+
390
+ print(f"Total Question Number: {sum(task2number.values())}")
391
+ print(f"Average FPS: {total_fps/total_response_number}")
392
+ print(f"Average KV Cache: {total_kv_cache_size/total_response_number}")
393
+ print(f"Average Non-Recall Pred: {sum(task2nonrecall_pred.values())/sum(task2number.values())}")
394
+ print(f"Average Response Number: {total_response_number/sum(task2number.values())}")
395
+ print(f"Average Precision: {total_precision/sum(task2number.values()) * 100}")  # note: this is the mean per-question ratio of non-recalled predictions, not the macro precision printed above
396
+
397
+ print(f"[{sum(task2recall.values())/len(task2number.values()):.1f}, {sum(task2precision.values())/len(task2number.values()):.1f}]")
398
+
399
+ def generate_latex_table_row():
400
+ # Generate LaTeX format table row
401
+ print("\n# LaTeX format table row")
402
+ print(f"& {eval_model} ", end="")
403
+
404
+ # First 8 tasks
405
+ for task in list(task2number.keys())[:8]:
406
+ print(f"& {task2score[task]:.1f} ", end="")
407
+
408
+ # Average of first 8 tasks
409
+ print(f"& {sum(first_8_valid_scores)/len(first_8_valid_scores):.1f} ", end="")
410
+
411
+ # Last 4 tasks
412
+ for task in list(task2number.keys())[-4:]:
413
+ print(f"& {task2score[task]:.1f} ", end="")
414
+
415
+ # Average of last 4 tasks
416
+ print(f"& {sum(last_4_valid_scores)/len(last_4_valid_scores):.1f} ", end="")
417
+
418
+ # Overall average
419
+ # print(f"& {sum(task2score.values())/len(task2number.values()):.1f} \\\\")
420
+
421
+ generate_latex_table_row()
422
+
423
+
424
+ # def generate_latex_table_row():
425
+ # # Generate LaTeX format table row
426
+ # print("\n# LaTeX format table row")
427
+ # print(f"& {eval_model} ", end="")
428
+
429
+ # # First 8 tasks
430
+ # for task in list(task2number.keys())[:8]:
431
+ # print(f"& {task2recall[task]:.1f} ", end="")
432
+
433
+ # # Average of first 8 tasks
434
+ # print(f"& {sum(first_8_recall_scores)/len(first_8_recall_scores):.1f} ", end="")
435
+
436
+ # # Last 4 tasks
437
+ # for task in list(task2number.keys())[-4:]:
438
+ # print(f"& {task2recall[task]:.1f} ", end="")
439
+
440
+ # # Average of last 4 tasks
441
+ # print(f"& {sum(last_4_recall_scores)/len(last_4_recall_scores):.1f} ", end="")
442
+
443
+ # # Overall average
444
+ # # print(f"& {sum(task2score.values())/len(task2number.values()):.1f} \\\\")
445
+
446
+ # generate_latex_table_row()
ESTP-Bench/estp_dataset/benchmark/eval_singleQA.sh ADDED
@@ -0,0 +1,387 @@
1
+ ######################################################## passive inference ########################################################
2
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
3
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
4
+ --pred_file /root/videollm-online/data/estp_dataset/result/estp_bench_sq_MiniCPMV_passive.json \
5
+ --eval_model MiniCPMV \
6
+ --concat True \
7
+ --anticipation 1 \
8
+ --latency 2 \
9
+ --evaluator_llm deepseek \
10
+ --master_port 2923 \
11
+
12
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
13
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
14
+ --pred_file /root/videollm-online/data/estp_dataset/estpCqa_baseline/MiniCPMV_passive_v2.json \
15
+ --eval_model MiniCPMV \
16
+ --concat True \
17
+ --anticipation 1 \
18
+ --latency 2 \
19
+ --evaluator_llm deepseek \
20
+ --qa_type MultiQA \
21
+ --master_port 2923 \
22
+
23
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
24
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
25
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_baseline/Qwen2VL_passive.json \
26
+ --eval_model Qwen2VL \
27
+ --concat True \
28
+ --anticipation 1 \
29
+ --latency 2 \
30
+ --evaluator_llm deepseek \
31
+ --master_port 2918 \
32
+
33
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
34
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
35
+ --pred_file /root/videollm-online/data/estp_dataset/estpCqa_baseline/Qwen2VL_passive_v2.json \
36
+ --eval_model Qwen2VL \
37
+ --concat True \
38
+ --anticipation 1 \
39
+ --latency 2 \
40
+ --evaluator_llm deepseek \
41
+ --qa_type MultiQA \
42
+ --master_port 2918 \
43
+
44
+
45
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
46
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
47
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVAOneVision_passive.json \
48
+ --eval_model LLaVAOneVision \
49
+ --anticipation 1 \
50
+ --latency 2 \
51
+ --evaluator_llm deepseek \
52
+
53
+ conda activate videollm
54
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
55
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
56
+ --pred_file /root/videollm-online/data/estp_dataset/estpCqa_baseline/LLaVAOneVision_passive_v2.json \
57
+ --eval_model LLaVAOneVision \
58
+ --anticipation 1 \
59
+ --latency 2 \
60
+ --qa_type MultiQA \
61
+ --evaluator_llm deepseek \
62
+ --master_port 2298 \
63
+
64
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
65
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
66
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVANextVideo7B_passive.json \
67
+ --eval_model LLaVANextVideo7B \
68
+ --anticipation 1 \
69
+ --latency 2 \
70
+ --evaluator_llm deepseek \
71
+ --master_port 3980 \
72
+
73
+ conda activate videollm
74
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
75
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
76
+ --pred_file /root/videollm-online/data/estp_dataset/estpCqa_baseline/LLaVANextVideo7B_passive_v2.json \
77
+ --eval_model LLaVANextVideo7B \
78
+ --anticipation 1 \
79
+ --latency 2 \
80
+ --qa_type MultiQA \
81
+ --evaluator_llm deepseek \
82
+ --master_port 3980 \
83
+
84
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
85
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
86
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_baseline/InternVLV28_passive.json \
87
+ --eval_model InternVLV28 \
88
+ --anticipation 1 \
89
+ --latency 2 \
90
+ --evaluator_llm deepseek \
91
+ --master_port 3989 \
92
+
93
+
94
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
95
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
96
+ --pred_file /root/videollm-online/data/estp_dataset/estpCqa_baseline/InternVLV28_passive_v2.json \
97
+ --eval_model InternVLV28 \
98
+ --anticipation 1 \
99
+ --latency 2 \
100
+ --qa_type MultiQA \
101
+ --evaluator_llm deepseek \
102
+ --master_port 3989 \
103
+ ######################################################## turn ask on 5 cases ########################################################
104
+
105
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
106
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
107
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_baseline/MiniCPMV_fbf_singleQA_0.175_v2.json \
108
+ --eval_model MiniCPMV \
109
+ --concat True \
110
+ --anticipation 1 \
111
+ --latency 2 \
112
+ --evaluator_llm deepseek \
113
+ --master_port 2951 \
114
+
115
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
116
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
117
+ --pred_file /root/videollm-online/data/estp_dataset/estpCqa_baseline/MiniCPMV_fbf_0.175.json \
118
+ --eval_model MiniCPMV \
119
+ --concat True \
120
+ --anticipation 1 \
121
+ --latency 2 \
122
+ --qa_type MultiQA \
123
+ --evaluator_llm deepseek \
124
+ --master_port 3951 \
125
+
126
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
127
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
128
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_baseline/Qwen2VL_fbf_singleQA_0.175.json \
129
+ --eval_model Qwen2VL \
130
+ --concat True \
131
+ --anticipation 1 \
132
+ --latency 2 \
133
+ --evaluator_llm deepseek \
134
+ --master_port 2951 \
135
+
136
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
137
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
138
+ --pred_file /root/videollm-online/data/estp_dataset/estpCqa_baseline/Qwen2VL_fbf_0.175.json \
139
+ --eval_model Qwen2VL \
140
+ --concat True \
141
+ --anticipation 1 \
142
+ --latency 2 \
143
+ --qa_type MultiQA \
144
+ --evaluator_llm deepseek \
145
+ --master_port 2151 \
146
+
147
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
148
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
149
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVAOneVision_fbf_singleQA_0.175.json \
150
+ --eval_model LLaVAOneVision \
151
+ --concat True \
152
+ --anticipation 1 \
153
+ --latency 2 \
154
+ --evaluator_llm deepseek \
155
+ --master_port 2951 \
156
+
157
+ conda activate videollm
158
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
159
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
160
+ --pred_file /root/videollm-online/data/estp_dataset/estpCqa_baseline/LLaVAOneVision_fbf_0.175.json \
161
+ --eval_model LLaVAOneVision \
162
+ --concat True \
163
+ --anticipation 1 \
164
+ --latency 2 \
165
+ --qa_type MultiQA \
166
+ --evaluator_llm deepseek \
167
+ --master_port 2951 \
168
+
169
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
170
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
171
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_baseline/InternVLV28_fbf_0.175.json \
172
+ --eval_model InternVLV28 \
173
+ --concat True \
174
+ --anticipation 1 \
175
+ --latency 2 \
176
+ --evaluator_llm deepseek \
177
+ --master_port 1951 \
178
+
179
+
180
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
181
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
182
+ --pred_file /root/videollm-online/data/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175.json \
183
+ --eval_model InternVLV28 \
184
+ --concat True \
185
+ --anticipation 1 \
186
+ --latency 2 \
187
+ --qa_type MultiQA \
188
+ --evaluator_llm deepseek \
189
+ --master_port 2951 \
190
+
191
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
192
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
193
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_baseline/LLaVANextVideo7B_fbf_singleQA_0.175.json \
194
+ --eval_model LLaVANextVideo7B \
195
+ --concat True \
196
+ --anticipation 1 \
197
+ --latency 2 \
198
+ --evaluator_llm deepseek \
199
+ --master_port 1921 \
200
+
201
+ conda activate videollm
202
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
203
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
204
+ --pred_file /root/videollm-online/data/estp_dataset/estpCqa_baseline/LLaVANextVideo7B_fbf_0.175.json \
205
+ --eval_model LLaVANextVideo7B \
206
+ --concat True \
207
+ --anticipation 1 \
208
+ --latency 2 \
209
+ --qa_type MultiQA \
210
+ --evaluator_llm deepseek \
211
+ --master_port 3951 \
212
+
213
+ ######################################################## streaming inference ########################################################
214
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
215
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
216
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_baseline/EgoVLP_streaming_v2.json \
217
+ --eval_model EgoVLP \
218
+ --anticipation 1 \
219
+ --latency 2 \
220
+ --evaluator_llm deepseek \
221
+ --master_port 2851 \
222
+
223
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
224
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
225
+ --pred_file /root/videollm-online/data/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2.json \
226
+ --eval_model EgoVLP \
227
+ --anticipation 1 \
228
+ --latency 2 \
229
+ --qa_type MultiQA \
230
+ --evaluator_llm deepseek \
231
+ --master_port 2951 \
232
+
233
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
234
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
235
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_baseline/Lavila_streaming_v2.json \
236
+ --eval_model Lavila \
237
+ --anticipation 1 \
238
+ --latency 2 \
239
+ --evaluator_llm deepseek \
240
+
241
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
242
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
243
+ --pred_file /root/videollm-online/data/estp_dataset/estpCqa_baseline/Lavila_streaming_v2.json \
244
+ --eval_model Lavila \
245
+ --anticipation 1 \
246
+ --latency 2 \
247
+ --qa_type MultiQA \
248
+ --evaluator_llm deepseek \
249
+ --master_port 2951 \
250
+
251
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
252
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
253
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_baseline/CLIP_streaming_v2.json \
254
+ --eval_model CLIP \
255
+ --anticipation 1 \
256
+ --latency 2 \
257
+ --evaluator_llm deepseek \
258
+ --master_port 2551 \
259
+
260
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
261
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
262
+ --pred_file /root/videollm-online/data/estp_dataset/estpCqa_baseline/CLIP_streaming_v2.json \
263
+ --eval_model CLIP \
264
+ --anticipation 1 \
265
+ --latency 2 \
266
+ --evaluator_llm deepseek \
267
+ --qa_type MultiQA \
268
+ --master_port 2651 \
269
+
270
+ ######################################################## online inference ########################################################
271
+
272
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
273
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
274
+ --pred_file /root/videollm-online/data/estp_dataset/estp_bench_sq_VideollmOnline0.8.json \
275
+ --eval_model VideollmOnline \
276
+ --concat True \
277
+ --anticipation 1 \
278
+ --latency 2 \
279
+ --evaluator_llm deepseek \
280
+
281
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
282
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
283
+ --pred_file /root/videollm-online/data/estp_dataset/estp_bench_sq_VideollmOnline0.9.json \
284
+ --eval_model VideollmOnline \
285
+ --concat True \
286
+ --anticipation 1 \
287
+ --latency 2 \
288
+ --evaluator_llm deepseek \
289
+ --master_port 3019
290
+
291
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
292
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
293
+ --pred_file /root/videollm-online/data/estp_dataset/estpCqa_baseline/VideollmOnline0.8.json \
294
+ --eval_model VideollmOnline \
295
+ --concat True \
296
+ --anticipation 1 \
297
+ --latency 2 \
298
+ --qa_type MultiQA \
299
+ --evaluator_llm deepseek \
300
+ --master_port 3019
301
+
302
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
303
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
304
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_baseline/MMDuet.json \
305
+ --eval_model MMDuet \
306
+ --concat True \
307
+ --anticipation 1 \
308
+ --latency 2 \
309
+ --evaluator_llm deepseek \
310
+ --master_port 3019
311
+
312
+ conda activate videollm
313
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
314
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
315
+ --pred_file /root/videollm-online/data/estp_dataset/estpCqa_baseline/MMDuet.json \
316
+ --eval_model MMDuet \
317
+ --concat True \
318
+ --anticipation 1 \
319
+ --latency 2 \
320
+ --qa_type MultiQA \
321
+ --evaluator_llm deepseek \
322
+ --master_port 3019
323
+
324
+ ######################################################## ours inference ########################################################
325
+
326
+
327
+ # single QA
328
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
329
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
330
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_ours/LivebaseStage2.json \
331
+ --eval_model EWO \
332
+ --concat True \
333
+ --anticipation 5 \
334
+ --latency 5 \
335
+ --evaluator_llm deepseek \
336
+ --master_port 2900 \
337
+
338
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
339
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
340
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_ours/LivebaseStage3_high11_.json \
341
+ --eval_model EWO \
342
+ --concat True \
343
+ --anticipation 1 \
344
+ --latency 5 \
345
+ --evaluator_llm deepseek \
346
+ --master_port 2801 \
347
+
348
+
349
+ # subset 5 cases
350
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
351
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
352
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_ours_5cases/LivebaseStage2.json \
353
+ --eval_model EWO \
354
+ --concat True \
355
+ --anticipation 5 \
356
+ --latency 5 \
357
+ --evaluator_llm deepseek \
358
+
359
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
360
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
361
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_ours/LivebaseStage3_high0.31_11_low.json \
362
+ --eval_model EWO \
363
+ --concat True \
364
+ --anticipation 1 \
365
+ --latency 2 \
366
+ --evaluator_llm deepseek \
367
+
368
+
369
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
370
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
371
+ --pred_file /root/videollm-online/data/estp_dataset/estpSqa_ours_5cases/LivebaseStage3_high0.31_1_low.json \
372
+ --eval_model EWO \
373
+ --concat True \
374
+ --anticipation 1 \
375
+ --latency 2 \
376
+ --evaluator_llm deepseek \
377
+
378
+ # multi QA
379
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
380
+ python /root/videollm-online/data/estp_dataset/benchmark/evalate_singleQA.py \
381
+ --pred_file /root/videollm-online/data/estp_dataset/estpCqa_ours/LivebaseStage2_low.json \
382
+ --eval_model EWO \
383
+ --concat True \
384
+ --anticipation 1 \
385
+ --qa_type MultiQA \
386
+ --latency 2 \
387
+ --evaluator_llm deepseek \
ESTP-Bench/estp_dataset/benchmark/evalate_singleQA.py ADDED
@@ -0,0 +1,484 @@
1
+ import os, sys, re, requests, random
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ import json
4
+ from tqdm import tqdm
5
+ import argparse
6
+ import torch
7
+ import numpy as np
8
+ import pandas as pd
9
+ import torch.distributed as dist
10
+ import torch.multiprocessing as mp
11
+ import openai
12
+
13
+ import math
14
+ def ceil_time_by_fps(time: float, fps: int, min_time: float, max_time: float):
15
+ return min(max(math.ceil(time * fps) / fps, min_time), max_time)
16
+
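+ # For example, ceil_time_by_fps(3.3, 2, 0, 100) snaps 3.3 s up to the next 0.5 s frame boundary
+ # at fps = 2 and returns 3.5, clamped to the [0, 100] range.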
17
+ EVALUATOR_PROMPT = [
18
+ {"role": "system", "content": (
19
+ "You are an evaluator for a video question answering system. Your task is to rate the "
20
+ "correctness of the predicted answers against the ground truth answers. Use the following scale to assign a score:\n"
21
+ "- 5: Perfect match; the predicted answer is completely correct and contains all the relevant information.\n"
22
+ "- 4: Mostly correct; the predicted answer is largely accurate but may have minor omissions or slight inaccuracies.\n"
23
+ "- 3: Partially correct; the predicted answer has some correct information, but also contains significant inaccuracies or missing key points.\n"
24
+ "- 2: Slightly correct; the predicted answer has only a few correct elements, but most of the information is incorrect or irrelevant, or the predicted answer conflicts with the ground truth answer.\n"
25
+ "- 1: Incorrect; the predicted answer is entirely wrong or does not address the question at all.\n\n"
26
+ "Here are some examples to guide you:")
27
+ },
28
+ {"role": "user", "content": "Question: How can I achieve my goal, such as stir-frying the ingredients, step by step? Can you explain each stage?\nGround Truth Answer: Let\u2019s begin! First, add oil to the pan to prepare for cooking.\nPredicted Answer: Heat oil in the pan."},
29
+ {"role": "assistant", "content": "2"},
30
+
31
+ {"role": "user", "content": "Question: what is the category of the object I hold?\nGround Truth Answer: The object you hold is a computer mouse.\nPredicted Answer: It is a computer mouse."},
32
+ {"role": "assistant", "content": "5"},
33
+
34
+ {"role": "user", "content": "Question: Can you remind me how the towel changes position when I dip it in the water?\nGround Truth Answer: The towel is initially held in your right hand, and you dip it into a purple plastic basin filled with water. The towel becomes submerged, absorbing water, and is then lifted out, allowing excess water to drip off.\nPredicted Answer: The towel changes position when you dip it in the water."},
35
+ {"role": "assistant", "content": "3"},
36
+
37
+ {"role": "user", "content": "Question: Can you remind me when the state of the trunk changes? \nGround Truth Answer: The trunk is opened by the observer.\nPredicted Answer: The trunk is closed."},
38
+ {"role": "assistant", "content": "1"},
39
+
40
+ {"role": "user", "content": "Question: Where are the cars located?\nGround Truth Answer: The cars are on the road far beneath you.\nPredicted Answer: The cars are located at the base of the cliff."},
41
+ {"role": "assistant", "content": "3"},
42
+ ]
43
+
44
+ class CorrectnessEvaluator:
45
+ @torch.no_grad()
46
+ def __init__(self, llm_pretrained):
47
+ self.tokenizer = AutoTokenizer.from_pretrained(llm_pretrained)
48
+ self.model = AutoModelForCausalLM.from_pretrained(llm_pretrained, torch_dtype=torch.bfloat16, device_map='auto')
49
+ conversation = EVALUATOR_PROMPT
50
+
51
+ prompt_input = self.tokenizer.apply_chat_template(conversation, return_tensors='pt', return_dict=True).to(self.model.device)
52
+ outputs = self.model(**prompt_input, use_cache=True)
53
+ self.prompt_past_key_values = outputs.past_key_values
54
+ self.prompt_input_ids = prompt_input.input_ids
55
+
56
+ @torch.no_grad()
57
+ def evaluate(self, question, gold_answer, pred_answer):
58
+ conversation = [
59
+ {"role": "user", "content": f"Question: {question}\nGround Truth Answer: {gold_answer}\nPredicted Answer: {pred_answer}"},
60
+ {"role": "assistant", "content": ""}
61
+ ]
62
+ new_input_ids = self.tokenizer.apply_chat_template(conversation, return_tensors='pt').to(self.model.device)
63
+ first_eot_index = (new_input_ids == 128009).nonzero()[0, -1] # remove the system prompt before the user turn (i.e., the first turn) of llama tokenizer
64
+ new_input_ids = new_input_ids[:, first_eot_index + 1:-1] # -1 (the last token): '<|eot|>'
65
+
66
+ all_input_ids = torch.cat([self.prompt_input_ids, new_input_ids], dim=1)
67
+ generated_ids = self.model.generate(input_ids=all_input_ids, use_cache=True, max_new_tokens=32)
68
+ generated_ids = generated_ids[:, all_input_ids.size(1):]
69
+ decoded_text = self.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
70
+ score = int(decoded_text[0]) if decoded_text[0] in '12345' else 1
71
+ return score
72
+
73
+ class GPTCorrectnessEvaluator:
74
+ def __init__(self, api_key):
75
+ self.client = openai.OpenAI(api_key=api_key)
76
+ self.conversation = EVALUATOR_PROMPT
77
+
78
+ def evaluate(self, question, gold_answer, pred_answer):
79
+ messages = self.conversation + [
80
+ {"role": "user", "content": f"Question: {question}\nGround Truth Answer: {gold_answer}\nPredicted Answer: {pred_answer}"}
81
+ ]
82
+
83
+ try:
84
+ response = self.client.chat.completions.create(
85
+ model="gpt-4", # 或者使用 "gpt-3.5-turbo"
86
+ messages=messages,
87
+ temperature=0,
88
+ max_tokens=1
89
+ )
90
+ score = int(response.choices[0].message.content[0])
91
+ return score if score in [1, 2, 3, 4, 5] else 1
92
+ except Exception as e:
93
+ print(f"Error in GPT evaluation: {e}")
94
+ return 1
95
+
96
+ class DeepSeekCorrectnessEvaluator:
97
+ def __init__(self, api_key):
98
+ api_key = ""
99
+ if api_key == "":
100
+ print("Please set the api key for DeepSeek")
101
+ exit()
102
+ self.client = openai.OpenAI(
103
+ api_key=api_key,
104
+ base_url="https://api.deepseek.com",
105
+ )
106
+ self.conversation = EVALUATOR_PROMPT
107
+
108
+ def evaluate(self, question, gold_answer, pred_answer):
109
+ messages = self.conversation + [
110
+ {"role": "user", "content": f"Question: {question}\nGround Truth Answer: {gold_answer}\nPredicted Answer: {pred_answer}"}
111
+ ]
112
+
113
+ try:
114
+ response = self.client.chat.completions.create(
115
+ model="deepseek-chat",
116
+ messages=messages,
117
+ )
118
+ score = int(response.choices[0].message.content[0])
119
+ return score if score in [1, 2, 3, 4, 5] else 1
120
+ except Exception as e:
121
+ print(f"Error in DeepSeek evaluation: {e}")
122
+ return 1
123
+
124
+ def list_user_query(conversation):
125
+ user_query = []
126
+ query_time = []
127
+ for i in range(len(conversation)):
128
+ if conversation[i]['role'].lower() == 'user':
129
+ user_query.append(conversation[i]['content'])
130
+ query_time.append(conversation[i]['time'])
131
+ # Sort user queries and query times by time
132
+ if len(user_query) > 1:
133
+ # Create pairs of (query, time) and sort by time
134
+ query_time_pairs = list(zip(user_query, query_time))
135
+ query_time_pairs.sort(key=lambda x: x[1])
136
+
137
+ # Unpack the sorted pairs back into separate lists
138
+ user_query = [pair[0] for pair in query_time_pairs]
139
+ query_time = [pair[1] for pair in query_time_pairs]
140
+
141
+ return user_query, query_time
142
+
143
+
144
+ def parser_model_output(eval_model, pred_output):
145
+ if eval_model in ['VideollmOnline', 'EWO']: # for context have time information, delete inference time
146
+ if eval_model not in pred_output:
147
+ return [], []
148
+ model_response_list = []
149
+
150
+ pattern = r'\(Video Time = [\d.]+s\)\s*(\w+):\s*(.*?)(?=\s*\(|$)'
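+ # e.g. a turn whose content is "(Video Time = 12.5s) Assistant: The mug is on the table."
+ # is parsed into role "Assistant" and content "The mug is on the table." (illustrative input).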
151
+ preds = pred_output[eval_model]
152
+ for pred in preds:
153
+ if pred['role'].lower() not in ['assistant', 'user']:
154
+ continue
155
+ match = re.match(pattern, pred['content'])
156
+ if match:
157
+ role = match.group(1)
158
+ content = match.group(2)
159
+ model_response_list.append({'role': role.lower(), 'content': content, 'time': pred['time']})
160
+
161
+ if len(model_response_list) == 0:
162
+ return [], []
163
+ answers = [e for e in model_response_list if e['role'] == 'assistant']
164
+ pred_list = [e['content'] for e in answers]
165
+ pred_time_list = [e['time'] for e in answers]
166
+ else:
167
+ if eval_model not in pred_output:
168
+ return [], []
169
+ model_response_list = []
170
+ preds = pred_output[eval_model]
171
+ for pred in preds:
172
+ if pred['role'].lower() != 'assistant':
173
+ continue
174
+ model_response_list.append({'role': pred['role'].lower(), 'content': pred['content'], 'time': pred['time']})
175
+
176
+ if len(model_response_list) == 0:
177
+ return [], []
178
+ answers = [e for e in model_response_list if e['role'].lower() == 'assistant']
179
+ pred_list = [e['content'] for e in answers]
180
+ pred_time_list = [e['time'] for e in answers]
181
+
182
+ return pred_list, pred_time_list
183
+
184
+ def calculateResponseScore(gold_timespan, pred_time, task_type, anticipation, latency):
185
+ if task_type.strip() in ['Object Localization', 'Object Recognition','Ego Object Localization','Object Function','Attribute Perception',
186
+ 'Information Function','Text-Rich Understanding']:
187
+ return (1 - abs(pred_time - (gold_timespan[0])) / (gold_timespan[1] - gold_timespan[0] + latency + anticipation)) * 5
188
+ else:
189
+ return (1 - abs(pred_time - (gold_timespan[0]+gold_timespan[1])/2) / (gold_timespan[1] - gold_timespan[0] + latency + anticipation)) * 5
190
+
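+ # Timing-score sketch for calculateResponseScore above: for a non-localization task with gold
+ # span (10.0, 14.0), anticipation = 1 and latency = 2, a response at t = 11.0 is scored against
+ # the span midpoint 12.0: (1 - |11 - 12| / (4 + 2 + 1)) * 5 = 30/7 ≈ 4.29 out of 5; the
+ # localization-style tasks listed above measure the offset from the span start instead.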
191
+ def evalSingleQA(args, f_out, answer_pred, evaluator):
192
+ for video_uid in tqdm(answer_pred.keys()):
193
+ for clip_uid in answer_pred[video_uid].keys():
194
+ for i,qa in enumerate(answer_pred[video_uid][clip_uid]):
195
+
196
+ qa['id'] = i
197
+ qa['video_uid'] = video_uid
198
+ qa['clip_uid'] = clip_uid
199
+
200
+ # parser pred output and gold output
201
+ pred_list, pred_time_list = parser_model_output(args.eval_model, qa)
202
+ gold_list = [e['content'] for e in qa['conversation'] if e['role'].lower() == 'assistant']
203
+ clip_end_time = qa['clip_end_time'] if 'clip_end_time' in qa.keys() else qa['end_time']
204
+ gold_timespan_list = [(ceil_time_by_fps(e['start_time'], 2, 0, clip_end_time),
205
+ ceil_time_by_fps(e['end_time'], 2, 0, clip_end_time)) for e in qa['conversation'] if e['role'].lower() == 'assistant']
206
+
207
+ if 'question' not in qa:
208
+ for e in qa['conversation']:
209
+ if e['role'].lower() == 'user':
210
+ question = e['content']
211
+ break
212
+ else:
213
+ question = qa['question']
214
+
215
+ # construct pred and answer map
216
+ pred_text_to_turn_i = dict()
217
+ for turn_i, text in enumerate(pred_list):
218
+ if text not in pred_text_to_turn_i:
219
+ pred_text_to_turn_i[text] = list()
220
+ pred_text_to_turn_i[text].append(turn_i)
221
+
222
+ gold_text_to_turn_i = dict()
223
+ for turn_i, text in enumerate(gold_list):
224
+ if text not in gold_text_to_turn_i:
225
+ gold_text_to_turn_i[text] = list()
226
+ gold_text_to_turn_i[text].append(turn_i)
227
+
228
+
229
+ score_matrix = np.zeros((len(gold_list), len(pred_list)))
230
+ response_score_matrix = np.zeros((len(gold_list), len(pred_list)))
231
+
232
+ for gold_content, gold_turn_ids in gold_text_to_turn_i.items():
233
+ for pred_content, pred_turn_ids in pred_text_to_turn_i.items():
234
+ # we only need to evaluate the pred answer that is in the gold span to for the in-span metric
235
+ gold_timespan = [gold_timespan_list[i] for i in gold_turn_ids]
236
+ pred_time = [pred_time_list[i] for i in pred_turn_ids]
237
+ # the pred answer with time -1 can pair with any other span
238
+ pred_time_in_gold_timespan_list = [(time == -1 or span[0]-args.anticipation <= time <= span[1]+args.latency) for time in pred_time for span in gold_timespan]
239
+ if not any(pred_time_in_gold_timespan_list):
240
+ continue
241
+
242
+ if args.eval_model in ['EgoVLP', 'CLIP', 'Lavila']:
243
+ score = 0
244
+ else:
245
+ score = evaluator.evaluate(question, gold_content, pred_content)
246
+
247
+ row_indices, col_indices = np.meshgrid(gold_turn_ids, pred_turn_ids)
248
+
249
+ for i, (row, col) in enumerate(zip(row_indices.flatten(), col_indices.flatten())):
250
+ if pred_time_in_gold_timespan_list[i]:
251
+ text_score = score
252
+ reponse_score = calculateResponseScore(gold_timespan_list[row], pred_time_list[col], qa['Task Type'], args.anticipation, args.latency)
253
+ score_matrix[row, col] = text_score
254
+ response_score_matrix[row, col] = reponse_score
255
+
256
+
257
+ qa['evaluator_output_text'] = score_matrix.tolist()
258
+ qa['evaluator_output_reponse'] = response_score_matrix.tolist()
259
+ with open(f_out, 'a') as f:
260
+ f.write(json.dumps(qa,indent=4) + '\n')
261
+
262
+
263
+ def evalMultiQA(args, f_out, answer_pred, evaluator):
264
+ for video_uid in tqdm(answer_pred.keys()):
265
+ for clip_uid in answer_pred[video_uid].keys():
266
+ for i,qa in enumerate(answer_pred[video_uid][clip_uid]):
267
+
268
+ qa['id'] = i
269
+ qa['video_uid'] = video_uid
270
+ qa['clip_uid'] = clip_uid
271
+
272
+ for conv in qa['conversation']:
273
+ if 'time' not in conv.keys():
274
+ conv['time'] = conv['start_time'] + (conv['end_time'] - conv['start_time']) / 2
275
+ qa['conversation'] = sorted(qa['conversation'], key=lambda x: x['time'])
276
+
277
+ questions, query_times = list_user_query(qa['conversation'])
278
+ start_time = qa['clip_start_time'] if 'clip_start_time' in qa.keys() else qa['start_time']
279
+ end_time = qa['clip_end_time'] if 'clip_end_time' in qa.keys() else qa['end_time']
280
+ max_time = qa['conversation'][-1]['end_time']
281
+
282
+ start_time = min([start_time]+query_times)
283
+ end_time = min([end_time,max_time])
284
+
285
+ start_times = []
286
+ for question, query_time in zip(questions, query_times):
287
+ start_times.append(query_time)
288
+ end_times = start_times[1:] + [end_time]
289
+ for question_idx, (question, query_time, start_time, end_time) in enumerate(zip(questions, query_times, start_times, end_times)):
290
+ # parser pred output and gold output
291
+ pred_list, pred_time_list = parser_model_output(args.eval_model, qa)
292
+
293
+ # HACK: add in-span pred
294
+ in_span_pred_list, in_span_pred_time_list = [], []
295
+ for pred, pred_time in zip(pred_list, pred_time_list):
296
+ if start_time <= pred_time <= end_time:
297
+ in_span_pred_list.append(pred)
298
+ in_span_pred_time_list.append(pred_time)
299
+ pred_list, pred_time_list = in_span_pred_list, in_span_pred_time_list
300
+
301
+ gold_list = [e['content'] for e in qa['conversation'] if e['role'].lower() == 'assistant']
302
+ clip_end_time = qa['clip_end_time'] if 'clip_end_time' in qa.keys() else qa['end_time']
303
+ gold_timespan_list = [(ceil_time_by_fps(e['start_time'], 2, 0, clip_end_time),
304
+ ceil_time_by_fps(e['end_time'], 2, 0, clip_end_time)) for e in qa['conversation'] if e['role'].lower() == 'assistant']
305
+
306
+
307
+ # HACK: add in-span gold
308
+ in_span_gold_list, in_span_gold_timespan_list = [], []
309
+ for gold, gold_time in zip(gold_list, gold_timespan_list):
310
+ if start_time <= gold_time[0] <= end_time and start_time <= gold_time[1] <= end_time:
311
+ in_span_gold_list.append(gold)
312
+ in_span_gold_timespan_list.append(gold_time)
313
+ gold_list, gold_timespan_list = in_span_gold_list, in_span_gold_timespan_list
314
+
315
+
316
+
317
+ # construct pred and answer map
318
+ pred_text_to_turn_i = dict()
319
+ for turn_i, text in enumerate(pred_list):
320
+ if text not in pred_text_to_turn_i:
321
+ pred_text_to_turn_i[text] = list()
322
+ pred_text_to_turn_i[text].append(turn_i)
323
+
324
+ gold_text_to_turn_i = dict()
325
+ for turn_i, text in enumerate(gold_list):
326
+ if text not in gold_text_to_turn_i:
327
+ gold_text_to_turn_i[text] = list()
328
+ gold_text_to_turn_i[text].append(turn_i)
329
+
330
+ score_matrix = np.zeros((len(gold_list), len(pred_list)))
331
+ response_score_matrix = np.zeros((len(gold_list), len(pred_list)))
332
+
333
+ for gold_content, gold_turn_ids in gold_text_to_turn_i.items():
334
+ for pred_content, pred_turn_ids in pred_text_to_turn_i.items():
335
+ # we only need to evaluate the pred answer that is in the gold span to for the in-span metric
336
+ gold_timespan = [gold_timespan_list[i] for i in gold_turn_ids]
337
+ pred_time = [pred_time_list[i] for i in pred_turn_ids]
338
+ # the pred answer with time -1 can pair with any other span
339
+ pred_time_in_gold_timespan_list = [(time == -1 or span[0]-args.anticipation <= time <= span[1]+args.latency) for time in pred_time for span in gold_timespan]
340
+ if not any(pred_time_in_gold_timespan_list):
341
+ continue
342
+
343
+ if args.eval_model in ['EgoVLP', 'CLIP', 'Lavila']:
344
+ score = 0
345
+ else:
346
+ score = evaluator.evaluate(question, gold_content, pred_content)
347
+
348
+ row_indices, col_indices = np.meshgrid(gold_turn_ids, pred_turn_ids)
349
+
350
+ for i, (row, col) in enumerate(zip(row_indices.flatten(), col_indices.flatten())):
351
+ if pred_time_in_gold_timespan_list[i]:
352
+ text_score = score
353
+ reponse_score = calculateResponseScore(gold_timespan_list[row], pred_time_list[col], qa['Task Type'], args.anticipation, args.latency)
354
+ score_matrix[row, col] = text_score
355
+ response_score_matrix[row, col] = reponse_score
356
+
357
+
358
+ qa['evaluator_output_text'] = score_matrix.tolist()
359
+ qa['evaluator_output_reponse'] = response_score_matrix.tolist()
360
+ with open(f_out, 'a') as f:
361
+ f.write(json.dumps(qa,indent=4) + '\n')
362
+
363
+
364
+ def calQuestionTotal(answer_pred):
365
+ c = 0
366
+ for k,v in answer_pred.items():
367
+ for kk,vv in v.items():
368
+ for qa in vv:
369
+ if args.eval_model in qa:
370
+ c+=1
371
+ print('total question:', c)
372
+
373
+ def main_worker(rank, world_size, args):
374
+ # 设置当前进程使用的 GPU
375
+ torch.cuda.set_device(rank)
376
+ args.device = f"cuda:{rank}"
377
+
378
+ # 初始化分布式环境(此处采用 NCCL 后端,适用于 GPU 之间的通信)
379
+ dist.init_process_group(backend="nccl", init_method="env://", rank=rank, world_size=world_size)
380
+
381
+ # 加载数据
382
+ if args.concat:
383
+ parent_dir = os.path.dirname(args.pred_file)
384
+ eval_files = [os.path.join(parent_dir, f) for f in os.listdir(parent_dir) if f.startswith(args.pred_file.split('/')[-1])]
385
+ data = {}
386
+ for eval_file in eval_files:
387
+ data.update(json.load(open(eval_file)))
388
+ else:
389
+ print(args.pred_file)
390
+ data = json.load(open(args.pred_file))
391
+
392
+ # 将数据分片(此处假设 data 为列表;如果是字典,需要根据实际情况修改分片方式)
393
+ if isinstance(data, list):
394
+ local_data = data[rank::world_size]
395
+ elif isinstance(data, dict):
396
+ keys = list(data.keys())
397
+ local_keys = keys[rank::world_size]
398
+ local_data = {k: data[k] for k in local_keys}
399
+ else:
400
+ local_data = data # 未知结构则不分片
401
+
402
+ output_file = args.pred_file.replace('.json', f'evaluator_{args.evaluator_llm}_{args.anticipation}_{args.latency}.json')
403
+ local_output_file = f"{output_file}.part{rank}"
404
+ # Create an empty local output file
405
+ with open(local_output_file, 'w') as f:
406
+ f.write('') # Write empty string to create the file
407
+ f_out = local_output_file
408
+ print(f_out)
409
+
410
+ # init
411
+ if args.evaluator_llm == 'gpt':
412
+ evaluator = GPTCorrectnessEvaluator('sk-proj-1234567890')
413
+ elif args.evaluator_llm == 'llama':
414
+ evaluator = CorrectnessEvaluator('meta-llama/Meta-Llama-3-8B-Instruct')
415
+ elif args.evaluator_llm == 'deepseek':
416
+ evaluator = DeepSeekCorrectnessEvaluator('sk-43a08cfb3ae64b6288ee67db8009c8ca')
417
+ else:
418
+ raise ValueError(f'evaluator_llm {args.evaluator_llm} not supported')
419
+
420
+ print('start eval')
421
+ # eval
422
+ if args.qa_type == 'SingleQA':
423
+ evalSingleQA(args, f_out, local_data, evaluator)
424
+ elif args.qa_type == 'MultiQA':
425
+ evalMultiQA(args, f_out, local_data, evaluator)
426
+ else:
427
+ raise ValueError(f'qa_type {args.qa_type} not supported')
428
+
429
+
430
+ def main(args):
431
+ print(args.pred_file, args.concat)
432
+ if args.concat:
433
+ parent_dir = os.path.dirname(args.pred_file)
434
+ eval_files = [os.path.join(parent_dir, f) for f in os.listdir(parent_dir) if f.startswith(args.pred_file.split('/')[-1])]
435
+ data = {}
436
+ for eval_file in eval_files:
437
+ data.update(json.load(open(eval_file)))
438
+ else:
439
+ data = json.load(open(args.pred_file))
440
+ calQuestionTotal(data)
441
+
442
+ if torch.cuda.device_count() < 2:
443
+ # init
444
+ if args.evaluator_llm == 'gpt':
445
+ evaluator = GPTCorrectnessEvaluator('sk-proj-1234567890')
446
+ elif args.evaluator_llm == 'llama':
447
+ evaluator = CorrectnessEvaluator('meta-llama/Meta-Llama-3-8B-Instruct')
448
+ elif args.evaluator_llm == 'deepseek':
449
+ evaluator = DeepSeekCorrectnessEvaluator('sk-43a08cfb3ae64b6288ee67db8009c8ca')
450
+ else:
451
+ raise ValueError(f'evaluator_llm {args.evaluator_llm} not supported')
452
+
453
+ output_file = args.pred_file.replace('.json', f'_evaluator_{args.evaluator_llm}_{args.anticipation}_{args.latency}.json')
454
+ with open(output_file, 'w') as f:
455
+ f.write('')
456
+ f_out = output_file
457
+
458
+ # eval
459
+ if args.qa_type == 'SingleQA':
460
+ evalSingleQA(args, f_out, data, evaluator)
461
+ elif args.qa_type == 'MultiQA':
462
+ evalMultiQA(args, f_out, data, evaluator)
463
+ else:
464
+ raise ValueError(f'qa_type {args.qa_type} not supported')
465
+
466
+ else:
467
+ os.environ['MASTER_ADDR'] = '127.0.0.1'
468
+ os.environ['MASTER_PORT'] = str(args.master_port)
469
+ world_size = torch.cuda.device_count()
470
+ mp.spawn(main_worker, args=(world_size, args), nprocs=world_size, join=True)
471
+
472
+
473
+ if __name__ == '__main__':
474
+ parser = argparse.ArgumentParser()
475
+ parser.add_argument('--evaluator_llm', type=str, default='gpt-4')
476
+ parser.add_argument('--eval_model', type=str, default='VideollmOnline')
477
+ parser.add_argument('--pred_file', type=str, default='/root/videollm-online/data/estp_dataset/tmp_predict_VideollmOnline_v2.json')
478
+ parser.add_argument('--concat', type=bool, default=False)
479
+ parser.add_argument('--qa_type', type=str, default='SingleQA')
480
+ parser.add_argument('--anticipation', type=int, default=0.0)
481
+ parser.add_argument('--latency', type=int, default=0.0)
482
+ parser.add_argument('--master_port', type=int, default=29501)
483
+ args = parser.parse_args()
484
+ main(args)
ESTP-Bench/estp_dataset/benchmark/merge_prediction_result.ipynb ADDED
@@ -0,0 +1,44 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "# Concat generated prediction results from multiple files"
8
+ ]
9
+ },
10
+ {
11
+ "cell_type": "code",
12
+ "execution_count": null,
13
+ "metadata": {},
14
+ "outputs": [],
15
+ "source": [
16
+ "import os\n",
17
+ "import json\n",
18
+ "\n",
19
+ "pred_file = 'estp_bench_sq_EWO_frame_by_frame_fusion_dinov2_first_5_cases.json'\n",
20
+ "parent_dir = os.path.dirname(pred_file)\n",
21
+ "eval_files = [os.path.join(parent_dir, f) for f in os.listdir(parent_dir) if f.startswith(pred_file.split('/')[-1])]\n",
22
+ "data = {}\n",
23
+ "for eval_file in eval_files:\n",
24
+ " data.update(json.load(open(eval_file)))\n",
25
+ "\n",
26
+ "with open(pred_file, 'w') as f:\n",
27
+ " json.dump(data, f, indent=4)"
28
+ ]
29
+ }
30
+ ],
31
+ "metadata": {
32
+ "kernelspec": {
33
+ "display_name": "videollm",
34
+ "language": "python",
35
+ "name": "python3"
36
+ },
37
+ "language_info": {
38
+ "name": "python",
39
+ "version": "3.10.14"
40
+ }
41
+ },
42
+ "nbformat": 4,
43
+ "nbformat_minor": 2
44
+ }
ESTP-Bench/estp_dataset/cqa_anno.json ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/dataset.py ADDED
@@ -0,0 +1 @@
 
 
1
+
ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2.json ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json.part0 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json.part1 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json.part2 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json.part3 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json.part4 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json.part5 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json.part6 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/CLIP_streaming_v2evaluator_deepseek_1_2.json.part7 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2.json ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json.part0 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json.part1 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json.part2 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json.part3 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json.part4 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json.part5 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json.part6 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/EgoVLP_streaming_v2evaluator_deepseek_1_2.json.part7 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175.json.part0 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175.json.part1 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part0 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part1 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part2 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part3 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part4 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part5 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part6 ADDED
The diff for this file is too large to render. See raw diff
 
ESTP-Bench/estp_dataset/estpCqa_baseline/InternVLV28_fbf_0.175evaluator_deepseek_1_2.json.part7 ADDED
The diff for this file is too large to render. See raw diff