pbwpbw committed
Commit 63c47be · verified · 1 parent: 9da2a7e

Upload folder using huggingface_hub

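For reference, a commit like this can be produced with the huggingface_hub CLI; a minimal sketch, assuming huggingface_hub is installed and using a placeholder repo id (the actual repo is not named in the commit):

# Upload the current folder to a dataset repo (the repo id below is a placeholder).
pip install -U huggingface_hub
huggingface-cli login    # requires a token with write access
huggingface-cli upload <user>/<dataset-repo> . . --repo-type dataset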
MMVU.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99911400978570b69f256426d5260b5016ff892cc5edb4684bc2d41d16945916
+ size 1269493760
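Each archive is stored as a Git LFS pointer (version, oid, size). A downloaded copy can be checked against the pointer above; a minimal verification sketch, assuming GNU coreutils and that MMVU.tar sits in the current directory:

# Verify MMVU.tar against the LFS pointer above (assumes GNU coreutils).
echo "99911400978570b69f256426d5260b5016ff892cc5edb4684bc2d41d16945916  MMVU.tar" | sha256sum -c -
stat -c %s MMVU.tar   # should print 1269493760, the size recorded in the pointer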
VCR-Bench.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ccce107eec913943f26d89bb06e5ef75ef07a4028d615a637e5031d84c2a17d
+ size 24236636160
Video-Holmes.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c2ee320c176fbe15aad7d8020dd56c527142ae0d5a6da6bbd33d897b5a94c93
+ size 3173007360
eval_vcr_mmvu.sh ADDED
@@ -0,0 +1,21 @@
+
+ save_path='baseline_think'
+ save_name='videorft_7b_f32'
+ #python eval.py -i eval_res/baseline_think/videorft_7b_f32.jsonl
+ #bash eval_vcr_mmvu.sh
+ cd VCR-Bench
+ python eval.py -i eval_res/$save_path/$save_name.jsonl
+ cd ../MMVU
+ python eval.py -i eval_res/$save_path/$save_name.jsonl
+ cd ../VCR-Bench
+ python eval.py -i eval_res/$save_path/$save_name.jsonl
+ cd ../VSI-Bench
+ python eval.py -i eval_res/$save_path/$save_name.jsonl
+ cd ../video-tt
+ python eval.py -i eval_res/$save_path/$save_name.jsonl
+ cd ../norm_bench
+ python get_score.py \
+     --benchmark videomme \
+     --data-root /home/pubw/datasets/Video-MME \
+     --save-path result/$save_path/$save_name/videomme.jsonl
+ #python get_score.py --benchmark videomme --data-root /home/pubw/datasets/Video-MME --save-path result/7b_rl/7b_0921_f64_4000/videomme.jsonl
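Since the same eval.py invocation repeats across benchmark directories, the script could also be written as a loop; a sketch, not the committed script, assuming each benchmark directory ships an eval.py with the same -i interface (note the committed script evaluates VCR-Bench twice; the loop below runs each benchmark once):

# Loop form of the per-benchmark eval calls above (subshell keeps the cwd stable).
for bench in VCR-Bench MMVU VSI-Bench video-tt; do
    (cd "$bench" && python eval.py -i "eval_res/$save_path/$save_name.jsonl")
done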
norm_bench.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f7e047a8db57a1b97f1a9f2848f180c8afc8e681c99af0876de07766ab76cc6
+ size 396789760
test_hol_vcr_mmvu.sh ADDED
@@ -0,0 +1,31 @@
+ #"https://www.bitahub.com/inf-app/a16764158164070400405691/v1"
+ #"http://192.168.1.112:31151/v1"
+ #"http://192.168.1.113:31251/v1"
+ #"http://127.0.0.1:8000/v1"
+
+ export base_url="https://www.bitahub.com/inf-app/a16764158164070400405691/v1"
+
+ #bash test_hol_vcr_mmvu.sh
+ save_path='3b_rl'
+ save_name='3b_add_consistent'
+
+ # python qwen_infer.py --save_path tmm --save_name videochatr1_5
+ # python eval.py -i eval_res/3b_rl/3b_rft_hallu.jsonl
+ # python test_infer_time.py --save_path test_time --save_name r1
+ cd Video-Holmes
+ python qwen_infer.py --save_path $save_path --save_name $save_name
+ cd ../MMVU
+ python qwen_infer.py --save_path $save_path --save_name $save_name
+ cd ../VCR-Bench
+ python qwen_infer.py --save_path $save_path --save_name $save_name
+ cd ../video-tt
+ python qwen_infer.py --save_path $save_path --save_name $save_name
+ cd ../VSI-Bench
+ python qwen_infer.py --save_path $save_path --save_name $save_name
+ cd ../norm_bench
+ python evaluate.py \
+     --benchmark videomme \
+     --data-root /home/pubw/datasets/Video-MME \
+     --intermediate result/$save_path/$save_name/videomme.jsonl
+
+ #python evaluate.py --benchmark videomme --data-root /home/pubw/datasets/Video-MME --intermediate result/7b_rl/7b_0921_f64_4000/videomme.jsonl
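The inference script follows the same pattern, so an equivalent loop form is possible; a sketch under the same assumption that every benchmark directory exposes qwen_infer.py with identical flags:

# Loop form of the per-benchmark inference calls above.
for bench in Video-Holmes MMVU VCR-Bench video-tt VSI-Bench; do
    (cd "$bench" && python qwen_infer.py --save_path "$save_path" --save_name "$save_name")
done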
tvgbench.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0debf68010a5dd9b338acd0eb82b60c2a87bb4f58b8df3a789d46e292dfb5f12
+ size 34826240
video-tt.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f9ebcc3763a62b77b71842e66501172b844efad59df3adc126b6ca23165cbfc
+ size 18223298560