Update src/about.py
Browse files- src/about.py +22 -3
src/about.py
CHANGED
|
@@ -25,6 +25,24 @@ TITLE = """<h1 align="center" id="space-title">VBVR-Bench Leaderboard</h1>"""
|
|
| 25 |
|
| 26 |
# What does your leaderboard evaluate?
|
| 27 |
INTRODUCTION_TEXT = """
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 28 |
**VBVR-Bench** is a comprehensive benchmark for evaluating **video reasoning capabilities**.
|
| 29 |
|
| 30 |
To systematically assess model reasoning capabilities, VBVR-Bench employs a **dual-split evaluation strategy** across **100 diverse tasks**:
|
|
@@ -115,8 +133,9 @@ We will review your submission and add it to the leaderboard within 1-2 weeks.
|
|
| 115 |
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
|
| 116 |
CITATION_BUTTON_TEXT = r"""
|
| 117 |
@article{vbvr2026,
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
|
|
|
| 121 |
}
|
| 122 |
"""
|
|
|
|
| 25 |
|
| 26 |
# What does your leaderboard evaluate?
|
| 27 |
INTRODUCTION_TEXT = """
|
| 28 |
+
<a href="https://video-reason.com" target="_blank">
|
| 29 |
+
<img alt="Homepage" src="https://img.shields.io/badge/Project%20-%20Homepage-4285F4" height="20" />
|
| 30 |
+
</a>
|
| 31 |
+
<a href="https://github.com/orgs/Video-Reason/repositories" target="_blank">
|
| 32 |
+
<img alt="Code" src="https://img.shields.io/badge/VBVR-Code-100000?style=flat-square&logo=github&logoColor=white" height="20" />
|
| 33 |
+
</a>
|
| 34 |
+
<a href="https://arxiv.org/abs/2602.20159" target="_blank">
|
| 35 |
+
<img alt="arXiv" src="https://img.shields.io/badge/arXiv-VBVR-red?logo=arxiv" height="20" />
|
| 36 |
+
</a>
|
| 37 |
+
<a href="https://huggingface.co/Video-Reason/VBVR-Dataset" target="_blank">
|
| 38 |
+
<img alt="Dataset" src="https://img.shields.io/badge/%F0%9F%A4%97%20_VBVR_Dataset-Data-ffc107?color=ffc107&logoColor=white" height="20" />
|
| 39 |
+
</a>
|
| 40 |
+
<a href="https://huggingface.co/Video-Reason/VBVR-Bench-Data" target="_blank">
|
| 41 |
+
<img alt="Dataset" src="https://img.shields.io/badge/%F0%9F%A4%97%20_VBVR_Bench-Data-ffc107?color=ffc107&logoColor=white" height="20" />
|
| 42 |
+
</a>
|
| 43 |
+
<a href="https://huggingface.co/Video-Reason/VBVR-Bench-Leaderboard" target="_blank">
|
| 44 |
+
<img alt="Leaderboard" src="https://img.shields.io/badge/%F0%9F%A4%97%20_VBVR_Bench-Leaderboard-ffc107?color=ffc107&logoColor=white" height="20" />
|
| 45 |
+
</a>
|
| 46 |
**VBVR-Bench** is a comprehensive benchmark for evaluating **video reasoning capabilities**.
|
| 47 |
|
| 48 |
To systematically assess model reasoning capabilities, VBVR-Bench employs a **dual-split evaluation strategy** across **100 diverse tasks**:
|
|
|
|
| 133 |
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
|
| 134 |
CITATION_BUTTON_TEXT = r"""
|
| 135 |
@article{vbvr2026,
|
| 136 |
+
title={A Very Big Video Reasoning Suite},
|
| 137 |
+
author={Maijunxian Wang and Ruisi Wang and Juyi Lin and Ran Ji and Thaddäus Wiedemer and Qingying Gao and Dezhi Luo and Yaoyao Qian and Lianyu Huang and Zelong Hong and Jiahui Ge and Qianli Ma and Hang He and Yifan Zhou and Lingzi Guo and Lantao Mei and Jiachen Li and Hanwen Xing and Tianqi Zhao and Fengyuan Yu and Weihang Xiao and Yizheng Jiao and Jianheng Hou and Danyang Zhang and Pengcheng Xu and Boyang Zhong and Zehong Zhao and Gaoyun Fang and John Kitaoka and Yile Xu and Hua Xu and Kenton Blacutt and Tin Nguyen and Siyuan Song and Haoran Sun and Shaoyue Wen and Linyang He and Runming Wang and Yanzhi Wang and Mengyue Yang and Ziqiao Ma and Raphaël Millière and Freda Shi and Nuno Vasconcelos and Daniel Khashabi and Alan Yuille and Yilun Du and Ziming Liu and Bo Li and Dahua Lin and Ziwei Liu and Vikash Kumar and Yijiang Li and Lei Yang and Zhongang Cai and Hokin Deng},
|
| 138 |
+
journal = {arXiv preprint arXiv:2602.20159},
|
| 139 |
+
year = {2026}
|
| 140 |
}
|
| 141 |
"""
|