worldbench committed
Commit 01ba831 · verified · 1 Parent(s): 5218bcd

update through web
.gitignore ADDED
@@ -0,0 +1,14 @@
+ auto_evals/
+ venv/
+ __pycache__/
+ .env
+ .ipynb_checkpoints
+ *ipynb
+ .vscode/
+
+ eval-queue/
+ eval-results/
+ eval-queue-bk/
+ eval-results-bk/
+ logs/
+ .history/
.pre-commit-config.yaml ADDED
@@ -0,0 +1,53 @@
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ default_language_version:
+   python: python3
+
+ ci:
+   autofix_prs: true
+   autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
+   autoupdate_schedule: quarterly
+
+ repos:
+   - repo: https://github.com/pre-commit/pre-commit-hooks
+     rev: v4.3.0
+     hooks:
+       - id: check-yaml
+       - id: check-case-conflict
+       - id: detect-private-key
+       - id: check-added-large-files
+         args: ['--maxkb=1000']
+       - id: requirements-txt-fixer
+       - id: end-of-file-fixer
+       - id: trailing-whitespace
+
+   - repo: https://github.com/PyCQA/isort
+     rev: 5.12.0
+     hooks:
+       - id: isort
+         name: Format imports
+
+   - repo: https://github.com/psf/black
+     rev: 22.12.0
+     hooks:
+       - id: black
+         name: Format code
+         additional_dependencies: ['click==8.0.2']
+
+   - repo: https://github.com/charliermarsh/ruff-pre-commit
+     # Ruff version.
+     rev: 'v0.0.267'
+     hooks:
+       - id: ruff
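To exercise these hooks locally, the usual workflow is to register the git hook and run all hooks once. A minimal sketch (assuming the `pre-commit` package is installed in the environment, e.g. via `pip install pre-commit`; it is not part of this commit):

```python
# Minimal sketch: set up and run the hooks defined in .pre-commit-config.yaml.
# Assumes the pre-commit package is installed; both commands are standard
# pre-commit CLI entry points, invoked here through subprocess.
import subprocess

subprocess.run(["pre-commit", "install"], check=True)              # register the git hook
subprocess.run(["pre-commit", "run", "--all-files"], check=False)  # exits non-zero when hooks modify files
```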
Makefile ADDED
@@ -0,0 +1,13 @@
+ .PHONY: style quality
+
+
+ style:
+ 	python -m black --line-length 119 .
+ 	python -m isort .
+ 	ruff check --fix .
+
+
+ quality:
+ 	python -m black --check --line-length 119 .
+ 	python -m isort --check-only .
+ 	ruff check .
README copy.md ADDED
@@ -0,0 +1,48 @@
+ ---
+ title: Vla4ad
+ emoji: 🥇
+ colorFrom: green
+ colorTo: indigo
+ sdk: gradio
+ app_file: app.py
+ pinned: true
+ license: apache-2.0
+ short_description: Duplicate this leaderboard to initialize your own!
+ sdk_version: 5.43.1
+ tags:
+   - leaderboard
+ ---
+
+ # Start the configuration
+
+ Most of the variables to change for a default leaderboard are in `src/env.py` (replace the path for your leaderboard) and `src/about.py` (for tasks).
+
+ Results files should have the following format and be stored as JSON files:
+ ```json
+ {
+     "config": {
+         "model_dtype": "torch.float16", # or torch.bfloat16 or 8bit or 4bit
+         "model_name": "path of the model on the hub: org/model",
+         "model_sha": "revision on the hub",
+     },
+     "results": {
+         "task_name": {
+             "metric_name": score,
+         },
+         "task_name2": {
+             "metric_name": score,
+         }
+     }
+ }
+ ```
+
+ Request files are created automatically by this tool.
+
+ If you encounter a problem on the Space, don't hesitate to restart it to remove the eval-queue, eval-queue-bk, eval-results, and eval-results-bk folders it creates.
+
+ # Code logic for more complex edits
+
+ You'll find:
+ - the main table's column names and properties in `src/display/utils.py`
+ - the logic to read all results and request files, then convert them into dataframe rows, in `src/leaderboard/read_evals.py` and `src/populate.py`
+ - the logic to allow or filter submissions in `src/submission/submit.py` and `src/submission/check_validity.py`
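As a hedged illustration of the results-file schema documented in `README copy.md` above, the sketch below writes one such file; the model path, revision, task names, and scores are placeholders, not values from this repository:

```python
# Write one leaderboard results file in the documented schema.
import json

result = {
    "config": {
        "model_dtype": "torch.float16",  # or torch.bfloat16 / 8bit / 4bit
        "model_name": "org/model",       # placeholder hub path
        "model_sha": "main",             # placeholder revision
    },
    "results": {
        "task_name": {"metric_name": 0.5},   # placeholder score
        "task_name2": {"metric_name": 0.7},
    },
}

with open("results/org__model.json", "w", encoding="utf-8") as f:
    json.dump(result, f, indent=2)
```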
app.py ADDED
@@ -0,0 +1,559 @@
+ import os
+ import json
+ import pandas as pd
+ import matplotlib
+ matplotlib.use('Agg')
+ import matplotlib.pyplot as plt
+ import gradio as gr
+
+ # Load data
+ def load_json(filename):
+     possible_paths = [
+         f'results/{filename}',
+         filename,
+         os.path.join(os.path.dirname(__file__), 'results', filename),
+         os.path.join(os.path.dirname(__file__), filename)
+     ]
+     for path in possible_paths:
+         if os.path.exists(path):
+             with open(path, 'r', encoding='utf-8') as f:
+                 return json.load(f)
+     raise FileNotFoundError(f"Cannot find {filename}")
+
+ # Load all data
+ va_data = load_json('va_models.json')
+ vla_data = load_json('vla_models.json')
+ datasets_data = load_json('datasets.json')
+ nuscenes_data = load_json('nuscenes_results.json')
+ wod_data = load_json('wod_results.json')
+ navsim_data = load_json('navsim_results.json')
+ bench2drive_data = load_json('bench2drive_results.json')
+
+ df_va = pd.DataFrame(va_data)
+ df_vla = pd.DataFrame(vla_data)
+ df_datasets = pd.DataFrame(datasets_data)
+ df_nuscenes = pd.DataFrame(nuscenes_data)
+ df_wod = pd.DataFrame(wod_data)
+ df_navsim = pd.DataFrame(navsim_data)
+ df_bench2drive = pd.DataFrame(bench2drive_data)
+
+ INPUT_ICONS = {'camera': '📷', 'lidar': '🔍', 'status': '⚙️', 'prompt': '💭',
+                'instruction': '📝', 'scene': '🌆', 'traffic': '🚦', 'context': '📚'}
+
+ ACTION_LABELS = {'RL': 'Policy w/ RL', 'REG': 'Decoder+MLP', 'SEL': 'Traj. Selection',
+                  'GEN': 'Generative Model', 'LH': 'Language Head'}
+
+ # Formatting functions
+ def format_inputs(inputs):
+     if isinstance(inputs, list):
+         return ' '.join([INPUT_ICONS.get(inp, inp) for inp in inputs])
+     return str(inputs)
+
+ def format_datasets(datasets):
+     if isinstance(datasets, list):
+         return ', '.join(datasets[:3])
+     return str(datasets)
+
+ # VA Models
+ def update_va_table(category, venue, action, search):
+     df = df_va.copy()
+     if category != "All":
+         df = df[df['category'] == category]
+     if venue:
+         df = df[df['venue'].str.contains(venue, case=False, na=False)]
+     if action != "All":
+         df = df[df['action'] == action]
+     if search:
+         mask = df['model'].str.contains(search, case=False, na=False) | df['vision'].str.contains(search, case=False, na=False)
+         df = df[mask]
+     display_df = df[['id', 'model', 'venue', 'input', 'dataset', 'vision', 'action', 'output', 'category']].copy()
+     display_df['input'] = display_df['input'].apply(format_inputs)
+     display_df['dataset'] = display_df['dataset'].apply(format_datasets)
+     display_df['action'] = display_df['action'].map(ACTION_LABELS)
+     return display_df, f"**📊 Statistics:** Total {len(df)} models | Categories: {df['category'].nunique()}"
+
+ def create_va_plot(category):
+     df = df_va.copy()
+     if category != "All":
+         df = df[df['category'] == category]
+     bg_color, panel_color = "#0e1117", "#161b22"
+     bar_colors = ['#4cc9f0', '#f72585', '#7209b7', '#3a0ca3', '#4361ee', '#4895ef', '#560bad']
+     text_color, grid_color = "#c9d1d9", "#30363d"
+
+     fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5))
+     fig.patch.set_facecolor(bg_color)
+
+     category_counts = df['category'].value_counts()
+     ax1.barh(category_counts.index, category_counts.values, color=bar_colors[:len(category_counts)])
+     ax1.set_facecolor(panel_color)
+     ax1.set_xlabel('Count', color=text_color, fontsize=11)
+     ax1.set_title('Models by Category', color=text_color, fontsize=13, fontweight='bold')
+     ax1.tick_params(colors=text_color)
+     ax1.grid(axis='x', linestyle='--', alpha=0.3, color=grid_color)
+     ax1.xaxis.set_major_locator(plt.MaxNLocator(integer=True))
+     for spine in ['top', 'right', 'left']:
+         ax1.spines[spine].set_visible(False)
+     ax1.spines['bottom'].set_color(grid_color)
+
+     action_counts = df['action'].value_counts()
+     ax2.bar(range(len(action_counts)), action_counts.values, color=bar_colors[:len(action_counts)])
+     ax2.set_facecolor(panel_color)
+     ax2.set_xticks(range(len(action_counts)))
+     ax2.set_xticklabels([ACTION_LABELS.get(a, a) for a in action_counts.index], rotation=45, ha='right')
+     ax2.set_ylabel('Count', color=text_color, fontsize=11)
+     ax2.set_title('Models by Action Type', color=text_color, fontsize=13, fontweight='bold')
+     ax2.tick_params(colors=text_color)
+     ax2.grid(axis='y', linestyle='--', alpha=0.3, color=grid_color)
+     ax2.yaxis.set_major_locator(plt.MaxNLocator(integer=True))
+     for spine in ['top', 'right']:
+         ax2.spines[spine].set_visible(False)
+     ax2.spines['left'].set_color(grid_color)
+     ax2.spines['bottom'].set_color(grid_color)
+
+     plt.tight_layout()
+     result = fig
+     plt.close('all')
+     return result
+
+ # VLA Models
+ def update_vla_table(category, venue, language, search):
+     df = df_vla.copy()
+     if category != "All":
+         df = df[df['category'] == category]
+     if venue:
+         df = df[df['venue'].str.contains(venue, case=False, na=False)]
+     if language:
+         df = df[df['language'].str.contains(language, case=False, na=False)]
+     if search:
+         mask = df['model'].str.contains(search, case=False, na=False) | df['vision'].str.contains(search, case=False, na=False) | df['language'].str.contains(search, case=False, na=False)
+         df = df[mask]
+     display_df = df[['id', 'model', 'venue', 'input', 'dataset', 'vision', 'language', 'action', 'output', 'category']].copy()
+     display_df['input'] = display_df['input'].apply(format_inputs)
+     display_df['dataset'] = display_df['dataset'].apply(format_datasets)
+     display_df['action'] = display_df['action'].map(ACTION_LABELS)
+     return display_df, f"**📊 Statistics:** Total {len(df)} models | Categories: {df['category'].nunique()}"
+
+ def create_vla_plot(category):
+     df = df_vla.copy()
+     if category != "All":
+         df = df[df['category'] == category]
+     bg_color, panel_color = "#0e1117", "#161b22"
+     bar_colors = ['#4cc9f0', '#f72585', '#7209b7', '#3a0ca3']
+     text_color, grid_color = "#c9d1d9", "#30363d"
+
+     fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5))
+     fig.patch.set_facecolor(bg_color)
+
+     category_counts = df['category'].value_counts()
+     ax1.barh(category_counts.index, category_counts.values, color=bar_colors[:len(category_counts)])
+     ax1.set_facecolor(panel_color)
+     ax1.set_xlabel('Count', color=text_color, fontsize=11)
+     ax1.set_title('VLA Models by Category', color=text_color, fontsize=13, fontweight='bold')
+     ax1.tick_params(colors=text_color)
+     ax1.grid(axis='x', linestyle='--', alpha=0.3, color=grid_color)
+     ax1.xaxis.set_major_locator(plt.MaxNLocator(integer=True))
+     for spine in ['top', 'right', 'left']:
+         ax1.spines[spine].set_visible(False)
+     ax1.spines['bottom'].set_color(grid_color)
+
+     language_counts = df['language'].value_counts().head(10)
+     ax2.bar(range(len(language_counts)), language_counts.values, color=bar_colors[:len(language_counts)])
+     ax2.set_facecolor(panel_color)
+     ax2.set_xticks(range(len(language_counts)))
+     ax2.set_xticklabels(language_counts.index, rotation=45, ha='right')
+     ax2.set_ylabel('Count', color=text_color, fontsize=11)
+     ax2.set_title('Top Language Models', color=text_color, fontsize=13, fontweight='bold')
+     ax2.tick_params(colors=text_color)
+     ax2.grid(axis='y', linestyle='--', alpha=0.3, color=grid_color)
+     ax2.yaxis.set_major_locator(plt.MaxNLocator(integer=True))
+     for spine in ['top', 'right']:
+         ax2.spines[spine].set_visible(False)
+     ax2.spines['left'].set_color(grid_color)
+     ax2.spines['bottom'].set_color(grid_color)
+
+     plt.tight_layout()
+     result = fig
+     plt.close('all')
+     return result
+
+ # Datasets
+ def update_datasets_table(category, year, search):
+     df = df_datasets.copy()
+     if category != "All":
+         df = df[df['category'] == category]
+     if year:
+         try:
+             df = df[df['year'] == int(year)]
+         except ValueError:
+             pass
+     if search:
+         mask = df['dataset'].str.contains(search, case=False, na=False) | df['sensor'].str.contains(search, case=False, na=False)
+         df = df[mask]
+     return df, f"**📊 Statistics:** Total {len(df)} datasets | Years: {df['year'].nunique()}"
+
+ # Benchmark tables update functions
+ def update_nuscenes_table(category, search):
+     df = df_nuscenes.copy()
+     if category != "All":
+         df = df[df['category'] == category]
+     if search:
+         mask = df['model'].str.contains(search, case=False, na=False)
+         df = df[mask]
+     return df, f"**📊 Statistics:** Total {len(df)} models"
+
+ def create_nuscenes_plot(category):
+     """Create the nuScenes statistics figure."""
+     df = df_nuscenes.copy()
+     if category != "All":
+         df = df[df['category'] == category]
+
+     bg_color, panel_color = "#0e1117", "#161b22"
+     bar_colors = ['#4cc9f0', '#f72585', '#7209b7', '#3a0ca3']
+     text_color, grid_color = "#c9d1d9", "#30363d"
+
+     fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5))
+     fig.patch.set_facecolor(bg_color)
+
+     # 1. Counts by category
+     category_counts = df['category'].value_counts()
+     ax1.bar(range(len(category_counts)), category_counts.values, color=bar_colors[:len(category_counts)])
+     ax1.set_facecolor(panel_color)
+     ax1.set_xticks(range(len(category_counts)))
+     ax1.set_xticklabels(category_counts.index, rotation=15, ha='right')
+     ax1.set_ylabel('Count', color=text_color, fontsize=11)
+     ax1.set_title('Models by Category', color=text_color, fontsize=13, fontweight='bold')
+     ax1.tick_params(colors=text_color)
+     ax1.grid(axis='y', linestyle='--', alpha=0.3, color=grid_color)
+     ax1.yaxis.set_major_locator(plt.MaxNLocator(integer=True))
+     for spine in ['top', 'right']:
+         ax1.spines[spine].set_visible(False)
+     ax1.spines['left'].set_color(grid_color)
+     ax1.spines['bottom'].set_color(grid_color)
+
+     # 2. Top 10 models ranked by L2 Avg
+     top_models = df.nsmallest(10, 'l2_avg')[['model', 'l2_avg']].dropna()
+     ax2.barh(range(len(top_models)), top_models['l2_avg'].values, color=bar_colors[2])
+     ax2.set_facecolor(panel_color)
+     ax2.set_yticks(range(len(top_models)))
+     ax2.set_yticklabels(top_models['model'].values, fontsize=9)
+     ax2.set_xlabel('L2 Avg (m)', color=text_color, fontsize=11)
+     ax2.set_title('Top 10 Models by L2 Error', color=text_color, fontsize=13, fontweight='bold')
+     ax2.tick_params(colors=text_color)
+     ax2.grid(axis='x', linestyle='--', alpha=0.3, color=grid_color)
+     ax2.invert_yaxis()
+     for spine in ['top', 'right', 'left']:
+         ax2.spines[spine].set_visible(False)
+     ax2.spines['bottom'].set_color(grid_color)
+
+     plt.tight_layout()
+     result = fig
+     plt.close('all')
+     return result
+
+ def update_wod_table(category, search):
+     df = df_wod.copy()
+     if category != "All":
+         df = df[df['category'] == category]
+     if search:
+         mask = df['model'].str.contains(search, case=False, na=False)
+         df = df[mask]
+     return df, f"**📊 Statistics:** Total {len(df)} models"
+
+ def create_wod_plot(category):
+     """Create the WOD statistics figure."""
+     df = df_wod.copy()
+     if category != "All":
+         df = df[df['category'] == category]
+
+     bg_color, panel_color = "#0e1117", "#161b22"
+     bar_colors = ['#4cc9f0', '#f72585', '#7209b7', '#3a0ca3']
+     text_color, grid_color = "#c9d1d9", "#30363d"
+
+     fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5))
+     fig.patch.set_facecolor(bg_color)
+
+     # 1. Counts by category
+     category_counts = df['category'].value_counts()
+     ax1.bar(range(len(category_counts)), category_counts.values, color=bar_colors[:len(category_counts)])
+     ax1.set_facecolor(panel_color)
+     ax1.set_xticks(range(len(category_counts)))
+     ax1.set_xticklabels(category_counts.index, rotation=15, ha='right')
+     ax1.set_ylabel('Count', color=text_color, fontsize=11)
+     ax1.set_title('Models by Category', color=text_color, fontsize=13, fontweight='bold')
+     ax1.tick_params(colors=text_color)
+     ax1.grid(axis='y', linestyle='--', alpha=0.3, color=grid_color)
+     ax1.yaxis.set_major_locator(plt.MaxNLocator(integer=True))
+     for spine in ['top', 'right']:
+         ax1.spines[spine].set_visible(False)
+     ax1.spines['left'].set_color(grid_color)
+     ax1.spines['bottom'].set_color(grid_color)
+
+     # 2. Top models ranked by RFS Overall
+     top_models = df.nlargest(10, 'rfs_overall')[['model', 'rfs_overall']].dropna()
+     if len(top_models) > 0:
+         ax2.barh(range(len(top_models)), top_models['rfs_overall'].values, color=bar_colors[1])
+         ax2.set_facecolor(panel_color)
+         ax2.set_yticks(range(len(top_models)))
+         ax2.set_yticklabels(top_models['model'].values, fontsize=9)
+         ax2.set_xlabel('RFS Overall', color=text_color, fontsize=11)
+         ax2.set_title('Top Models by RFS Overall', color=text_color, fontsize=13, fontweight='bold')
+         ax2.tick_params(colors=text_color)
+         ax2.grid(axis='x', linestyle='--', alpha=0.3, color=grid_color)
+         ax2.invert_yaxis()
+     else:
+         ax2.text(0.5, 0.5, 'No data available', ha='center', va='center',
+                  color=text_color, fontsize=12, transform=ax2.transAxes)
+         ax2.set_facecolor(panel_color)
+
+     for spine in ['top', 'right', 'left']:
+         ax2.spines[spine].set_visible(False)
+     ax2.spines['bottom'].set_color(grid_color)
+
+     plt.tight_layout()
+     result = fig
+     plt.close('all')
+     return result
+
+ def update_navsim_table(category, search):
+     df = df_navsim.copy()
+     if category != "All":
+         df = df[df['category'] == category]
+     if search:
+         mask = df['model'].str.contains(search, case=False, na=False)
+         df = df[mask]
+     return df, f"**📊 Statistics:** Total {len(df)} models"
+
+ def create_navsim_plot(category):
+     """Create the NAVSIM statistics figure."""
+     df = df_navsim.copy()
+     if category != "All":
+         df = df[df['category'] == category]
+
+     bg_color, panel_color = "#0e1117", "#161b22"
+     bar_colors = ['#4cc9f0', '#f72585', '#7209b7', '#3a0ca3']
+     text_color, grid_color = "#c9d1d9", "#30363d"
+
+     fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5))
+     fig.patch.set_facecolor(bg_color)
+
+     # 1. Counts by category
+     category_counts = df['category'].value_counts()
+     ax1.bar(range(len(category_counts)), category_counts.values, color=bar_colors[:len(category_counts)])
+     ax1.set_facecolor(panel_color)
+     ax1.set_xticks(range(len(category_counts)))
+     ax1.set_xticklabels(category_counts.index, rotation=15, ha='right')
+     ax1.set_ylabel('Count', color=text_color, fontsize=11)
+     ax1.set_title('Models by Category', color=text_color, fontsize=13, fontweight='bold')
+     ax1.tick_params(colors=text_color)
+     ax1.grid(axis='y', linestyle='--', alpha=0.3, color=grid_color)
+     ax1.yaxis.set_major_locator(plt.MaxNLocator(integer=True))
+     for spine in ['top', 'right']:
+         ax1.spines[spine].set_visible(False)
+     ax1.spines['left'].set_color(grid_color)
+     ax1.spines['bottom'].set_color(grid_color)
+
+     # 2. Top 10 models ranked by PDMS
+     top_models = df.nlargest(10, 'pdms')[['model', 'pdms']]
+     ax2.barh(range(len(top_models)), top_models['pdms'].values, color=bar_colors[0])
+     ax2.set_facecolor(panel_color)
+     ax2.set_yticks(range(len(top_models)))
+     ax2.set_yticklabels(top_models['model'].values, fontsize=9)
+     ax2.set_xlabel('PDMS Score', color=text_color, fontsize=11)
+     ax2.set_title('Top 10 Models by PDMS', color=text_color, fontsize=13, fontweight='bold')
+     ax2.tick_params(colors=text_color)
+     ax2.grid(axis='x', linestyle='--', alpha=0.3, color=grid_color)
+     ax2.invert_yaxis()
+     for spine in ['top', 'right', 'left']:
+         ax2.spines[spine].set_visible(False)
+     ax2.spines['bottom'].set_color(grid_color)
+
+     plt.tight_layout()
+     result = fig
+     plt.close('all')
+     return result
+
+ def update_bench2drive_table(category, search):
+     df = df_bench2drive.copy()
+     if category != "All":
+         df = df[df['category'] == category]
+     if search:
+         mask = df['model'].str.contains(search, case=False, na=False)
+         df = df[mask]
+     return df, f"**📊 Statistics:** Total {len(df)} models"
+
+ def create_bench2drive_plot(category):
+     """Create the Bench2Drive statistics figure."""
+     df = df_bench2drive.copy()
+     if category != "All":
+         df = df[df['category'] == category]
+
+     bg_color, panel_color = "#0e1117", "#161b22"
+     bar_colors = ['#4cc9f0', '#f72585', '#7209b7', '#3a0ca3']
+     text_color, grid_color = "#c9d1d9", "#30363d"
+
+     fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5))
+     fig.patch.set_facecolor(bg_color)
+
+     # 1. Counts by category
+     category_counts = df['category'].value_counts()
+     ax1.bar(range(len(category_counts)), category_counts.values, color=bar_colors[:len(category_counts)])
+     ax1.set_facecolor(panel_color)
+     ax1.set_xticks(range(len(category_counts)))
+     ax1.set_xticklabels(category_counts.index, rotation=15, ha='right')
+     ax1.set_ylabel('Count', color=text_color, fontsize=11)
+     ax1.set_title('Models by Category', color=text_color, fontsize=13, fontweight='bold')
+     ax1.tick_params(colors=text_color)
+     ax1.grid(axis='y', linestyle='--', alpha=0.3, color=grid_color)
+     ax1.yaxis.set_major_locator(plt.MaxNLocator(integer=True))
+     for spine in ['top', 'right']:
+         ax1.spines[spine].set_visible(False)
+     ax1.spines['left'].set_color(grid_color)
+     ax1.spines['bottom'].set_color(grid_color)
+
+     # 2. Top 10 models ranked by DS
+     top_models = df.nlargest(10, 'ds')[['model', 'ds']]
+     ax2.barh(range(len(top_models)), top_models['ds'].values, color=bar_colors[3])
+     ax2.set_facecolor(panel_color)
+     ax2.set_yticks(range(len(top_models)))
+     ax2.set_yticklabels(top_models['model'].values, fontsize=9)
+     ax2.set_xlabel('Driving Score', color=text_color, fontsize=11)
+     ax2.set_title('Top 10 Models by DS', color=text_color, fontsize=13, fontweight='bold')
+     ax2.tick_params(colors=text_color)
+     ax2.grid(axis='x', linestyle='--', alpha=0.3, color=grid_color)
+     ax2.invert_yaxis()
+     for spine in ['top', 'right', 'left']:
+         ax2.spines[spine].set_visible(False)
+     ax2.spines['bottom'].set_color(grid_color)
+
+     plt.tight_layout()
+     result = fig
+     plt.close('all')
+     return result
+
+ # Pre-generate initial plots
+ initial_va_plot = create_va_plot("All")
+ initial_vla_plot = create_vla_plot("All")
+ initial_nuscenes_plot = create_nuscenes_plot("All")
+ initial_wod_plot = create_wod_plot("All")
+ initial_navsim_plot = create_navsim_plot("All")
+ initial_bench2drive_plot = create_bench2drive_plot("All")
+
+ # Gradio UI
+ with gr.Blocks(css="#title {text-align: center;} .gradio-container {max-width: 100% !important;}") as demo:
+     gr.Markdown("# 🚗 VLA for Autonomous Driving: Model Leaderboard", elem_id="title")
+     gr.Markdown("### 📄 Survey: *Vision-Language-Action Models for Autonomous Driving*")
+
+     with gr.Tabs():
+         # Tab 1: VA Models
+         with gr.Tab("Vision-Action Models (VA)"):
+             gr.Markdown("### Table 1: Vision-Action Models in Autonomous Driving")
+             va_stats = gr.Markdown(f"**📊 Statistics:** Total {len(df_va)} models")
+             with gr.Row():
+                 va_category = gr.Dropdown(label="Category", choices=["All"] + sorted(df_va['category'].unique().tolist()), value="All")
+                 va_venue = gr.Textbox(label="Filter by Venue", placeholder="e.g., CVPR, ICCV...")
+                 va_action = gr.Dropdown(label="Action Type", choices=["All"] + sorted(df_va['action'].unique().tolist()), value="All")
+                 va_search = gr.Textbox(label="Search", placeholder="e.g., TransFuser...")
+                 va_update_btn = gr.Button("🔍 Update", variant="primary")
+             va_table = gr.Dataframe(value=df_va[['id', 'model', 'venue', 'input', 'dataset', 'vision', 'action', 'output', 'category']], label="VA Models", interactive=False, wrap=True)
+             va_plot = gr.Plot(label="Statistics", value=initial_va_plot, format="png")
+             va_update_btn.click(fn=lambda cat, ven, act, search: (*update_va_table(cat, ven, act, search), create_va_plot(cat)),
+                                 inputs=[va_category, va_venue, va_action, va_search], outputs=[va_table, va_stats, va_plot])
+
+         # Tab 2: VLA Models
+         with gr.Tab("Vision-Language-Action Models (VLA)"):
+             gr.Markdown("### Table 3: Vision-Language-Action Models in Autonomous Driving")
+             vla_stats = gr.Markdown(f"**📊 Statistics:** Total {len(df_vla)} models")
+             with gr.Row():
+                 vla_category = gr.Dropdown(label="Category", choices=["All"] + sorted(df_vla['category'].unique().tolist()), value="All")
+                 vla_venue = gr.Textbox(label="Filter by Venue", placeholder="e.g., CVPR...")
+                 vla_language = gr.Textbox(label="Filter by Language Model", placeholder="e.g., Qwen...")
+                 vla_search = gr.Textbox(label="Search", placeholder="e.g., AutoVLA...")
+                 vla_update_btn = gr.Button("🔍 Update", variant="primary")
+             vla_table = gr.Dataframe(value=df_vla[['id', 'model', 'venue', 'input', 'dataset', 'vision', 'language', 'action', 'output', 'category']], label="VLA Models", interactive=False, wrap=True)
+             vla_plot = gr.Plot(label="Statistics", value=initial_vla_plot, format="png")
+             vla_update_btn.click(fn=lambda cat, ven, lang, search: (*update_vla_table(cat, ven, lang, search), create_vla_plot(cat)),
+                                  inputs=[vla_category, vla_venue, vla_language, vla_search], outputs=[vla_table, vla_stats, vla_plot])
+
+         # Tab 3: Datasets
+         with gr.Tab("Datasets & Benchmarks"):
+             gr.Markdown("### Table 4: Summary of Datasets & Benchmarks")
+             datasets_stats = gr.Markdown(f"**📊 Statistics:** Total {len(df_datasets)} datasets")
+             with gr.Row():
+                 datasets_category = gr.Dropdown(label="Category", choices=["All"] + sorted(df_datasets['category'].unique().tolist()), value="All")
+                 datasets_year = gr.Textbox(label="Filter by Year", placeholder="e.g., 2024...")
+                 datasets_search = gr.Textbox(label="Search", placeholder="e.g., nuScenes...")
+                 datasets_update_btn = gr.Button("🔍 Update", variant="primary")
+             datasets_table = gr.Dataframe(value=df_datasets, label="Datasets", interactive=False, wrap=True)
+             datasets_update_btn.click(fn=update_datasets_table, inputs=[datasets_category, datasets_year, datasets_search], outputs=[datasets_table, datasets_stats])
+
+         # Tab 4: nuScenes
+         with gr.Tab("nuScenes Open-Loop"):
+             gr.Markdown("### Table 5: Open-Loop Planning Results on nuScenes")
+             gr.Markdown("**Metrics:** L2 Error (m) ↓ | Collision Rate ↓")
+             nuscenes_stats = gr.Markdown(f"**📊 Statistics:** Total {len(df_nuscenes)} models")
+             with gr.Row():
+                 nuscenes_category = gr.Dropdown(label="Category", choices=["All", "Vision-Action", "Vision-Language-Action"], value="All")
+                 nuscenes_search = gr.Textbox(label="Search Model", placeholder="e.g., UniAD, EMMA...")
+                 nuscenes_update_btn = gr.Button("🔍 Update", variant="primary")
+             nuscenes_table = gr.Dataframe(value=df_nuscenes, label="nuScenes Results", interactive=False, wrap=True)
+             nuscenes_plot = gr.Plot(label="Statistics", value=initial_nuscenes_plot, format="png")
+             nuscenes_update_btn.click(fn=lambda cat, search: (*update_nuscenes_table(cat, search), create_nuscenes_plot(cat)),
+                                       inputs=[nuscenes_category, nuscenes_search],
+                                       outputs=[nuscenes_table, nuscenes_stats, nuscenes_plot])
+
+         # Tab 5: WOD-E2E
+         with gr.Tab("WOD-E2E"):
+             gr.Markdown("### Table 6: Results on WOD-E2E Test Split")
+             gr.Markdown("**Metrics:** RFS (Overall/Spotlight) ↑ | ADE (5s/3s) ↓")
+             wod_stats = gr.Markdown(f"**📊 Statistics:** Total {len(df_wod)} models")
+             with gr.Row():
+                 wod_category = gr.Dropdown(label="Category", choices=["All", "Vision-Action", "Vision-Language-Action"], value="All")
+                 wod_search = gr.Textbox(label="Search Model", placeholder="e.g., AutoVLA...")
+                 wod_update_btn = gr.Button("🔍 Update", variant="primary")
+             wod_table = gr.Dataframe(value=df_wod, label="WOD-E2E Results", interactive=False, wrap=True)
+             wod_plot = gr.Plot(label="Statistics", value=initial_wod_plot, format="png")
+             wod_update_btn.click(fn=lambda cat, search: (*update_wod_table(cat, search), create_wod_plot(cat)),
+                                  inputs=[wod_category, wod_search],
+                                  outputs=[wod_table, wod_stats, wod_plot])
+
+         # Tab 6: NAVSIM
+         with gr.Tab("NAVSIM Closed-Loop"):
+             gr.Markdown("### Table 7: Closed-Loop Results on NAVSIM")
+             gr.Markdown("**Metrics:** NC/DAC/TTC/Comf/EP/PDMS ↑")
+             navsim_stats = gr.Markdown(f"**📊 Statistics:** Total {len(df_navsim)} models")
+             with gr.Row():
+                 navsim_category = gr.Dropdown(label="Category", choices=["All", "Vision-Action", "Vision-Language-Action"], value="All")
+                 navsim_search = gr.Textbox(label="Search Model", placeholder="e.g., ReflectDrive...")
+                 navsim_update_btn = gr.Button("🔍 Update", variant="primary")
+             navsim_table = gr.Dataframe(value=df_navsim, label="NAVSIM Results", interactive=False, wrap=True)
+             navsim_plot = gr.Plot(label="Statistics", value=initial_navsim_plot, format="png")
+             navsim_update_btn.click(fn=lambda cat, search: (*update_navsim_table(cat, search), create_navsim_plot(cat)),
+                                     inputs=[navsim_category, navsim_search],
+                                     outputs=[navsim_table, navsim_stats, navsim_plot])
+
+         # Tab 7: Bench2Drive
+         with gr.Tab("Bench2Drive"):
+             gr.Markdown("### Table 8: Closed-Loop & Open-Loop Results on Bench2Drive")
+             gr.Markdown("**Metrics:** DS/SR ↑ | L2 Avg ↓")
+             bench2drive_stats = gr.Markdown(f"**📊 Statistics:** Total {len(df_bench2drive)} models")
+             with gr.Row():
+                 bench2drive_category = gr.Dropdown(label="Category", choices=["All", "Vision-Action", "Vision-Language-Action"], value="All")
+                 bench2drive_search = gr.Textbox(label="Search Model", placeholder="e.g., SimLingo...")
+                 bench2drive_update_btn = gr.Button("🔍 Update", variant="primary")
+             bench2drive_table = gr.Dataframe(value=df_bench2drive, label="Bench2Drive Results", interactive=False, wrap=True)
+             bench2drive_plot = gr.Plot(label="Statistics", value=initial_bench2drive_plot, format="png")
+             bench2drive_update_btn.click(fn=lambda cat, search: (*update_bench2drive_table(cat, search), create_bench2drive_plot(cat)),
+                                          inputs=[bench2drive_category, bench2drive_search],
+                                          outputs=[bench2drive_table, bench2drive_stats, bench2drive_plot])
+
+     gr.Markdown("""
+ ---
+ **Legend:** 📷 Camera | 🔍 LiDAR | ⚙️ Status | 💭 Prompt | 📝 Instruction | 🌆 Scene | 🚦 Traffic | 📚 Context
+
+ **Action Types:** RL=Reinforcement Learning | REG=Decoder+MLP | SEL=Selection | GEN=Generative | LH=Language Head
+
+ 📖 **Project:** [https://worldbench.github.io/vla4ad](https://worldbench.github.io/vla4ad) | 🔗 **GitHub:** [https://github.com/worldbench/awesome-vla-for-ad](https://github.com/worldbench/awesome-vla-for-ad)
+     """)
+
+ if __name__ == "__main__":
+     demo.launch()
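The table-update helpers in `app.py` can be exercised without starting the UI (importing the module builds the Blocks layout, but only `__main__` calls `demo.launch()`). A minimal sketch, assuming all of the `results/*.json` files referenced by `load_json` are present (this commit shows only some of them):

```python
# Smoke-test the filtering logic from app.py without launching Gradio.
from app import update_nuscenes_table, update_bench2drive_table

table, stats = update_nuscenes_table("Vision-Language-Action", "EMMA")
print(stats)                         # Markdown statistics string
print(table[["model", "l2_avg"]])    # filtered nuScenes rows

table, stats = update_bench2drive_table("All", "SimLingo")
print(table[["model", "ds", "sr"]])  # Bench2Drive driving score / success rate
```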
pyproject.toml ADDED
@@ -0,0 +1,13 @@
+ [tool.ruff]
+ # Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
+ select = ["E", "F"]
+ ignore = ["E501"] # line too long (black is taking care of this)
+ line-length = 119
+ fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
+
+ [tool.isort]
+ profile = "black"
+ line_length = 119
+
+ [tool.black]
+ line-length = 119
requirements.txt ADDED
@@ -0,0 +1,16 @@
+ APScheduler
+ black
+ datasets
+ gradio
+ gradio[oauth]
+ gradio_leaderboard==0.0.13
+ gradio_client
+ huggingface-hub>=0.18.0
+ matplotlib
+ numpy
+ pandas
+ python-dateutil
+ tqdm
+ transformers
+ tokenizers>=0.15.0
+ sentencepiece
results/bench2drive_results.json ADDED
@@ -0,0 +1,22 @@
+ [
+ {"id": 1, "model": "TCP", "year": 2022, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "ds": 40.70, "sr": 15.00, "efficiency": 54.26, "comfort": 47.80, "l2_avg": 1.70, "category": "Vision-Action"},
+ {"id": 2, "model": "ThinkTwice", "year": 2023, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "ds": 62.44, "sr": 31.23, "efficiency": 69.33, "comfort": 16.22, "l2_avg": 0.95, "category": "Vision-Action"},
+ {"id": 3, "model": "DriveAdapter", "year": 2023, "input": "camera, lidar", "vision": "ResNet", "language": "-", "action": "REG", "ds": 64.22, "sr": 33.08, "efficiency": 70.22, "comfort": 16.01, "l2_avg": 1.01, "category": "Vision-Action"},
+ {"id": 4, "model": "UniAD-Base", "year": 2023, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "ds": 45.81, "sr": 16.36, "efficiency": 129.21, "comfort": 43.58, "l2_avg": 0.73, "category": "Vision-Action"},
+ {"id": 5, "model": "VAD", "year": 2023, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "ds": 42.35, "sr": 15.00, "efficiency": 157.94, "comfort": 46.01, "l2_avg": 0.91, "category": "Vision-Action"},
+ {"id": 6, "model": "GenAD", "year": 2024, "input": "camera", "vision": "ResNet", "language": "-", "action": "GEN", "ds": 44.81, "sr": 15.90, "efficiency": null, "comfort": null, "l2_avg": null, "category": "Vision-Action"},
+ {"id": 7, "model": "DriveTransformer", "year": 2025, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "ds": 63.46, "sr": 35.01, "efficiency": 100.64, "comfort": 20.78, "l2_avg": 0.62, "category": "Vision-Action"},
+ {"id": 8, "model": "ETA", "year": 2025, "input": "camera", "vision": "CLIP", "language": "-", "action": "REG", "ds": 69.53, "sr": 38.64, "efficiency": 184.51, "comfort": 28.43, "l2_avg": null, "category": "Vision-Action"},
+ {"id": 9, "model": "WoTE", "year": 2025, "input": "camera, lidar", "vision": "ResNet", "language": "-", "action": "SEL", "ds": 61.71, "sr": 31.36, "efficiency": null, "comfort": null, "l2_avg": null, "category": "Vision-Action"},
+ {"id": 10, "model": "GuideFlow", "year": 2025, "input": "camera", "vision": "ResNet", "language": "-", "action": "GEN", "ds": 75.21, "sr": 51.36, "efficiency": null, "comfort": null, "l2_avg": null, "category": "Vision-Action"},
+ {"id": 11, "model": "Raw2Drive", "year": 2025, "input": "camera", "vision": "ResNet", "language": "-", "action": "RL", "ds": 71.36, "sr": 50.24, "efficiency": 214.17, "comfort": 22.42, "l2_avg": null, "category": "Vision-Action"},
+ {"id": 12, "model": "ORION", "year": 2025, "input": "camera, prompt, command", "vision": "EVA-02", "language": "Vicuna-1.5", "action": "GEN", "ds": 77.74, "sr": 54.62, "efficiency": 151.48, "comfort": 17.38, "l2_avg": 0.68, "category": "Vision-Language-Action"},
+ {"id": 13, "model": "AutoVLA", "year": 2025, "input": "camera, prompt, command, status", "vision": "SigLIP", "language": "Qwen2.5-VL", "action": "LH", "ds": 78.84, "sr": 57.73, "efficiency": 146.93, "comfort": 39.33, "l2_avg": null, "category": "Vision-Language-Action"},
+ {"id": 14, "model": "SimLingo-Base", "year": 2025, "input": "camera, prompt, command", "vision": "InternViT", "language": "Qwen2", "action": "REG", "ds": 85.94, "sr": 66.82, "efficiency": 244.18, "comfort": 25.49, "l2_avg": null, "category": "Vision-Language-Action"},
+ {"id": 15, "model": "SimLingo", "year": 2025, "input": "camera, prompt, command", "vision": "InternViT", "language": "Qwen2", "action": "REG", "ds": 85.07, "sr": 67.27, "efficiency": 259.23, "comfort": 33.67, "l2_avg": null, "category": "Vision-Language-Action"},
+ {"id": 16, "model": "ReAL-AD (QwenVL)", "year": 2025, "input": "camera, prompt, command", "vision": "ResNet", "language": "QwenVL", "action": "REG", "ds": 40.76, "sr": 10.93, "efficiency": null, "comfort": null, "l2_avg": 0.87, "category": "Vision-Language-Action"},
+ {"id": 17, "model": "ReasonPlan", "year": 2025, "input": "camera, prompt, command, status, context", "vision": "SigLIP", "language": "Qwen", "action": "LH", "ds": 64.01, "sr": 34.55, "efficiency": 180.64, "comfort": 25.63, "l2_avg": 0.61, "category": "Vision-Language-Action"},
+ {"id": 18, "model": "DriveMoE", "year": 2025, "input": "camera, prompt, status", "vision": "BEV Encoder", "language": "LLaMA", "action": "REG", "ds": 74.22, "sr": 48.64, "efficiency": 175.96, "comfort": 15.31, "l2_avg": 0.31, "category": "Vision-Language-Action"},
+ {"id": 19, "model": "VDRive", "year": 2025, "input": "camera, prompt, command, status", "vision": "Qwen2.5-VL, CVQ", "language": "InternVL3", "action": "GEN", "ds": 66.15, "sr": 50.51, "efficiency": 110.23, "comfort": 22.90, "l2_avg": 0.55, "category": "Vision-Language-Action"},
+ {"id": 20, "model": "CoReVLA", "year": 2025, "input": "camera, prompt, command, context", "vision": "Qwen2.5-VL", "language": "Qwen2.5-VL", "action": "LH", "ds": 72.18, "sr": 50.00, "efficiency": 145.41, "comfort": 34.35, "l2_avg": null, "category": "Vision-Language-Action"}
+ ]
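For reference, a minimal sketch of how this results file feeds the Bench2Drive ranking in `app.py` (assumptions: run from the repo root with pandas installed; higher Driving Score `ds` is better, and JSON `null` values load as NaN):

```python
# Load the Bench2Drive results and print the top entries by Driving Score.
import pandas as pd

df = pd.read_json("results/bench2drive_results.json")
top = df.nlargest(5, "ds")[["model", "ds", "sr", "category"]]
print(top.to_string(index=False))
```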
results/datasets.json ADDED
@@ -0,0 +1,33 @@
+ [
+ {"id": 1, "dataset": "BDD100K", "year": 2020, "sensor": "camera", "type": "Real", "scale": "120M", "language": "-", "action_type": "Traj.", "category": "Vision-Action"},
+ {"id": 2, "dataset": "nuScenes", "year": 2020, "sensor": "camera, lidar, radar", "type": "Real", "scale": "1.4M", "language": "-", "action_type": "Traj.", "category": "Vision-Action"},
+ {"id": 3, "dataset": "Waymo", "year": 2020, "sensor": "camera, lidar, radar", "type": "Real", "scale": "200M", "language": "-", "action_type": "Traj.", "category": "Vision-Action"},
+ {"id": 4, "dataset": "nuPlan", "year": 2021, "sensor": "camera, lidar, map", "type": "Real", "scale": "4.6M", "language": "-", "action_type": "Ctrl, Traj.", "category": "Vision-Action"},
+ {"id": 5, "dataset": "Argoverse 2", "year": 2021, "sensor": "camera, lidar, map", "type": "Real", "scale": "300K", "language": "-", "action_type": "Traj.", "category": "Vision-Action"},
+ {"id": 6, "dataset": "Bench2Drive", "year": 2024, "sensor": "camera, lidar, map", "type": "Sim", "scale": "2M", "language": "-", "action_type": "Traj.", "category": "Vision-Action"},
+ {"id": 7, "dataset": "RoboBEV", "year": 2025, "sensor": "camera, lidar, radar", "type": "Real", "scale": "866K", "language": "-", "action_type": "Traj.", "category": "Vision-Action"},
+ {"id": 8, "dataset": "WOD-E2E", "year": 2025, "sensor": "camera", "type": "Real", "scale": "800K", "language": "-", "action_type": "Traj.", "category": "Vision-Action"},
+ {"id": 9, "dataset": "BDD-X", "year": 2018, "sensor": "camera", "type": "Real", "scale": "8.4M", "language": "Caption (26K)", "action_type": "Ctrl", "category": "Vision-Language-Action"},
+ {"id": 10, "dataset": "Talk2Car", "year": 2022, "sensor": "camera, lidar, radar", "type": "Real", "scale": "400K", "language": "Caption (12K)", "action_type": "Ctrl, Traj.", "category": "Vision-Language-Action"},
+ {"id": 11, "dataset": "SDN", "year": 2022, "sensor": "camera", "type": "Sim", "scale": "-", "language": "Instruction, QA (8.4K)", "action_type": "Ctrl, Traj.", "category": "Vision-Language-Action"},
+ {"id": 12, "dataset": "DriveMLM", "year": 2023, "sensor": "camera, lidar", "type": "Sim", "scale": "-", "language": "Reason., Deci.", "action_type": "Ctrl, Traj.", "category": "Vision-Language-Action"},
+ {"id": 13, "dataset": "LMDrive", "year": 2024, "sensor": "camera, lidar", "type": "Sim", "scale": "3M", "language": "Instruction (64K)", "action_type": "Traj.", "category": "Vision-Language-Action"},
+ {"id": 14, "dataset": "DriveLM-N", "year": 2024, "sensor": "camera, lidar, radar", "type": "Real", "scale": "4.8K", "language": "QA (445K)", "action_type": "Ctrl, Traj.", "category": "Vision-Language-Action"},
+ {"id": 15, "dataset": "DriveLM-C", "year": 2024, "sensor": "camera, lidar, radar", "type": "Sim", "scale": "64K", "language": "QA (3.76M)", "action_type": "Ctrl, Traj.", "category": "Vision-Language-Action"},
+ {"id": 16, "dataset": "HBD", "year": 2024, "sensor": "camera", "type": "Real, Sim", "scale": "-", "language": "Deci., Descrip., QA", "action_type": "Traj.", "category": "Vision-Language-Action"},
+ {"id": 17, "dataset": "VLAAD", "year": 2024, "sensor": "camera", "type": "Real", "scale": "-", "language": "Reason., QA (64K)", "action_type": "Ctrl.", "category": "Vision-Language-Action"},
+ {"id": 18, "dataset": "SUP-AD", "year": 2024, "sensor": "camera", "type": "Real", "scale": "-", "language": "Action, Reason., QA", "action_type": "Ctrl, Traj.", "category": "Vision-Language-Action"},
+ {"id": 19, "dataset": "NuInstruct", "year": 2024, "sensor": "camera, lidar, radar", "type": "Real", "scale": "11.8K", "language": "Instruction (91K)", "action_type": "Ctrl", "category": "Vision-Language-Action"},
+ {"id": 20, "dataset": "WOMD-Reason", "year": 2024, "sensor": "camera", "type": "Real", "scale": "63K", "language": "QA (2940K)", "action_type": "Plan.", "category": "Vision-Language-Action"},
+ {"id": 21, "dataset": "DriveCoT", "year": 2024, "sensor": "camera, lidar", "type": "Sim", "scale": "-", "language": "CoT, Deci. (36K)", "action_type": "Ctrl", "category": "Vision-Language-Action"},
+ {"id": 22, "dataset": "Reason2Drive", "year": 2024, "sensor": "camera, lidar, radar", "type": "Real", "scale": "-", "language": "Reason., QA (632K)", "action_type": "Ctrl, Traj.", "category": "Vision-Language-Action"},
+ {"id": 23, "dataset": "DriveBench", "year": 2025, "sensor": "camera, lidar, radar", "type": "Real", "scale": "19.2K", "language": "QA (20.5K)", "action_type": "Ctrl", "category": "Vision-Language-Action"},
+ {"id": 24, "dataset": "MetaAD", "year": 2025, "sensor": "camera", "type": "Real", "scale": "120K", "language": "Reason., Plan, QA (30K)", "action_type": "Ctrl", "category": "Vision-Language-Action"},
+ {"id": 25, "dataset": "OmniDrive", "year": 2025, "sensor": "camera, lidar, radar", "type": "Real", "scale": "-", "language": "Reason., QA", "action_type": "Ctrl, Traj.", "category": "Vision-Language-Action"},
+ {"id": 26, "dataset": "NuInteract", "year": 2025, "sensor": "camera, lidar, radar", "type": "Real", "scale": "34K", "language": "Caption, QA (1.5M)", "action_type": "Ctrl", "category": "Vision-Language-Action"},
+ {"id": 27, "dataset": "DriveAction", "year": 2025, "sensor": "camera", "type": "Real", "scale": "2.6K", "language": "QA (16.18K)", "action_type": "Ctrl", "category": "Vision-Language-Action"},
+ {"id": 28, "dataset": "ImpromptuVLA", "year": 2025, "sensor": "camera", "type": "Real, Sim", "scale": "2M", "language": "Instruction, QA (80K)", "action_type": "Ctrl, Traj.", "category": "Vision-Language-Action"},
+ {"id": 29, "dataset": "CoVLA", "year": 2025, "sensor": "camera", "type": "Real", "scale": "6M", "language": "Caption (6M)", "action_type": "Traj.", "category": "Vision-Language-Action"},
+ {"id": 30, "dataset": "OmniReason-N", "year": 2025, "sensor": "camera, lidar, radar", "type": "Real", "scale": "-", "language": "QA", "action_type": "Ctrl, Traj.", "category": "Vision-Language-Action"},
+ {"id": 31, "dataset": "OmniReason-B2D", "year": 2025, "sensor": "camera", "type": "Sim", "scale": "-", "language": "QA", "action_type": "Ctrl, Traj.", "category": "Vision-Language-Action"}
+ ]
results/navsim_results.json ADDED
@@ -0,0 +1,26 @@
+ [
+ {"id": 1, "model": "TransFuser", "year": 2022, "input": "camera, lidar", "vision": "ResNet", "language": "-", "action": "REG", "nc": 97.7, "dac": 92.8, "ttc": 92.8, "comf": 100.0, "ep": 79.2, "pdms": 84.0, "category": "Vision-Action"},
+ {"id": 2, "model": "UniAD", "year": 2023, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "nc": 97.8, "dac": 91.9, "ttc": 92.9, "comf": 100.0, "ep": 78.8, "pdms": 83.4, "category": "Vision-Action"},
+ {"id": 3, "model": "VADv2", "year": 2024, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "nc": 97.2, "dac": 89.1, "ttc": 91.6, "comf": 100.0, "ep": 76.0, "pdms": 80.9, "category": "Vision-Action"},
+ {"id": 4, "model": "PARA-Drive", "year": 2024, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "nc": 97.9, "dac": 92.4, "ttc": 93.0, "comf": 99.8, "ep": 79.3, "pdms": 84.0, "category": "Vision-Action"},
+ {"id": 5, "model": "LAW", "year": 2024, "input": "camera", "vision": "Swin-T", "language": "-", "action": "REG", "nc": 96.4, "dac": 95.4, "ttc": 88.7, "comf": 99.9, "ep": 81.7, "pdms": 84.6, "category": "Vision-Action"},
+ {"id": 6, "model": "DRAMA", "year": 2024, "input": "camera, lidar", "vision": "ResNet", "language": "-", "action": "REG", "nc": 98.0, "dac": 93.1, "ttc": 94.8, "comf": 100.0, "ep": 80.1, "pdms": 85.5, "category": "Vision-Action"},
+ {"id": 7, "model": "DiffusionDrive", "year": 2024, "input": "camera, lidar", "vision": "ResNet", "language": "-", "action": "GEN", "nc": 98.2, "dac": 96.2, "ttc": 94.7, "comf": 100.0, "ep": 82.2, "pdms": 88.1, "category": "Vision-Action"},
+ {"id": 8, "model": "WoTE", "year": 2025, "input": "camera, lidar", "vision": "ResNet", "language": "-", "action": "SEL", "nc": 98.5, "dac": 96.8, "ttc": 94.9, "comf": 99.9, "ep": 81.9, "pdms": 88.3, "category": "Vision-Action"},
+ {"id": 9, "model": "World4Drive", "year": 2025, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "nc": 97.4, "dac": 94.3, "ttc": 92.8, "comf": 100.0, "ep": 79.9, "pdms": 85.1, "category": "Vision-Action"},
+ {"id": 10, "model": "DrivingGPT", "year": 2025, "input": "camera", "vision": "VQ-VAE", "language": "-", "action": "REG", "nc": 98.9, "dac": 90.7, "ttc": 94.9, "comf": 95.6, "ep": 79.7, "pdms": 82.4, "category": "Vision-Action"},
+ {"id": 11, "model": "AD-R1", "year": 2025, "input": "camera, lidar, status", "vision": "-", "language": "-", "action": "RL", "nc": 98.7, "dac": 97.8, "ttc": 94.8, "comf": 100.0, "ep": 87.5, "pdms": 91.9, "category": "Vision-Action"},
+ {"id": 12, "model": "SeerDrive", "year": 2025, "input": "camera, lidar", "vision": "VoVNet", "language": "-", "action": "SEL", "nc": 98.8, "dac": 98.6, "ttc": 95.8, "comf": 100.0, "ep": 84.2, "pdms": 90.7, "category": "Vision-Action"},
+ {"id": 13, "model": "Epona", "year": 2025, "input": "camera, status", "vision": "DC-AE", "language": "-", "action": "REG", "nc": 97.9, "dac": 95.1, "ttc": 93.8, "comf": 99.9, "ep": 80.4, "pdms": 86.2, "category": "Vision-Action"},
+ {"id": 14, "model": "GoalFlow", "year": 2025, "input": "camera, lidar, status", "vision": "VoVNet", "language": "-", "action": "GEN", "nc": 98.4, "dac": 98.3, "ttc": 94.6, "comf": 100.0, "ep": 85.0, "pdms": 90.3, "category": "Vision-Action"},
+ {"id": 15, "model": "TrajDiff", "year": 2025, "input": "camera, lidar, status", "vision": "ResNet", "language": "-", "action": "GEN", "nc": 98.1, "dac": 97.0, "ttc": 94.3, "comf": 100.0, "ep": 82.7, "pdms": 88.5, "category": "Vision-Action"},
+ {"id": 16, "model": "DiffusionDriveV2", "year": 2025, "input": "camera, lidar", "vision": "ResNet", "language": "-", "action": "GEN", "nc": 98.3, "dac": 97.9, "ttc": 94.8, "comf": 99.9, "ep": 87.5, "pdms": 91.2, "category": "Vision-Action"},
+ {"id": 17, "model": "NaviHydra", "year": 2025, "input": "camera, lidar", "vision": "ResNet", "language": "-", "action": "SEL", "nc": 98.7, "dac": 98.6, "ttc": 88.7, "comf": 96.2, "ep": 100.0, "pdms": 92.7, "category": "Vision-Action"},
+ {"id": 18, "model": "Mimir", "year": 2025, "input": "camera, lidar", "vision": "ResNet", "language": "-", "action": "GEN", "nc": 98.2, "dac": 97.5, "ttc": 94.6, "comf": 100.0, "ep": 83.6, "pdms": 89.3, "category": "Vision-Action"},
+ {"id": 19, "model": "ReCogDrive", "year": 2025, "input": "camera, prompt, command", "vision": "InternViT", "language": "Qwen2.5-VL", "action": "GEN", "nc": 98.2, "dac": 97.8, "ttc": 95.2, "comf": 99.8, "ep": 83.5, "pdms": 89.6, "category": "Vision-Language-Action"},
+ {"id": 20, "model": "AutoVLA", "year": 2025, "input": "camera, prompt, command, status", "vision": "Qwen2.5-VL", "language": "Qwen2.5-VL", "action": "LH", "nc": 99.1, "dac": 97.1, "ttc": 97.1, "comf": 99.9, "ep": 87.6, "pdms": 92.1, "category": "Vision-Language-Action"},
+ {"id": 21, "model": "ReflectDrive", "year": 2025, "input": "camera, prompt, scene, status", "vision": "LLaDA-V", "language": "LLaDA-V", "action": "GEN", "nc": 99.7, "dac": 99.5, "ttc": 99.1, "comf": 99.9, "ep": 88.9, "pdms": 94.7, "category": "Vision-Language-Action"},
+ {"id": 22, "model": "AdaThinkDrive", "year": 2025, "input": "camera, prompt, command, status", "vision": "InternVL3", "language": "InternVL3", "action": "REG", "nc": 99.1, "dac": 98.8, "ttc": 97.2, "comf": 100.0, "ep": 87.9, "pdms": 93.0, "category": "Vision-Language-Action"},
+ {"id": 23, "model": "Percept-WAM", "year": 2025, "input": "camera, lidar, prompt, command", "vision": "InternViT", "language": "InternVL2", "action": "REG", "nc": 98.8, "dac": 98.6, "ttc": 94.4, "comf": 99.5, "ep": 84.8, "pdms": 90.2, "category": "Vision-Language-Action"},
+ {"id": 24, "model": "Reasoning-VLA", "year": 2025, "input": "camera, prompt, status", "vision": "Qwen2.5-VL", "language": "Qwen2.5-VL", "action": "REG", "nc": 97.8, "dac": 93.2, "ttc": 98.1, "comf": 99.8, "ep": 80.7, "pdms": 91.7, "category": "Vision-Language-Action"}
+ ]
results/nuscenes_results.json ADDED
@@ -0,0 +1,60 @@
+ [
+ {"id": 1, "model": "ST-P3", "year": 2022, "input": "camera", "vision": "EfficientNet", "language": "-", "action": "REG", "l2_1s": 1.33, "l2_2s": 2.11, "l2_3s": 2.90, "l2_avg": 2.11, "cr_1s": 0.23, "cr_2s": 0.62, "cr_3s": 1.27, "cr_avg": 0.71, "category": "Vision-Action"},
+ {"id": 2, "model": "UniAD", "year": 2022, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "l2_1s": 0.44, "l2_2s": 0.67, "l2_3s": 0.96, "l2_avg": 0.69, "cr_1s": 0.04, "cr_2s": 0.08, "cr_3s": 0.23, "cr_avg": 0.12, "category": "Vision-Action"},
+ {"id": 3, "model": "VAD", "year": 2023, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "l2_1s": 0.17, "l2_2s": 0.34, "l2_3s": 0.60, "l2_avg": 0.37, "cr_1s": 0.07, "cr_2s": 0.10, "cr_3s": 0.24, "cr_avg": 0.14, "category": "Vision-Action"},
+ {"id": 4, "model": "OccNet", "year": 2023, "input": "camera", "vision": "ResNet", "language": "-", "action": "SEL", "l2_1s": 1.29, "l2_2s": 2.13, "l2_3s": 2.99, "l2_avg": 2.14, "cr_1s": 0.21, "cr_2s": 0.59, "cr_3s": 1.37, "cr_avg": 0.72, "category": "Vision-Action"},
+ {"id": 5, "model": "BEV-Planner", "year": 2024, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "l2_1s": 0.30, "l2_2s": 0.52, "l2_3s": 0.83, "l2_avg": 0.55, "cr_1s": 0.10, "cr_2s": 0.37, "cr_3s": 1.30, "cr_avg": 0.59, "category": "Vision-Action"},
+ {"id": 6, "model": "Drive-WM", "year": 2024, "input": "camera, status", "vision": "ConvNeXt", "language": "-", "action": "SEL", "l2_1s": 0.43, "l2_2s": 0.77, "l2_3s": 1.20, "l2_avg": 0.80, "cr_1s": 0.10, "cr_2s": 0.21, "cr_3s": 0.48, "cr_avg": 0.26, "category": "Vision-Action"},
+ {"id": 7, "model": "GenAD", "year": 2024, "input": "camera", "vision": "ResNet", "language": "-", "action": "GEN", "l2_1s": 0.36, "l2_2s": 0.83, "l2_3s": 1.55, "l2_avg": 0.91, "cr_1s": 0.06, "cr_2s": 0.23, "cr_3s": 1.00, "cr_avg": 0.43, "category": "Vision-Action"},
+ {"id": 8, "model": "OccWorld", "year": 2024, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "l2_1s": 0.43, "l2_2s": 1.08, "l2_3s": 1.99, "l2_avg": 1.17, "cr_1s": 0.07, "cr_2s": 0.38, "cr_3s": 1.35, "cr_avg": 0.60, "category": "Vision-Action"},
+ {"id": 9, "model": "DriveDreamer", "year": 2024, "input": "camera, status", "vision": "SD", "language": "-", "action": "REG", "l2_1s": null, "l2_2s": null, "l2_3s": null, "l2_avg": 0.29, "cr_1s": null, "cr_2s": null, "cr_3s": null, "cr_avg": 0.15, "category": "Vision-Action"},
+ {"id": 10, "model": "SparseAD", "year": 2024, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "l2_1s": 0.15, "l2_2s": 0.31, "l2_3s": 0.56, "l2_avg": 0.34, "cr_1s": 0.00, "cr_2s": 0.04, "cr_3s": 0.15, "cr_avg": 0.06, "category": "Vision-Action"},
+ {"id": 11, "model": "GaussianAD", "year": 2024, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "l2_1s": 0.40, "l2_2s": 0.64, "l2_3s": 0.88, "l2_avg": 0.64, "cr_1s": 0.09, "cr_2s": 0.38, "cr_3s": 0.81, "cr_avg": 0.42, "category": "Vision-Action"},
+ {"id": 12, "model": "LAW", "year": 2024, "input": "camera", "vision": "Swin-T", "language": "-", "action": "REG", "l2_1s": 0.24, "l2_2s": 0.46, "l2_3s": 0.76, "l2_avg": 0.49, "cr_1s": 0.08, "cr_2s": 0.10, "cr_3s": 0.39, "cr_avg": 0.19, "category": "Vision-Action"},
+ {"id": 13, "model": "SSR", "year": 2024, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "l2_1s": 0.18, "l2_2s": 0.36, "l2_3s": 0.63, "l2_avg": 0.39, "cr_1s": 0.01, "cr_2s": 0.04, "cr_3s": 0.12, "cr_avg": 0.06, "category": "Vision-Action"},
+ {"id": 14, "model": "Drive-OccWorld", "year": 2025, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "l2_1s": 0.25, "l2_2s": 0.44, "l2_3s": 0.72, "l2_avg": 0.47, "cr_1s": 0.03, "cr_2s": 0.08, "cr_3s": 0.22, "cr_avg": 0.11, "category": "Vision-Action"},
+ {"id": 15, "model": "DriveTransformer", "year": 2025, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "l2_1s": 0.19, "l2_2s": 0.34, "l2_3s": 0.66, "l2_avg": 0.40, "cr_1s": 0.03, "cr_2s": 0.10, "cr_3s": 0.21, "cr_avg": 0.11, "category": "Vision-Action"},
+ {"id": 16, "model": "DiffusionDrive", "year": 2025, "input": "camera, lidar", "vision": "ResNet", "language": "-", "action": "GEN", "l2_1s": 0.27, "l2_2s": 0.54, "l2_3s": 0.90, "l2_avg": 0.57, "cr_1s": 0.03, "cr_2s": 0.05, "cr_3s": 0.16, "cr_avg": 0.08, "category": "Vision-Action"},
+ {"id": 17, "model": "World4Drive", "year": 2025, "input": "camera", "vision": "ResNet", "language": "-", "action": "REG", "l2_1s": 0.23, "l2_2s": 0.47, "l2_3s": 0.81, "l2_avg": 0.50, "cr_1s": 0.02, "cr_2s": 0.12, "cr_3s": 0.33, "cr_avg": 0.16, "category": "Vision-Action"},
+ {"id": 18, "model": "Epona", "year": 2025, "input": "camera, status", "vision": "DC-AE", "language": "-", "action": "REG", "l2_1s": 0.61, "l2_2s": 1.17, "l2_3s": 1.98, "l2_avg": 1.25, "cr_1s": 0.01, "cr_2s": 0.22, "cr_3s": 0.85, "cr_avg": 0.36, "category": "Vision-Action"},
+ {"id": 19, "model": "SeerDrive", "year": 2025, "input": "camera, lidar", "vision": "ResNet", "language": "-", "action": "SEL", "l2_1s": 0.20, "l2_2s": 0.39, "l2_3s": 0.69, "l2_avg": 0.43, "cr_1s": 0.00, "cr_2s": 0.05, "cr_3s": 0.14, "cr_avg": 0.06, "category": "Vision-Action"},
+ {"id": 20, "model": "GuideFlow", "year": 2025, "input": "camera", "vision": "ResNet", "language": "-", "action": "GEN", "l2_1s": null, "l2_2s": null, "l2_3s": null, "l2_avg": null, "cr_1s": 0.00, "cr_2s": 0.02, "cr_3s": 0.18, "cr_avg": 0.07, "category": "Vision-Action"},
+ {"id": 21, "model": "Agent-Driver", "year": 2023, "input": "camera, prompt", "vision": "-", "language": "GPT-3.5", "action": "LH", "l2_1s": 0.16, "l2_2s": 0.34, "l2_3s": 0.61, "l2_avg": 0.37, "cr_1s": 0.02, "cr_2s": 0.07, "cr_3s": 0.18, "cr_avg": 0.09, "category": "Vision-Language-Action"},
+ {"id": 22, "model": "DriveVLM", "year": 2024, "input": "camera, prompt, scene, status", "vision": "ViT", "language": "QwenVL", "action": "GEN", "l2_1s": 0.18, "l2_2s": 0.34, "l2_3s": 0.68, "l2_avg": 0.40, "cr_1s": 0.10, "cr_2s": 0.22, "cr_3s": 0.45, "cr_avg": 0.27, "category": "Vision-Language-Action"},
+ {"id": 23, "model": "DriveVLM-Dual", "year": 2024, "input": "camera, prompt", "vision": "ViT", "language": "QwenVL", "action": "REG", "l2_1s": 0.15, "l2_2s": 0.29, "l2_3s": 0.48, "l2_avg": 0.31, "cr_1s": 0.05, "cr_2s": 0.08, "cr_3s": 0.17, "cr_avg": 0.10, "category": "Vision-Language-Action"},
+ {"id": 24, "model": "RAG-Driver", "year": 2024, "input": "camera, prompt, command, context, status", "vision": "CLIP", "language": "Vicuna-1.5", "action": "LH", "l2_1s": 0.34, "l2_2s": 0.37, "l2_3s": 0.69, "l2_avg": 0.40, "cr_1s": 0.01, "cr_2s": 0.05, "cr_3s": 0.26, "cr_avg": 0.10, "category": "Vision-Language-Action"},
+ {"id": 25, "model": "Senna", "year": 2024, "input": "camera, prompt, command, status", "vision": "ViT", "language": "Vicuna-1.5", "action": "REG", "l2_1s": 0.37, "l2_2s": 0.54, "l2_3s": 0.86, "l2_avg": 0.59, "cr_1s": 0.09, "cr_2s": 0.12, "cr_3s": 0.33, "cr_avg": 0.18, "category": "Vision-Language-Action"},
+ {"id": 26, "model": "Doe-1", "year": 2024, "input": "camera, prompt, status, scene", "vision": "Lumina-mGPT", "language": "BPE tokenizer", "action": "LH", "l2_1s": 0.37, "l2_2s": 0.67, "l2_3s": 1.07, "l2_avg": 0.70, "cr_1s": 0.02, "cr_2s": 0.14, "cr_3s": 0.47, "cr_avg": 0.21, "category": "Vision-Language-Action"},
+ {"id": 27, "model": "VLP", "year": 2024, "input": "camera, prompt, scene, command, status", "vision": "UniAD, VAD", "language": "CLIP", "action": "REG", "l2_1s": 0.30, "l2_2s": 0.53, "l2_3s": 0.84, "l2_avg": 0.55, "cr_1s": 0.01, "cr_2s": 0.07, "cr_3s": 0.38, "cr_avg": 0.15, "category": "Vision-Language-Action"},
+ {"id": 28, "model": "VLM-AD", "year": 2024, "input": "camera, prompt, scene, status", "vision": "UniAD, VAD", "language": "CLIP-ViT", "action": "REG", "l2_1s": 0.24, "l2_2s": 0.46, "l2_3s": 0.75, "l2_avg": 0.48, "cr_1s": 0.12, "cr_2s": 0.17, "cr_3s": 0.41, "cr_avg": 0.23, "category": "Vision-Language-Action"},
+ {"id": 29, "model": "OpenDriveVLA", "year": 2025, "input": "camera, prompt, scene, command, status", "vision": "ResNet", "language": "Qwen2.5-VL", "action": "LH", "l2_1s": 0.14, "l2_2s": 0.30, "l2_3s": 0.55, "l2_avg": 0.33, "cr_1s": 0.02, "cr_2s": 0.07, "cr_3s": 0.22, "cr_avg": 0.10, "category": "Vision-Language-Action"},
+ {"id": 30, "model": "OmniDrive", "year": 2025, "input": "camera, prompt, command, scene, status, traffic", "vision": "EVA-02", "language": "LLaMA2", "action": "LH", "l2_1s": 0.40, "l2_2s": 0.80, "l2_3s": 1.32, "l2_avg": 0.84, "cr_1s": 0.04, "cr_2s": 0.46, "cr_3s": 2.32, "cr_avg": 0.94, "category": "Vision-Language-Action"},
+ {"id": 31, "model": "ORION", "year": 2025, "input": "camera, prompt, command", "vision": "EVA-02", "language": "Vicuna-1.5", "action": "GEN", "l2_1s": 0.17, "l2_2s": 0.31, "l2_3s": 0.55, "l2_avg": 0.34, "cr_1s": 0.05, "cr_2s": 0.25, "cr_3s": 0.80, "cr_avg": 0.37, "category": "Vision-Language-Action"},
+ {"id": 32, "model": "EMMA", "year": 2025, "input": "camera, prompt, command, status", "vision": "Gemini-VLM", "language": "Gemini", "action": "LH", "l2_1s": 0.14, "l2_2s": 0.29, "l2_3s": 0.54, "l2_avg": 0.32, "cr_1s": null, "cr_2s": null, "cr_3s": null, "cr_avg": null, "category": "Vision-Language-Action"},
34
+ {"id": 33, "model": "WKER", "year": 2025, "input": "camera, prompt, command, status", "vision": "EVA-02", "language": "LLaMA3", "action": "LH", "l2_1s": 0.14, "l2_2s": 0.30, "l2_3s": 0.55, "l2_avg": 0.33, "cr_1s": 0.07, "cr_2s": 0.14, "cr_3s": 0.32, "cr_avg": 0.18, "category": "Vision-Language-Action"},
35
+ {"id": 34, "model": "Drive-R1", "year": 2025, "input": "camera, prompt, command, status", "vision": "InternVL2", "language": "InternVL2", "action": "LH", "l2_1s": 0.14, "l2_2s": 0.28, "l2_3s": 0.50, "l2_avg": 0.31, "cr_1s": 0.02, "cr_2s": 0.06, "cr_3s": 0.19, "cr_avg": 0.09, "category": "Vision-Language-Action"},
36
+ {"id": 35, "model": "ReAL-AD (MiniCPM)", "year": 2025, "input": "camera, prompt, command", "vision": "ResNet", "language": "MiniCPM", "action": "REG", "l2_1s": 0.30, "l2_2s": 0.48, "l2_3s": 0.67, "l2_avg": 0.48, "cr_1s": 0.07, "cr_2s": 0.10, "cr_3s": 0.28, "cr_avg": 0.15, "category": "Vision-Language-Action"},
37
+ {"id": 36, "model": "ReAL-AD (QwenVL)", "year": 2025, "input": "camera, prompt, command", "vision": "ResNet", "language": "QwenVL", "action": "REG", "l2_1s": 0.35, "l2_2s": 0.53, "l2_3s": 0.71, "l2_avg": 0.53, "cr_1s": 0.09, "cr_2s": 0.12, "cr_3s": 0.31, "cr_avg": 0.17, "category": "Vision-Language-Action"},
38
+ {"id": 37, "model": "DiMA", "year": 2025, "input": "camera, prompt, status", "vision": "VAD", "language": "LLaVA-1.5", "action": "REG", "l2_1s": 0.18, "l2_2s": 0.50, "l2_3s": 1.03, "l2_avg": 0.57, "cr_1s": 0.00, "cr_2s": 0.05, "cr_3s": 0.16, "cr_avg": 0.08, "category": "Vision-Language-Action"},
39
+ {"id": 38, "model": "FasionAD", "year": 2025, "input": "camera, prompt, scene, command, status", "vision": "BEVFormer", "language": "VLM+Thinking", "action": "GEN", "l2_1s": 0.19, "l2_2s": 0.62, "l2_3s": 1.25, "l2_avg": 0.69, "cr_1s": 0.02, "cr_2s": 0.09, "cr_3s": 0.44, "cr_avg": 0.18, "category": "Vision-Language-Action"},
40
+ {"id": 39, "model": "InsightDrive", "year": 2025, "input": "camera, prompt, scene, status", "vision": "ResNet", "language": "VLMs", "action": "REG", "l2_1s": 0.23, "l2_2s": 0.41, "l2_3s": 0.68, "l2_avg": 0.44, "cr_1s": 0.09, "cr_2s": 0.10, "cr_3s": 0.27, "cr_avg": 0.15, "category": "Vision-Language-Action"},
41
+ {"id": 40, "model": "S4-Driver", "year": 2025, "input": "camera, prompt, command, status", "vision": "ViT-G", "language": "PaLI3", "action": "LH", "l2_1s": 0.13, "l2_2s": 0.28, "l2_3s": 0.51, "l2_avg": 0.31, "cr_1s": null, "cr_2s": null, "cr_3s": null, "cr_avg": null, "category": "Vision-Language-Action"},
42
+ {"id": 41, "model": "SOLVE", "year": 2025, "input": "camera, prompt, status", "vision": "EVA-02", "language": "LLaVA-1.5", "action": "REG", "l2_1s": 0.13, "l2_2s": 0.25, "l2_3s": 0.47, "l2_avg": 0.28, "cr_1s": 0.00, "cr_2s": 0.16, "cr_3s": 0.43, "cr_avg": 0.20, "category": "Vision-Language-Action"},
43
+ {"id": 42, "model": "VERDI", "year": 2025, "input": "camera, prompt, scene, status", "vision": "VAD", "language": "Qwen2.5-VL", "action": "REG", "l2_1s": 0.36, "l2_2s": 0.62, "l2_3s": 0.96, "l2_avg": 0.65, "cr_1s": null, "cr_2s": null, "cr_3s": null, "cr_avg": null, "category": "Vision-Language-Action"},
44
+ {"id": 43, "model": "OmniReason", "year": 2025, "input": "camera, prompt, command, scene, status, traffic, context", "vision": "EVA-02", "language": "LLaVA-1.5", "action": "LH", "l2_1s": 0.15, "l2_2s": 0.31, "l2_3s": 0.57, "l2_avg": 0.34, "cr_1s": 0.04, "cr_2s": 0.18, "cr_3s": 0.98, "cr_avg": 0.40, "category": "Vision-Language-Action"},
45
+ {"id": 44, "model": "FutureSightDrive", "year": 2025, "input": "camera, prompt, command, status", "vision": "ViT", "language": "Qwen2-VL", "action": "LH", "l2_1s": 0.14, "l2_2s": 0.25, "l2_3s": 0.46, "l2_avg": 0.28, "cr_1s": 0.03, "cr_2s": 0.06, "cr_3s": 0.21, "cr_avg": 0.10, "category": "Vision-Language-Action"},
46
+ {"id": 45, "model": "Occ-LLM", "year": 2025, "input": "camera, prompt, status", "vision": "-", "language": "LLaMA2", "action": "LH", "l2_1s": 0.12, "l2_2s": 0.24, "l2_3s": 0.49, "l2_avg": 0.28, "cr_1s": null, "cr_2s": null, "cr_3s": null, "cr_avg": null, "category": "Vision-Language-Action"},
47
+ {"id": 46, "model": "FastDriveVLA", "year": 2025, "input": "camera, prompt", "vision": "Qwen2.5-VL", "language": "Qwen2.5-VL", "action": "LH", "l2_1s": 0.14, "l2_2s": 0.29, "l2_3s": 0.54, "l2_avg": 0.33, "cr_1s": 0.00, "cr_2s": 0.18, "cr_3s": 0.70, "cr_avg": 0.29, "category": "Vision-Language-Action"},
48
+ {"id": 47, "model": "AutoDrive-R²", "year": 2025, "input": "camera, prompt, status", "vision": "Qwen2.5-VL", "language": "Qwen2.5-VL", "action": "LH", "l2_1s": 0.13, "l2_2s": 0.19, "l2_3s": 0.25, "l2_avg": 0.19, "cr_1s": null, "cr_2s": null, "cr_3s": null, "cr_avg": null, "category": "Vision-Language-Action"},
49
+ {"id": 48, "model": "VDrive", "year": 2025, "input": "camera, prompt, command, status", "vision": "Qwen2.5-VL, CVQ", "language": "InternVL3", "action": "GEN", "l2_1s": 0.12, "l2_2s": 0.26, "l2_3s": 0.50, "l2_avg": 0.29, "cr_1s": 0.03, "cr_2s": 0.16, "cr_3s": 0.36, "cr_avg": 0.18, "category": "Vision-Language-Action"},
50
+ {"id": 49, "model": "OccVLA", "year": 2025, "input": "camera, prompt, status", "vision": "VQ-VAE", "language": "PaliGemma-2", "action": "REG", "l2_1s": 0.18, "l2_2s": 0.26, "l2_3s": 0.40, "l2_avg": 0.28, "cr_1s": null, "cr_2s": null, "cr_3s": null, "cr_avg": null, "category": "Vision-Language-Action"},
51
+ {"id": 50, "model": "FasionAD++", "year": 2024, "input": "camera, prompt, command, status", "vision": "GenAD", "language": "CLIP, QwenVL", "action": "GEN", "l2_1s": 0.13, "l2_2s": 0.26, "l2_3s": 0.45, "l2_avg": 0.28, "cr_1s": 0.05, "cr_2s": 0.08, "cr_3s": 0.15, "cr_avg": 0.09, "category": "Vision-Language-Action"},
52
+ {"id": 51, "model": "ALN-P3", "year": 2025, "input": "camera, prompt, scene", "vision": "VAD", "language": "CLIP, LLaMA-2", "action": "REG", "l2_1s": null, "l2_2s": null, "l2_3s": null, "l2_avg": null, "cr_1s": 0.05, "cr_2s": 0.09, "cr_3s": 0.35, "cr_avg": 0.16, "category": "Vision-Language-Action"},
53
+ {"id": 52, "model": "VLM-E2E", "year": 2025, "input": "camera, prompt, scene", "vision": "EfficientNet", "language": "CLIP", "action": "REG", "l2_1s": 0.28, "l2_2s": 0.50, "l2_3s": 0.80, "l2_avg": 0.53, "cr_1s": 0.01, "cr_2s": 0.06, "cr_3s": 0.20, "cr_avg": 0.09, "category": "Vision-Language-Action"},
54
+ {"id": 53, "model": "NetRoller", "year": 2025, "input": "camera, prompt", "vision": "CLIP", "language": "LLaMA-2", "action": "REG", "l2_1s": 0.38, "l2_2s": 0.66, "l2_3s": 1.01, "l2_avg": 0.68, "cr_1s": 0.06, "cr_2s": 0.13, "cr_3s": 0.30, "cr_avg": 0.16, "category": "Vision-Language-Action"},
55
+ {"id": 54, "model": "OmniScene", "year": 2025, "input": "camera, prompt, scene, status", "vision": "ResNet", "language": "Qwen2.5-VL", "action": "REG", "l2_1s": 0.28, "l2_2s": 0.53, "l2_3s": 0.91, "l2_avg": 0.57, "cr_1s": 0.00, "cr_2s": 0.04, "cr_3s": 0.19, "cr_avg": 0.08, "category": "Vision-Language-Action"},
56
+ {"id": 55, "model": "Sce2DriveX", "year": 2025, "input": "camera, prompt, command, status", "vision": "OpenCLIP", "language": "Vicuna-1.5", "action": "LH", "l2_1s": 0.15, "l2_2s": 0.33, "l2_3s": 0.59, "l2_avg": 0.36, "cr_1s": null, "cr_2s": null, "cr_3s": null, "cr_avg": null, "category": "Vision-Language-Action"},
57
+ {"id": 56, "model": "dVLM-AD", "year": 2025, "input": "camera, prompt, status", "vision": "SigLIP2", "language": "LLaDA-V", "action": "LH", "l2_1s": 0.15, "l2_2s": 0.40, "l2_3s": 0.68, "l2_avg": 0.41, "cr_1s": null, "cr_2s": null, "cr_3s": null, "cr_avg": null, "category": "Vision-Language-Action"},
58
+ {"id": 57, "model": "Percept-WAM", "year": 2025, "input": "camera, lidar, prompt, command", "vision": "InternViT", "language": "InternVL2", "action": "REG", "l2_1s": 0.16, "l2_2s": 0.33, "l2_3s": 0.60, "l2_avg": 0.36, "cr_1s": null, "cr_2s": null, "cr_3s": null, "cr_avg": null, "category": "Vision-Language-Action"},
59
+ {"id": 58, "model": "Reasoning-VLA", "year": 2025, "input": "camera, prompt, status", "vision": "Qwen2.5-VL", "language": "Qwen2.5-VL", "action": "REG", "l2_1s": 0.05, "l2_2s": 0.19, "l2_3s": 0.41, "l2_avg": 0.22, "cr_1s": 0.02, "cr_2s": 0.06, "cr_3s": 0.13, "cr_avg": 0.07, "category": "Vision-Language-Action"}
60
+ ]
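The records above close the open-loop planning table. As a quick orientation for anyone consuming these files, the sketch below shows one way such a table could be loaded and ranked by average L2 error. It is illustrative only, not part of this commit: the path `results/planning_results.json` is a placeholder for the file being added here, and the use of `pandas` is an assumption.

```python
import json

import pandas as pd

# Placeholder path; substitute the actual results file added in this commit.
with open("results/planning_results.json") as f:
    records = json.load(f)

# Each record carries L2 error (m) and collision rate (%) at 1s/2s/3s
# horizons plus their averages; JSON nulls load as NaN in numeric columns.
df = pd.DataFrame(records)

leaderboard = (
    df.dropna(subset=["l2_avg"])       # skip models without a reported average
    .sort_values("l2_avg")             # lower L2 error is better
    .loc[:, ["model", "year", "category", "l2_avg", "cr_avg"]]
)
print(leaderboard.head(10).to_string(index=False))
```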
results/va_models.json ADDED
@@ -0,0 +1,911 @@
1
+ [
2
+ {
3
+ "id": 1,
4
+ "model": "LBC",
5
+ "venue": "CoRL'20",
6
+ "input": [
7
+ "camera"
8
+ ],
9
+ "dataset": [
10
+ "CARLA",
11
+ "NoCrash"
12
+ ],
13
+ "vision": "ResNet",
14
+ "action": "RL",
15
+ "output": "Ctrl.+Traj.",
16
+ "category": "Action-Only Models"
17
+ },
18
+ {
19
+ "id": 2,
20
+ "model": "Latent-DRL",
21
+ "venue": "CVPR'20",
22
+ "input": [
23
+ "camera"
24
+ ],
25
+ "dataset": [
26
+ "CARLA"
27
+ ],
28
+ "vision": "ResNet",
29
+ "action": "RL",
30
+ "output": "Ctrl.",
31
+ "category": "Action-Only Models"
32
+ },
33
+ {
34
+ "id": 3,
35
+ "model": "NEAT",
36
+ "venue": "ICCV'21",
37
+ "input": [
38
+ "camera"
39
+ ],
40
+ "dataset": [
41
+ "CARLA"
42
+ ],
43
+ "vision": "ResNet",
44
+ "action": "REG",
45
+ "output": "Traj.",
46
+ "category": "Action-Only Models"
47
+ },
48
+ {
49
+ "id": 4,
50
+ "model": "Roach",
51
+ "venue": "ICCV'21",
52
+ "input": [
53
+ "camera"
54
+ ],
55
+ "dataset": [
56
+ "CARLA",
57
+ "NoCrash"
58
+ ],
59
+ "vision": "ResNet",
60
+ "action": "RL",
61
+ "output": "Ctrl.",
62
+ "category": "Action-Only Models"
63
+ },
64
+ {
65
+ "id": 5,
66
+ "model": "WoR",
67
+ "venue": "ICCV'21",
68
+ "input": [
69
+ "camera"
70
+ ],
71
+ "dataset": [
72
+ "CARLA",
73
+ "NoCrash",
74
+ "ProcGen"
75
+ ],
76
+ "vision": "ResNet",
77
+ "action": "REG",
78
+ "output": "Ctrl.",
79
+ "category": "Action-Only Models"
80
+ },
81
+ {
82
+ "id": 6,
83
+ "model": "TCP",
84
+ "venue": "NeurIPS'22",
85
+ "input": [
86
+ "camera"
87
+ ],
88
+ "dataset": [
89
+ "CARLA"
90
+ ],
91
+ "vision": "ResNet",
92
+ "action": "REG",
93
+ "output": "Ctrl.+Traj.",
94
+ "category": "Action-Only Models"
95
+ },
96
+ {
97
+ "id": 7,
98
+ "model": "Urban-Driver",
99
+ "venue": "CoRL'22",
100
+ "input": [
101
+ "camera"
102
+ ],
103
+ "dataset": [
104
+ "Lyft"
105
+ ],
106
+ "vision": "ResNet",
107
+ "action": "REG",
108
+ "output": "Traj.",
109
+ "category": "Action-Only Models"
110
+ },
111
+ {
112
+ "id": 8,
113
+ "model": "LAV",
114
+ "venue": "CVPR'22",
115
+ "input": [
116
+ "camera",
117
+ "lidar"
118
+ ],
119
+ "dataset": [
120
+ "CARLA"
121
+ ],
122
+ "vision": "ResNet",
123
+ "action": "REG",
124
+ "output": "Ctrl.+Traj.",
125
+ "category": "Action-Only Models"
126
+ },
127
+ {
128
+ "id": 9,
129
+ "model": "TransFuser",
130
+ "venue": "TPAMI'23",
131
+ "input": [
132
+ "camera",
133
+ "lidar"
134
+ ],
135
+ "dataset": [
136
+ "CARLA"
137
+ ],
138
+ "vision": "ResNet",
139
+ "action": "REG",
140
+ "output": "Traj.",
141
+ "category": "Action-Only Models"
142
+ },
143
+ {
144
+ "id": 10,
145
+ "model": "GRI",
146
+ "venue": "Robotics'23",
147
+ "input": [
148
+ "camera"
149
+ ],
150
+ "dataset": [
151
+ "CARLA"
152
+ ],
153
+ "vision": "EfficientNet",
154
+ "action": "RL",
155
+ "output": "Ctrl.",
156
+ "category": "Action-Only Models"
157
+ },
158
+ {
159
+ "id": 11,
160
+ "model": "BEVPlanner",
161
+ "venue": "CVPR'24",
162
+ "input": [
163
+ "camera"
164
+ ],
165
+ "dataset": [
166
+ "nuScenes"
167
+ ],
168
+ "vision": "ResNet",
169
+ "action": "REG",
170
+ "output": "Traj.",
171
+ "category": "Action-Only Models"
172
+ },
173
+ {
174
+ "id": 12,
175
+ "model": "Raw2Drive",
176
+ "venue": "NeurIPS'25",
177
+ "input": [
178
+ "camera"
179
+ ],
180
+ "dataset": [
181
+ "CARLA",
182
+ "Bench2Drive"
183
+ ],
184
+ "vision": "ResNet",
185
+ "action": "RL",
186
+ "output": "Ctrl.",
187
+ "category": "Action-Only Models"
188
+ },
189
+ {
190
+ "id": 13,
191
+ "model": "RAD",
192
+ "venue": "NeurIPS'25",
193
+ "input": [
194
+ "camera"
195
+ ],
196
+ "dataset": [
197
+ "Private"
198
+ ],
199
+ "vision": "ResNet",
200
+ "action": "RL",
201
+ "output": "Traj.",
202
+ "category": "Action-Only Models"
203
+ },
204
+ {
205
+ "id": 14,
206
+ "model": "TrajDiff",
207
+ "venue": "arXiv'25",
208
+ "input": [
209
+ "camera",
210
+ "lidar",
211
+ "status"
212
+ ],
213
+ "dataset": [
214
+ "NAVSIM"
215
+ ],
216
+ "vision": "ResNet",
217
+ "action": "GEN",
218
+ "output": "Traj.",
219
+ "category": "Action-Only Models"
220
+ },
221
+ {
222
+ "id": 15,
223
+ "model": "ST-P3",
224
+ "venue": "ECCV'22",
225
+ "input": [
226
+ "camera"
227
+ ],
228
+ "dataset": [
229
+ "nuScenes",
230
+ "CARLA"
231
+ ],
232
+ "vision": "EfficientNet",
233
+ "action": "SEL",
234
+ "output": "Traj.",
235
+ "category": "Perception-Action Models"
236
+ },
237
+ {
238
+ "id": 16,
239
+ "model": "UniAD",
240
+ "venue": "CVPR'23",
241
+ "input": [
242
+ "camera"
243
+ ],
244
+ "dataset": [
245
+ "nuScenes"
246
+ ],
247
+ "vision": "ResNet",
248
+ "action": "REG",
249
+ "output": "Traj.",
250
+ "category": "Perception-Action Models"
251
+ },
252
+ {
253
+ "id": 17,
254
+ "model": "VAD",
255
+ "venue": "ICCV'23",
256
+ "input": [
257
+ "camera"
258
+ ],
259
+ "dataset": [
260
+ "nuScenes"
261
+ ],
262
+ "vision": "ResNet",
263
+ "action": "REG",
264
+ "output": "Traj.",
265
+ "category": "Perception-Action Models"
266
+ },
267
+ {
268
+ "id": 18,
269
+ "model": "OccNet",
270
+ "venue": "ICCV'23",
271
+ "input": [
272
+ "camera"
273
+ ],
274
+ "dataset": [
275
+ "nuScenes",
276
+ "OpenOcc"
277
+ ],
278
+ "vision": "ResNet",
279
+ "action": "SEL",
280
+ "output": "Traj.",
281
+ "category": "Perception-Action Models"
282
+ },
283
+ {
284
+ "id": 19,
285
+ "model": "GenAD",
286
+ "venue": "ECCV'24",
287
+ "input": [
288
+ "camera"
289
+ ],
290
+ "dataset": [
291
+ "nuScenes"
292
+ ],
293
+ "vision": "ResNet",
294
+ "action": "GEN",
295
+ "output": "Traj.",
296
+ "category": "Perception-Action Models"
297
+ },
298
+ {
299
+ "id": 20,
300
+ "model": "PARA-Drive",
301
+ "venue": "CVPR'24",
302
+ "input": [
303
+ "camera"
304
+ ],
305
+ "dataset": [
306
+ "nuScenes"
307
+ ],
308
+ "vision": "ResNet",
309
+ "action": "REG",
310
+ "output": "Traj.",
311
+ "category": "Perception-Action Models"
312
+ },
313
+ {
314
+ "id": 21,
315
+ "model": "Hydra-MDP",
316
+ "venue": "CVPRW'24",
317
+ "input": [
318
+ "camera",
319
+ "lidar"
320
+ ],
321
+ "dataset": [
322
+ "NAVSIM"
323
+ ],
324
+ "vision": "ResNet",
325
+ "action": "SEL",
326
+ "output": "Traj.",
327
+ "category": "Perception-Action Models"
328
+ },
329
+ {
330
+ "id": 22,
331
+ "model": "SparseAD",
332
+ "venue": "arXiv'24",
333
+ "input": [
334
+ "camera"
335
+ ],
336
+ "dataset": [
337
+ "nuScenes"
338
+ ],
339
+ "vision": "ResNet",
340
+ "action": "REG",
341
+ "output": "Traj.",
342
+ "category": "Perception-Action Models"
343
+ },
344
+ {
345
+ "id": 23,
346
+ "model": "GaussianAD",
347
+ "venue": "arXiv'24",
348
+ "input": [
349
+ "camera"
350
+ ],
351
+ "dataset": [
352
+ "nuScenes"
353
+ ],
354
+ "vision": "ResNet",
355
+ "action": "REG",
356
+ "output": "Traj.",
357
+ "category": "Perception-Action Models"
358
+ },
359
+ {
360
+ "id": 24,
361
+ "model": "DiFSD",
362
+ "venue": "arXiv'24",
363
+ "input": [
364
+ "camera"
365
+ ],
366
+ "dataset": [
367
+ "nuScenes"
368
+ ],
369
+ "vision": "ResNet",
370
+ "action": "GEN",
371
+ "output": "Traj.",
372
+ "category": "Perception-Action Models"
373
+ },
374
+ {
375
+ "id": 25,
376
+ "model": "DriveTransformer",
377
+ "venue": "ICLR'25",
378
+ "input": [
379
+ "camera"
380
+ ],
381
+ "dataset": [
382
+ "nuScenes",
383
+ "Bench2Drive"
384
+ ],
385
+ "vision": "ResNet",
386
+ "action": "REG",
387
+ "output": "Traj.",
388
+ "category": "Perception-Action Models"
389
+ },
390
+ {
391
+ "id": 26,
392
+ "model": "SparseDrive",
393
+ "venue": "ICRA'25",
394
+ "input": [
395
+ "camera"
396
+ ],
397
+ "dataset": [
398
+ "nuScenes"
399
+ ],
400
+ "vision": "ResNet",
401
+ "action": "REG",
402
+ "output": "Traj.",
403
+ "category": "Perception-Action Models"
404
+ },
405
+ {
406
+ "id": 27,
407
+ "model": "DiffusionDrive",
408
+ "venue": "CVPR'25",
409
+ "input": [
410
+ "camera",
411
+ "lidar"
412
+ ],
413
+ "dataset": [
414
+ "nuScenes",
415
+ "NAVSIM"
416
+ ],
417
+ "vision": "ResNet",
418
+ "action": "GEN",
419
+ "output": "Traj.",
420
+ "category": "Perception-Action Models"
421
+ },
422
+ {
423
+ "id": 28,
424
+ "model": "GoalFlow",
425
+ "venue": "CVPR'25",
426
+ "input": [
427
+ "camera",
428
+ "lidar",
429
+ "status"
430
+ ],
431
+ "dataset": [
432
+ "NAVSIM"
433
+ ],
434
+ "vision": "VoVNet",
435
+ "action": "GEN",
436
+ "output": "Traj.",
437
+ "category": "Perception-Action Models"
438
+ },
439
+ {
440
+ "id": 29,
441
+ "model": "GuideFlow",
442
+ "venue": "arXiv'25",
443
+ "input": [
444
+ "camera"
445
+ ],
446
+ "dataset": [
447
+ "nuScenes",
448
+ "NAVSIM",
449
+ "Bench2Drive"
450
+ ],
451
+ "vision": "ResNet",
452
+ "action": "GEN",
453
+ "output": "Traj.",
454
+ "category": "Perception-Action Models"
455
+ },
456
+ {
457
+ "id": 30,
458
+ "model": "ETA",
459
+ "venue": "arXiv'25",
460
+ "input": [
461
+ "camera"
462
+ ],
463
+ "dataset": [
464
+ "Bench2Drive"
465
+ ],
466
+ "vision": "CLIP-ViT",
467
+ "action": "REG",
468
+ "output": "Traj.",
469
+ "category": "Perception-Action Models"
470
+ },
471
+ {
472
+ "id": 31,
473
+ "model": "Geo",
474
+ "venue": "arXiv'25",
475
+ "input": [
476
+ "camera"
477
+ ],
478
+ "dataset": [
479
+ "nuScenes"
480
+ ],
481
+ "vision": "ResNet",
482
+ "action": "REG",
483
+ "output": "Traj.",
484
+ "category": "Perception-Action Models"
485
+ },
486
+ {
487
+ "id": 32,
488
+ "model": "DiffusionDriveV2",
489
+ "venue": "arXiv'25",
490
+ "input": [
491
+ "camera",
492
+ "lidar"
493
+ ],
494
+ "dataset": [
495
+ "NAVSIM"
496
+ ],
497
+ "vision": "ResNet",
498
+ "action": "GEN",
499
+ "output": "Traj.",
500
+ "category": "Perception-Action Models"
501
+ },
502
+ {
503
+ "id": 33,
504
+ "model": "NaviHydra",
505
+ "venue": "arXiv'25",
506
+ "input": [
507
+ "camera",
508
+ "lidar"
509
+ ],
510
+ "dataset": [
511
+ "NAVSIM"
512
+ ],
513
+ "vision": "ResNet",
514
+ "action": "SEL",
515
+ "output": "Traj.",
516
+ "category": "Perception-Action Models"
517
+ },
518
+ {
519
+ "id": 34,
520
+ "model": "Mimir",
521
+ "venue": "arXiv'25",
522
+ "input": [
523
+ "camera",
524
+ "lidar"
525
+ ],
526
+ "dataset": [
527
+ "NAVSIM"
528
+ ],
529
+ "vision": "ResNet",
530
+ "action": "GEN",
531
+ "output": "Traj.",
532
+ "category": "Perception-Action Models"
533
+ },
534
+ {
535
+ "id": 35,
536
+ "model": "DriveDreamer",
537
+ "venue": "ECCV'24",
538
+ "input": [
539
+ "camera",
540
+ "status"
541
+ ],
542
+ "dataset": [
543
+ "nuScenes"
544
+ ],
545
+ "vision": "SD",
546
+ "action": "REG",
547
+ "output": "Traj.",
548
+ "category": "Image-Based World Models"
549
+ },
550
+ {
551
+ "id": 36,
552
+ "model": "GenAD",
553
+ "venue": "CVPR'24",
554
+ "input": [
555
+ "camera"
556
+ ],
557
+ "dataset": [
558
+ "OpenDV"
559
+ ],
560
+ "vision": "SDXL",
561
+ "action": "REG",
562
+ "output": "Traj.",
563
+ "category": "Image-Based World Models"
564
+ },
565
+ {
566
+ "id": 37,
567
+ "model": "Drive-WM",
568
+ "venue": "CVPR'24",
569
+ "input": [
570
+ "camera",
571
+ "status"
572
+ ],
573
+ "dataset": [
574
+ "nuScenes"
575
+ ],
576
+ "vision": "ConvNeXt",
577
+ "action": "SEL",
578
+ "output": "Traj.",
579
+ "category": "Image-Based World Models"
580
+ },
581
+ {
582
+ "id": 38,
583
+ "model": "DrivingWorld",
584
+ "venue": "arXiv'24",
585
+ "input": [
586
+ "camera",
587
+ "status"
588
+ ],
589
+ "dataset": [
590
+ "nuPlan"
591
+ ],
592
+ "vision": "VQ-VAE",
593
+ "action": "REG",
594
+ "output": "Traj.",
595
+ "category": "Image-Based World Models"
596
+ },
597
+ {
598
+ "id": 39,
599
+ "model": "Imagine-2-Drive",
600
+ "venue": "IROS'25",
601
+ "input": [
602
+ "camera"
603
+ ],
604
+ "dataset": [
605
+ "CARLA"
606
+ ],
607
+ "vision": "SVD",
608
+ "action": "SEL",
609
+ "output": "Traj.",
610
+ "category": "Image-Based World Models"
611
+ },
612
+ {
613
+ "id": 40,
614
+ "model": "DrivingGPT",
615
+ "venue": "ICCV'25",
616
+ "input": [
617
+ "camera",
618
+ "status"
619
+ ],
620
+ "dataset": [
621
+ "nuPlan",
622
+ "NAVSIM"
623
+ ],
624
+ "vision": "VQ-VAE",
625
+ "action": "REG",
626
+ "output": "Traj.",
627
+ "category": "Image-Based World Models"
628
+ },
629
+ {
630
+ "id": 41,
631
+ "model": "Epona",
632
+ "venue": "ICCV'25",
633
+ "input": [
634
+ "camera",
635
+ "status"
636
+ ],
637
+ "dataset": [
638
+ "nuScenes",
639
+ "NAVSIM",
640
+ "nuPlan"
641
+ ],
642
+ "vision": "DC-AE",
643
+ "action": "REG",
644
+ "output": "Traj.",
645
+ "category": "Image-Based World Models"
646
+ },
647
+ {
648
+ "id": 42,
649
+ "model": "VaViM",
650
+ "venue": "arXiv'25",
651
+ "input": [
652
+ "camera"
653
+ ],
654
+ "dataset": [
655
+ "OpenDV",
656
+ "nuScenes",
657
+ "nuPlan"
658
+ ],
659
+ "vision": "LLaMAGen",
660
+ "action": "GEN",
661
+ "output": "Traj.",
662
+ "category": "Image-Based World Models"
663
+ },
664
+ {
665
+ "id": 43,
666
+ "model": "OccWorld",
667
+ "venue": "ECCV'24",
668
+ "input": [
669
+ "camera",
670
+ "status"
671
+ ],
672
+ "dataset": [
673
+ "nuScenes",
674
+ "Occ3D"
675
+ ],
676
+ "vision": "ResNet",
677
+ "action": "REG",
678
+ "output": "Traj.",
679
+ "category": "Occupancy-Based World Models"
680
+ },
681
+ {
682
+ "id": 44,
683
+ "model": "NeMo",
684
+ "venue": "ECCV'24",
685
+ "input": [
686
+ "camera"
687
+ ],
688
+ "dataset": [
689
+ "nuScenes"
690
+ ],
691
+ "vision": "ResNet",
692
+ "action": "REG",
693
+ "output": "Traj.",
694
+ "category": "Occupancy-Based World Models"
695
+ },
696
+ {
697
+ "id": 45,
698
+ "model": "OccVAR",
699
+ "venue": "-",
700
+ "input": [
701
+ "camera",
702
+ "status"
703
+ ],
704
+ "dataset": [
705
+ "nuScenes",
706
+ "Occ3D"
707
+ ],
708
+ "vision": "ResNet",
709
+ "action": "REG",
710
+ "output": "Traj.",
711
+ "category": "Occupancy-Based World Models"
712
+ },
713
+ {
714
+ "id": 46,
715
+ "model": "RenderWorld",
716
+ "venue": "arXiv'24",
717
+ "input": [
718
+ "camera"
719
+ ],
720
+ "dataset": [
721
+ "nuScenes",
722
+ "Occ3D"
723
+ ],
724
+ "vision": "Swin-T",
725
+ "action": "REG",
726
+ "output": "Traj.",
727
+ "category": "Occupancy-Based World Models"
728
+ },
729
+ {
730
+ "id": 47,
731
+ "model": "DFIT-OccWorld",
732
+ "venue": "arXiv'24",
733
+ "input": [
734
+ "camera",
735
+ "status"
736
+ ],
737
+ "dataset": [
738
+ "nuScenes",
739
+ "Occ3D"
740
+ ],
741
+ "vision": "ResNet",
742
+ "action": "REG",
743
+ "output": "Traj.",
744
+ "category": "Occupancy-Based World Models"
745
+ },
746
+ {
747
+ "id": 48,
748
+ "model": "Drive-OccWorld",
749
+ "venue": "AAAI'25",
750
+ "input": [
751
+ "camera"
752
+ ],
753
+ "dataset": [
754
+ "nuScenes",
755
+ "Cam4DOcc"
756
+ ],
757
+ "vision": "ResNet",
758
+ "action": "REG",
759
+ "output": "Traj.",
760
+ "category": "Occupancy-Based World Models"
761
+ },
762
+ {
763
+ "id": 49,
764
+ "model": "T³Former",
765
+ "venue": "arXiv'25",
766
+ "input": [
767
+ "camera",
768
+ "status"
769
+ ],
770
+ "dataset": [
771
+ "nuScenes",
772
+ "Occ3D"
773
+ ],
774
+ "vision": "ResNet",
775
+ "action": "REG",
776
+ "output": "Traj.",
777
+ "category": "Occupancy-Based World Models"
778
+ },
779
+ {
780
+ "id": 50,
781
+ "model": "AD-R1",
782
+ "venue": "arXiv'25",
783
+ "input": [
784
+ "camera",
785
+ "lidar",
786
+ "status"
787
+ ],
788
+ "dataset": [
789
+ "nuScenes",
790
+ "NAVSIM"
791
+ ],
792
+ "vision": "-",
793
+ "action": "RL",
794
+ "output": "Traj.",
795
+ "category": "Occupancy-Based World Models"
796
+ },
797
+ {
798
+ "id": 51,
799
+ "model": "Covariate-Shift",
800
+ "venue": "arXiv'24",
801
+ "input": [
802
+ "camera",
803
+ "status"
804
+ ],
805
+ "dataset": [
806
+ "CARLA"
807
+ ],
808
+ "vision": "DINOv2",
809
+ "action": "REG",
810
+ "output": "Traj.",
811
+ "category": "Latent-Based World Models"
812
+ },
813
+ {
814
+ "id": 52,
815
+ "model": "World4Drive",
816
+ "venue": "ICCV'25",
817
+ "input": [
818
+ "camera"
819
+ ],
820
+ "dataset": [
821
+ "nuScenes",
822
+ "NAVSIM"
823
+ ],
824
+ "vision": "ResNet",
825
+ "action": "REG",
826
+ "output": "Traj.",
827
+ "category": "Latent-Based World Models"
828
+ },
829
+ {
830
+ "id": 53,
831
+ "model": "WoTE",
832
+ "venue": "ICCV'25",
833
+ "input": [
834
+ "camera",
835
+ "lidar"
836
+ ],
837
+ "dataset": [
838
+ "NAVSIM",
839
+ "Bench2Drive"
840
+ ],
841
+ "vision": "ResNet",
842
+ "action": "SEL",
843
+ "output": "Traj.",
844
+ "category": "Latent-Based World Models"
845
+ },
846
+ {
847
+ "id": 54,
848
+ "model": "LAW",
849
+ "venue": "ICLR'25",
850
+ "input": [
851
+ "camera"
852
+ ],
853
+ "dataset": [
854
+ "nuScenes",
855
+ "NAVSIM",
856
+ "CARLA"
857
+ ],
858
+ "vision": "Swin-T",
859
+ "action": "REG",
860
+ "output": "Traj.",
861
+ "category": "Latent-Based World Models"
862
+ },
863
+ {
864
+ "id": 55,
865
+ "model": "SSR",
866
+ "venue": "ICLR'25",
867
+ "input": [
868
+ "camera"
869
+ ],
870
+ "dataset": [
871
+ "nuScenes",
872
+ "CARLA"
873
+ ],
874
+ "vision": "ResNet",
875
+ "action": "REG",
876
+ "output": "Traj.",
877
+ "category": "Latent-Based World Models"
878
+ },
879
+ {
880
+ "id": 56,
881
+ "model": "Echo-Planning",
882
+ "venue": "arXiv'25",
883
+ "input": [
884
+ "camera"
885
+ ],
886
+ "dataset": [
887
+ "nuScenes"
888
+ ],
889
+ "vision": "ResNet",
890
+ "action": "REG",
891
+ "output": "Traj.",
892
+ "category": "Latent-Based World Models"
893
+ },
894
+ {
895
+ "id": 57,
896
+ "model": "SeerDrive",
897
+ "venue": "NeurIPS'25",
898
+ "input": [
899
+ "camera",
900
+ "lidar"
901
+ ],
902
+ "dataset": [
903
+ "nuScenes",
904
+ "NAVSIM"
905
+ ],
906
+ "vision": "VoVNet",
907
+ "action": "SEL",
908
+ "output": "Traj.",
909
+ "category": "Latent-Based World Models"
910
+ }
911
+ ]
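`results/va_models.json` uses a different, metric-free schema (id, model, venue, input, dataset, vision, action, output, category). A minimal validation sketch for it follows, assuming the file is read from the repository root; this is illustrative and not part of the commit.

```python
import json
from collections import Counter

# Fields every record in results/va_models.json is expected to carry.
REQUIRED = {
    "id", "model", "venue", "input", "dataset",
    "vision", "action", "output", "category",
}

with open("results/va_models.json") as f:
    models = json.load(f)

for entry in models:
    missing = REQUIRED - entry.keys()
    assert not missing, f"{entry.get('model', '?')} is missing {missing}"
    # "input" and "dataset" are lists of tags; the other fields are strings.
    assert isinstance(entry["input"], list)
    assert isinstance(entry["dataset"], list)

# Category histogram for the 57 records above, in descending-count order:
# Counter({'Perception-Action Models': 20, 'Action-Only Models': 14,
#          'Image-Based World Models': 8, 'Occupancy-Based World Models': 8,
#          'Latent-Based World Models': 7})
print(Counter(entry["category"] for entry in models))
```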
results/vla_models.json ADDED
@@ -0,0 +1,1499 @@
1
+ [
2
+ {
3
+ "id": 1,
4
+ "model": "DriveMLM",
5
+ "venue": "arXiv'23",
6
+ "input": [
7
+ "camera",
8
+ "prompt",
9
+ "instruction",
10
+ "traffic"
11
+ ],
12
+ "dataset": [
13
+ "CARLA"
14
+ ],
15
+ "vision": "EVA-CLIP",
16
+ "language": "LLaMA",
17
+ "action": "LH",
18
+ "output": "Desc.+Meta.",
19
+ "category": "Textural Action Generator"
20
+ },
21
+ {
22
+ "id": 2,
23
+ "model": "RAG-Driver",
24
+ "venue": "RSS'24",
25
+ "input": [
26
+ "camera",
27
+ "prompt",
28
+ "instruction",
29
+ "context",
30
+ "status"
31
+ ],
32
+ "dataset": [
33
+ "BDD-X"
34
+ ],
35
+ "vision": "CLIP",
36
+ "language": "Vicuna-1.5",
37
+ "action": "LH",
38
+ "output": "Desc.+Ctrl.",
39
+ "category": "Textural Action Generator"
40
+ },
41
+ {
42
+ "id": 3,
43
+ "model": "RDA-Driver",
44
+ "venue": "ECCV'24",
45
+ "input": [
46
+ "camera",
47
+ "prompt",
48
+ "status"
49
+ ],
50
+ "dataset": [
51
+ "DriveLM",
52
+ "nuScenes"
53
+ ],
54
+ "vision": "BEVFusion",
55
+ "language": "LLaMA",
56
+ "action": "LH",
57
+ "output": "Desc.+Traj.",
58
+ "category": "Textural Action Generator"
59
+ },
60
+ {
61
+ "id": 4,
62
+ "model": "DriveLM",
63
+ "venue": "ECCV'24",
64
+ "input": [
65
+ "camera",
66
+ "prompt"
67
+ ],
68
+ "dataset": [
69
+ "DriveLM"
70
+ ],
71
+ "vision": "BLIP-2",
72
+ "language": "LLaMA",
73
+ "action": "LH",
74
+ "output": "Meta.+Traj.",
75
+ "category": "Textural Action Generator"
76
+ },
77
+ {
78
+ "id": 5,
79
+ "model": "DriveGPT4",
80
+ "venue": "RA-L'24",
81
+ "input": [
82
+ "camera",
83
+ "prompt"
84
+ ],
85
+ "dataset": [
86
+ "BDD-X"
87
+ ],
88
+ "vision": "CLIP",
89
+ "language": "LLaMA-2",
90
+ "action": "LH",
91
+ "output": "Desc.+Ctrl.",
92
+ "category": "Textural Action Generator"
93
+ },
94
+ {
95
+ "id": 6,
96
+ "model": "DriVLMe",
97
+ "venue": "IROS'24",
98
+ "input": [
99
+ "camera",
100
+ "prompt",
101
+ "instruction"
102
+ ],
103
+ "dataset": [
104
+ "BDD-X",
105
+ "SDN",
106
+ "CARLA"
107
+ ],
108
+ "vision": "CLIP",
109
+ "language": "Vicuna",
110
+ "action": "LH",
111
+ "output": "Desc.+Ctrl.",
112
+ "category": "Textural Action Generator"
113
+ },
114
+ {
115
+ "id": 7,
116
+ "model": "LLaDA",
117
+ "venue": "CVPR'24",
118
+ "input": [
119
+ "camera",
120
+ "prompt",
121
+ "instruction",
122
+ "scene",
123
+ "traffic"
124
+ ],
125
+ "dataset": [
126
+ "nuScenes",
127
+ "nuPlan"
128
+ ],
129
+ "vision": "-",
130
+ "language": "GPT-4",
131
+ "action": "LH",
132
+ "output": "Ctrl.+Traj.",
133
+ "category": "Textural Action Generator"
134
+ },
135
+ {
136
+ "id": 8,
137
+ "model": "VLAAD",
138
+ "venue": "WACVW'24",
139
+ "input": [
140
+ "camera",
141
+ "prompt",
142
+ "instruction"
143
+ ],
144
+ "dataset": [
145
+ "VLAAD"
146
+ ],
147
+ "vision": "BLIP-2",
148
+ "language": "LLaMA-2",
149
+ "action": "LH",
150
+ "output": "Ctrl.",
151
+ "category": "Textural Action Generator"
152
+ },
153
+ {
154
+ "id": 9,
155
+ "model": "OccLLaMA",
156
+ "venue": "arXiv'24",
157
+ "input": [
158
+ "camera",
159
+ "prompt"
160
+ ],
161
+ "dataset": [
162
+ "nuScenes"
163
+ ],
164
+ "vision": "Swin-T, PointPillar",
165
+ "language": "LLaMA-3",
166
+ "action": "LH",
167
+ "output": "Ctrl.+Traj.",
168
+ "category": "Textural Action Generator"
169
+ },
170
+ {
171
+ "id": 10,
172
+ "model": "Doe-1",
173
+ "venue": "arXiv'24",
174
+ "input": [
175
+ "camera",
176
+ "prompt",
177
+ "status",
178
+ "scene"
179
+ ],
180
+ "dataset": [
181
+ "nuScenes"
182
+ ],
183
+ "vision": "Lumina-mGPT",
184
+ "language": "BPE tokenizer",
185
+ "action": "LH",
186
+ "output": "Ctrl.+Traj.",
187
+ "category": "Textural Action Generator"
188
+ },
189
+ {
190
+ "id": 11,
191
+ "model": "LINGO-2",
192
+ "venue": "-",
193
+ "input": [
194
+ "camera",
195
+ "prompt",
196
+ "instruction"
197
+ ],
198
+ "dataset": [
199
+ "Private"
200
+ ],
201
+ "vision": "Wayve Vision",
202
+ "language": "Wayve VLA",
203
+ "action": "LH",
204
+ "output": "Desc.+Traj.",
205
+ "category": "Textural Action Generator"
206
+ },
207
+ {
208
+ "id": 12,
209
+ "model": "SafeAuto",
210
+ "venue": "ICML'25",
211
+ "input": [
212
+ "camera",
213
+ "prompt",
214
+ "status",
215
+ "traffic",
216
+ "context"
217
+ ],
218
+ "dataset": [
219
+ "BDD-X",
220
+ "DriveLM"
221
+ ],
222
+ "vision": "LanguageBind",
223
+ "language": "Video-LLaVA",
224
+ "action": "LH",
225
+ "output": "Desc.+Ctrl.",
226
+ "category": "Textural Action Generator"
227
+ },
228
+ {
229
+ "id": 13,
230
+ "model": "OpenEMMA",
231
+ "venue": "WACV'25",
232
+ "input": [
233
+ "camera",
234
+ "prompt",
235
+ "status"
236
+ ],
237
+ "dataset": [
238
+ "nuScenes"
239
+ ],
240
+ "vision": "Qwen2-VL",
241
+ "language": "Qwen2-VL",
242
+ "action": "LH",
243
+ "output": "Desc.+Traj.",
244
+ "category": "Textural Action Generator"
245
+ },
246
+ {
247
+ "id": 14,
248
+ "model": "ReasonPlan",
249
+ "venue": "CoRL'25",
250
+ "input": [
251
+ "camera",
252
+ "prompt",
253
+ "instruction",
254
+ "status"
255
+ ],
256
+ "dataset": [
257
+ "Bench2Drive"
258
+ ],
259
+ "vision": "SigLIP",
260
+ "language": "Qwen",
261
+ "action": "LH",
262
+ "output": "Desc.+Traj.+Meta.",
263
+ "category": "Textural Action Generator"
264
+ },
265
+ {
266
+ "id": 15,
267
+ "model": "FutureSightDrive",
268
+ "venue": "NeurIPS'25",
269
+ "input": [
270
+ "camera",
271
+ "prompt",
272
+ "instruction",
273
+ "status"
274
+ ],
275
+ "dataset": [
276
+ "nuScenes",
277
+ "DriveLM"
278
+ ],
279
+ "vision": "ViT",
280
+ "language": "Qwen2-VL",
281
+ "action": "LH",
282
+ "output": "Desc.+Traj.",
283
+ "category": "Textural Action Generator"
284
+ },
285
+ {
286
+ "id": 16,
287
+ "model": "ImpromptuVLA",
288
+ "venue": "NeurIPS'25",
289
+ "input": [
290
+ "camera",
291
+ "prompt",
292
+ "scene",
293
+ "status",
294
+ "traffic"
295
+ ],
296
+ "dataset": [
297
+ "ImpromptuVLA"
298
+ ],
299
+ "vision": "Qwen2.5-VL",
300
+ "language": "Qwen2.5-VL",
301
+ "action": "LH",
302
+ "output": "Traj.",
303
+ "category": "Textural Action Generator"
304
+ },
305
+ {
306
+ "id": 17,
307
+ "model": "WKER",
308
+ "venue": "AAAI'25",
309
+ "input": [
310
+ "camera",
311
+ "prompt",
312
+ "instruction",
313
+ "status"
314
+ ],
315
+ "dataset": [
316
+ "nuScenes"
317
+ ],
318
+ "vision": "EVA-02",
319
+ "language": "LLaMA3",
320
+ "action": "LH",
321
+ "output": "Desc.+Traj.",
322
+ "category": "Textural Action Generator"
323
+ },
324
+ {
325
+ "id": 18,
326
+ "model": "OmniDrive",
327
+ "venue": "CVPR'25",
328
+ "input": [
329
+ "camera",
330
+ "prompt",
331
+ "instruction",
332
+ "scene",
333
+ "status",
334
+ "traffic"
335
+ ],
336
+ "dataset": [
337
+ "nuScenes",
338
+ "DriveLM"
339
+ ],
340
+ "vision": "EVA-02",
341
+ "language": "LLaMA2",
342
+ "action": "LH",
343
+ "output": "Desc.+Traj",
344
+ "category": "Textural Action Generator"
345
+ },
346
+ {
347
+ "id": 19,
348
+ "model": "S4-Driver",
349
+ "venue": "CVPR'25",
350
+ "input": [
351
+ "camera",
352
+ "prompt",
353
+ "instruction",
354
+ "status"
355
+ ],
356
+ "dataset": [
357
+ "Waymo",
358
+ "nuScenes"
359
+ ],
360
+ "vision": "PaLI3",
361
+ "language": "PaLI3",
362
+ "action": "LH",
363
+ "output": "Meta.+Traj.",
364
+ "category": "Textural Action Generator"
365
+ },
366
+ {
367
+ "id": 20,
368
+ "model": "EMMA",
369
+ "venue": "TMLR'25",
370
+ "input": [
371
+ "camera",
372
+ "prompt",
373
+ "instruction",
374
+ "status"
375
+ ],
376
+ "dataset": [
377
+ "Waymo",
378
+ "nuScenes"
379
+ ],
380
+ "vision": "Gemini-VLM",
381
+ "language": "Gemini",
382
+ "action": "LH",
383
+ "output": "Desc.+Traj.",
384
+ "category": "Textural Action Generator"
385
+ },
386
+ {
387
+ "id": 21,
388
+ "model": "Occ-LLM",
389
+ "venue": "ICRA'25",
390
+ "input": [
391
+ "camera",
392
+ "prompt",
393
+ "status"
394
+ ],
395
+ "dataset": [
396
+ "nuScenes"
397
+ ],
398
+ "vision": "-",
399
+ "language": "LLaMA2",
400
+ "action": "LH",
401
+ "output": "Traj.",
402
+ "category": "Textural Action Generator"
403
+ },
404
+ {
405
+ "id": 22,
406
+ "model": "Sce2DriveX",
407
+ "venue": "RA-L'25",
408
+ "input": [
409
+ "camera",
410
+ "prompt",
411
+ "instruction",
412
+ "status"
413
+ ],
414
+ "dataset": [
415
+ "nuScenes",
416
+ "Bench2Drive"
417
+ ],
418
+ "vision": "OpenCLIP",
419
+ "language": "Vicuna-1.5",
420
+ "action": "LH",
421
+ "output": "Ctrl.+Traj.",
422
+ "category": "Textural Action Generator"
423
+ },
424
+ {
425
+ "id": 23,
426
+ "model": "DriveAgent-R1",
427
+ "venue": "arXiv'25",
428
+ "input": [
429
+ "camera",
430
+ "prompt",
431
+ "instruction",
432
+ "status"
433
+ ],
434
+ "dataset": [
435
+ "nuScenes",
436
+ "Private"
437
+ ],
438
+ "vision": "Qwen2.5-VL",
439
+ "language": "Qwen2.5-VL",
440
+ "action": "LH",
441
+ "output": "Desc.+Meta.",
442
+ "category": "Textural Action Generator"
443
+ },
444
+ {
445
+ "id": 24,
446
+ "model": "Drive-R1",
447
+ "venue": "arXiv'25",
448
+ "input": [
449
+ "camera",
450
+ "prompt",
451
+ "instruction",
452
+ "status"
453
+ ],
454
+ "dataset": [
455
+ "nuScenes",
456
+ "DriveLM"
457
+ ],
458
+ "vision": "InternVL2",
459
+ "language": "InternVL2",
460
+ "action": "LH",
461
+ "output": "Desc.+Traj.+Meta.",
462
+ "category": "Textural Action Generator"
463
+ },
464
+ {
465
+ "id": 25,
466
+ "model": "FastDriveVLA",
467
+ "venue": "arXiv'25",
468
+ "input": [
469
+ "camera",
470
+ "prompt"
471
+ ],
472
+ "dataset": [
473
+ "nuScenes"
474
+ ],
475
+ "vision": "Qwen2.5-VL",
476
+ "language": "Qwen2.5-VL",
477
+ "action": "LH",
478
+ "output": "Desc.+Traj.",
479
+ "category": "Textural Action Generator"
480
+ },
481
+ {
482
+ "id": 26,
483
+ "model": "WiseAD",
484
+ "venue": "arXiv'25",
485
+ "input": [
486
+ "camera",
487
+ "prompt"
488
+ ],
489
+ "dataset": [
490
+ "BDD-X",
491
+ "CARLA",
492
+ "DriveLM"
493
+ ],
494
+ "vision": "CLIP",
495
+ "language": "MobileVLM",
496
+ "action": "LH",
497
+ "output": "Traj.+Ctrl.",
498
+ "category": "Textural Action Generator"
499
+ },
500
+ {
501
+ "id": 27,
502
+ "model": "AutoDrive-R²",
503
+ "venue": "arXiv'25",
504
+ "input": [
505
+ "camera",
506
+ "prompt",
507
+ "status"
508
+ ],
509
+ "dataset": [
510
+ "nuScenes",
511
+ "Waymo"
512
+ ],
513
+ "vision": "Qwen2.5-VL",
514
+ "language": "Qwen2.5-VL",
515
+ "action": "LH",
516
+ "output": "Traj.",
517
+ "category": "Textural Action Generator"
518
+ },
519
+ {
520
+ "id": 28,
521
+ "model": "OmniReason",
522
+ "venue": "arXiv'25",
523
+ "input": [
524
+ "camera",
525
+ "prompt",
526
+ "instruction",
527
+ "scene",
528
+ "status",
529
+ "traffic",
530
+ "context"
531
+ ],
532
+ "dataset": [
533
+ "nuScenes",
534
+ "Bench2Drive"
535
+ ],
536
+ "vision": "EVA-02",
537
+ "language": "LLaVA-1.5",
538
+ "action": "LH",
539
+ "output": "Meta.+Traj.",
540
+ "category": "Textural Action Generator"
541
+ },
542
+ {
543
+ "id": 29,
544
+ "model": "OpenREAD",
545
+ "venue": "arXiv'25",
546
+ "input": [
547
+ "camera",
548
+ "prompt",
549
+ "instruction",
550
+ "scene",
551
+ "status"
552
+ ],
553
+ "dataset": [
554
+ "nuScenes"
555
+ ],
556
+ "vision": "Qwen3",
557
+ "language": "Qwen3",
558
+ "action": "LH",
559
+ "output": "Desc.+Meta.+Traj.",
560
+ "category": "Textural Action Generator"
561
+ },
562
+ {
563
+ "id": 30,
564
+ "model": "dVLM-AD",
565
+ "venue": "arXiv'25",
566
+ "input": [
567
+ "camera",
568
+ "prompt",
569
+ "status"
570
+ ],
571
+ "dataset": [
572
+ "Waymo",
573
+ "nuScenes"
574
+ ],
575
+ "vision": "SigLIP2",
576
+ "language": "LLaDA-V",
577
+ "action": "LH",
578
+ "output": "Desc.+Traj.",
579
+ "category": "Textural Action Generator"
580
+ },
581
+ {
582
+ "id": 31,
583
+ "model": "PLA",
584
+ "venue": "arXiv'25",
585
+ "input": [
586
+ "camera",
587
+ "prompt",
588
+ "instruction",
589
+ "scene",
590
+ "status",
591
+ "context"
592
+ ],
593
+ "dataset": [
594
+ "nuScenes"
595
+ ],
596
+ "vision": "Sensor Encoder",
597
+ "language": "GPT-4.1",
598
+ "action": "LH",
599
+ "output": "Desc.+Traj.",
600
+ "category": "Textural Action Generator"
601
+ },
602
+ {
603
+ "id": 32,
604
+ "model": "AlphaDrive",
605
+ "venue": "arXiv'25",
606
+ "input": [
607
+ "camera",
608
+ "prompt",
609
+ "instruction",
610
+ "status"
611
+ ],
612
+ "dataset": [
613
+ "MetaAD"
614
+ ],
615
+ "vision": "Qwen2-VL",
616
+ "language": "Qwen2-VL",
617
+ "action": "LH",
618
+ "output": "Desc.+Meta.",
619
+ "category": "Textural Action Generator"
620
+ },
621
+ {
622
+ "id": 33,
623
+ "model": "CoReVLA",
624
+ "venue": "arXiv'25",
625
+ "input": [
626
+ "camera",
627
+ "prompt",
628
+ "instruction",
629
+ "context"
630
+ ],
631
+ "dataset": [
632
+ "Bench2Drive"
633
+ ],
634
+ "vision": "Qwen2.5-VL",
635
+ "language": "Qwen2.5-VL",
636
+ "action": "LH",
637
+ "output": "Ctrl.+Traj.",
638
+ "category": "Textural Action Generator"
639
+ },
640
+ {
641
+ "id": 34,
642
+ "model": "LMDrive",
643
+ "venue": "CVPR'24",
644
+ "input": [
645
+ "camera",
646
+ "prompt",
647
+ "scene",
648
+ "instruction"
649
+ ],
650
+ "dataset": [
651
+ "CARLA"
652
+ ],
653
+ "vision": "ResNet",
654
+ "language": "LLaMA, Vicuna",
655
+ "action": "REG",
656
+ "output": "Ctrl.",
657
+ "category": "Numerical Action Generator"
658
+ },
659
+ {
660
+ "id": 35,
661
+ "model": "BEVDriver",
662
+ "venue": "IROS'25",
663
+ "input": [
664
+ "camera",
665
+ "prompt",
666
+ "instruction"
667
+ ],
668
+ "dataset": [
669
+ "CARLA"
670
+ ],
671
+ "vision": "InterFuser",
672
+ "language": "LLaMA-3.1",
673
+ "action": "REG",
674
+ "output": "Ctrl.+Traj.",
675
+ "category": "Numerical Action Generator"
676
+ },
677
+ {
678
+ "id": 36,
679
+ "model": "CoVLA-Agent",
680
+ "venue": "WACV'25",
681
+ "input": [
682
+ "camera",
683
+ "prompt",
684
+ "status"
685
+ ],
686
+ "dataset": [
687
+ "CoVLA"
688
+ ],
689
+ "vision": "CLIP",
690
+ "language": "LLaMA2",
691
+ "action": "REG",
692
+ "output": "Desc.+Traj.",
693
+ "category": "Numerical Action Generator"
694
+ },
695
+ {
696
+ "id": 37,
697
+ "model": "ORION",
698
+ "venue": "ICCV'25",
699
+ "input": [
700
+ "camera",
701
+ "prompt",
702
+ "instruction"
703
+ ],
704
+ "dataset": [
705
+ "nuScenes",
706
+ "Bench2Drive"
707
+ ],
708
+ "vision": "EVA-02",
709
+ "language": "Vicuna-1.5",
710
+ "action": "GEN",
711
+ "output": "Desc.+Traj.",
712
+ "category": "Numerical Action Generator"
713
+ },
714
+ {
715
+ "id": 38,
716
+ "model": "SimLingo",
717
+ "venue": "CVPR'25",
718
+ "input": [
719
+ "camera",
720
+ "prompt",
721
+ "instruction"
722
+ ],
723
+ "dataset": [
724
+ "CARLA",
725
+ "Bench2Drive"
726
+ ],
727
+ "vision": "InternViT",
728
+ "language": "Qwen2",
729
+ "action": "REG",
730
+ "output": "Ctrl.+Traj.",
731
+ "category": "Numerical Action Generator"
732
+ },
733
+ {
734
+ "id": 39,
735
+ "model": "DriveGPT4-V2",
736
+ "venue": "CVPR'25",
737
+ "input": [
738
+ "camera",
739
+ "prompt",
740
+ "instruction",
741
+ "scene",
742
+ "status"
743
+ ],
744
+ "dataset": [
745
+ "CARLA"
746
+ ],
747
+ "vision": "CLIP, SigLIP",
748
+ "language": "Qwen, Tinyllama",
749
+ "action": "REG",
750
+ "output": "Ctrl.+Traj.",
751
+ "category": "Numerical Action Generator"
752
+ },
753
+ {
754
+ "id": 40,
755
+ "model": "AutoVLA",
756
+ "venue": "NeurIPS'25",
757
+ "input": [
758
+ "camera",
759
+ "prompt",
760
+ "instruction",
761
+ "status"
762
+ ],
763
+ "dataset": [
764
+ "nuScenes",
765
+ "Bench2Drive",
766
+ "NAVSIM",
767
+ "Waymo"
768
+ ],
769
+ "vision": "Qwen2.5-VL",
770
+ "language": "Qwen2.5-VL",
771
+ "action": "LH",
772
+ "output": "Traj.",
773
+ "category": "Numerical Action Generator"
774
+ },
775
+ {
776
+ "id": 41,
777
+ "model": "DriveMoE",
778
+ "venue": "arXiv'25",
779
+ "input": [
780
+ "camera",
781
+ "prompt",
782
+ "status"
783
+ ],
784
+ "dataset": [
785
+ "Bench2Drive"
786
+ ],
787
+ "vision": "PaliGemma",
788
+ "language": "PaliGemma",
789
+ "action": "GEN",
790
+ "output": "Ctrl.",
791
+ "category": "Numerical Action Generator"
792
+ },
793
+ {
794
+ "id": 42,
795
+ "model": "DSDrive",
796
+ "venue": "arXiv'25",
797
+ "input": [
798
+ "camera",
799
+ "prompt",
800
+ "instruction"
801
+ ],
802
+ "dataset": [
803
+ "CARLA"
804
+ ],
805
+ "vision": "ResNet",
806
+ "language": "LLaMA",
807
+ "action": "REG",
808
+ "output": "Desc.+Traj.",
809
+ "category": "Numerical Action Generator"
810
+ },
811
+ {
812
+ "id": 43,
813
+ "model": "OccVLA",
814
+ "venue": "arXiv'25",
815
+ "input": [
816
+ "camera",
817
+ "prompt",
818
+ "status"
819
+ ],
820
+ "dataset": [
821
+ "nuScenes"
822
+ ],
823
+ "vision": "VQ-VAE",
824
+ "language": "PaliGemma-2",
825
+ "action": "REG",
826
+ "output": "Traj.",
827
+ "category": "Numerical Action Generator"
828
+ },
829
+ {
830
+ "id": 44,
831
+ "model": "VDRive",
832
+ "venue": "arXiv'25",
833
+ "input": [
834
+ "camera",
835
+ "prompt",
836
+ "instruction",
837
+ "status"
838
+ ],
839
+ "dataset": [
840
+ "nuScenes",
841
+ "Bench2Drive"
842
+ ],
843
+ "vision": "Qwen2.5-VL, CVQ",
844
+ "language": "InternVL3",
845
+ "action": "GEN",
846
+ "output": "Desc.+Traj.",
847
+ "category": "Numerical Action Generator"
848
+ },
849
+ {
850
+ "id": 45,
851
+ "model": "ReflectDrive",
852
+ "venue": "arXiv'25",
853
+ "input": [
854
+ "camera",
855
+ "prompt",
856
+ "instruction",
857
+ "status"
858
+ ],
859
+ "dataset": [
860
+ "NAVSIM"
861
+ ],
862
+ "vision": "LLaDA-V",
863
+ "language": "LLaDA-V",
864
+ "action": "GEN",
865
+ "output": "Traj.",
866
+ "category": "Numerical Action Generator"
867
+ },
868
+ {
869
+ "id": 46,
870
+ "model": "E3AD",
871
+ "venue": "arXiv'25",
872
+ "input": [
873
+ "camera",
874
+ "prompt",
875
+ "instruction",
876
+ "status",
877
+ "context"
878
+ ],
879
+ "dataset": [
880
+ "Talk2Car"
881
+ ],
882
+ "vision": "Qwen2.5-VL",
883
+ "language": "Qwen2.5-VL",
884
+ "action": "REG",
885
+ "output": "Traj.",
886
+ "category": "Numerical Action Generator"
887
+ },
888
+ {
889
+ "id": 47,
890
+ "model": "LCDrive",
891
+ "venue": "arXiv'25",
892
+ "input": [
893
+ "camera",
894
+ "prompt",
895
+ "status"
896
+ ],
897
+ "dataset": [
898
+ "PhysicalAI-AV"
899
+ ],
900
+ "vision": "DINOv2",
901
+ "language": "Qwen3",
902
+ "action": "LH",
903
+ "output": "Traj.",
904
+ "category": "Numerical Action Generator"
905
+ },
906
+ {
907
+ "id": 48,
908
+ "model": "Alpamayo-R1",
909
+ "venue": "arXiv'25",
910
+ "input": [
911
+ "camera",
912
+ "prompt",
913
+ "instruction",
914
+ "status",
915
+ "context"
916
+ ],
917
+ "dataset": [
918
+ "Private"
919
+ ],
920
+ "vision": "Cosmos-Reason1",
921
+ "language": "Cosmos-Reason1",
922
+ "action": "REG",
923
+ "output": "Desc.+Ctrl.+Traj.",
924
+ "category": "Numerical Action Generator"
925
+ },
926
+ {
927
+ "id": 49,
928
+ "model": "UniUGP",
929
+ "venue": "arXiv'25",
930
+ "input": [
931
+ "camera",
932
+ "prompt",
933
+ "instruction",
934
+ "status"
935
+ ],
936
+ "dataset": [
937
+ "Waymo",
938
+ "nuScenes",
939
+ "nuPlan"
940
+ ],
941
+ "vision": "Qwen2.5-VL",
942
+ "language": "Qwen2.5-VL",
943
+ "action": "GEN",
944
+ "output": "Desc.+Traj.",
945
+ "category": "Numerical Action Generator"
946
+ },
947
+ {
948
+ "id": 50,
949
+ "model": "MindDrive",
950
+ "venue": "arXiv'25",
951
+ "input": [
952
+ "camera",
953
+ "prompt",
954
+ "instruction",
955
+ "status"
956
+ ],
957
+ "dataset": [
958
+ "NAVSIM"
959
+ ],
960
+ "vision": "ResNet-34",
961
+ "language": "LLaVA-1B",
962
+ "action": "GEN",
963
+ "output": "Traj.",
964
+ "category": "Numerical Action Generator"
965
+ },
+ {
+ "id": 51,
+ "model": "AdaThinkDrive",
+ "venue": "arXiv'25",
+ "input": [
+ "camera",
+ "prompt",
+ "instruction",
+ "status"
+ ],
+ "dataset": [
+ "NAVSIM"
+ ],
+ "vision": "InternVL3",
+ "language": "InternVL3",
+ "action": "REG",
+ "output": "Desc.+Traj.+Meta.",
+ "category": "Numerical Action Generator"
+ },
+ {
+ "id": 52,
+ "model": "Percept-WAM",
+ "venue": "arXiv'25",
+ "input": [
+ "camera",
+ "lidar",
+ "prompt",
+ "instruction"
+ ],
+ "dataset": [
+ "nuScenes",
+ "NAVSIM"
+ ],
+ "vision": "InternViT",
+ "language": "InternVL2",
+ "action": "REG",
+ "output": "Traj.",
+ "category": "Numerical Action Generator"
+ },
+ {
+ "id": 53,
+ "model": "Reasoning-VLA",
+ "venue": "arXiv'25",
+ "input": [
+ "camera",
+ "prompt",
+ "status"
+ ],
+ "dataset": [
+ "nuScenes",
+ "NAVSIM",
+ "Waymo",
+ "Argoverse"
+ ],
+ "vision": "Qwen2.5-VL",
+ "language": "Qwen2.5-VL",
+ "action": "REG",
+ "output": "Traj.",
+ "category": "Numerical Action Generator"
+ },
+ {
+ "id": 54,
+ "model": "SpaceDrive",
+ "venue": "arXiv'25",
+ "input": [
+ "camera",
+ "prompt",
+ "status",
+ "scene"
+ ],
+ "dataset": [
+ "nuScenes",
+ "Bench2Drive"
+ ],
+ "vision": "Qwen2.5-VL",
+ "language": "Qwen2.5-VL",
+ "action": "REG",
+ "output": "Desc.+Traj.",
+ "category": "Numerical Action Generator"
+ },
+ {
+ "id": 55,
+ "model": "OpenDriveVLA",
+ "venue": "AAAI'26",
+ "input": [
+ "camera",
+ "prompt",
+ "instruction",
+ "status"
+ ],
+ "dataset": [
+ "nuScenes"
+ ],
+ "vision": "BEVFormer",
+ "language": "Qwen2.5",
+ "action": "LH",
+ "output": "Ctrl.+Traj.",
+ "category": "Numerical Action Generator"
+ },
+ {
+ "id": 56,
+ "model": "DriveVLM",
+ "venue": "CoRL'24",
+ "input": [
+ "camera",
+ "prompt",
+ "status"
+ ],
+ "dataset": [
+ "nuScenes",
+ "SUPAD"
+ ],
+ "vision": "-",
+ "language": "QwenVL",
+ "action": "REG",
+ "output": "Desc.+Traj.",
+ "category": "Dual-System: Explicit Action Guidance"
+ },
+ {
+ "id": 57,
+ "model": "LeapAD",
+ "venue": "NeurIPS'24",
+ "input": [
+ "camera",
+ "prompt",
+ "scene",
+ "traffic"
+ ],
+ "dataset": [
+ "CARLA"
+ ],
+ "vision": "QwenVL",
+ "language": "GPT-4, Qwen1.5",
+ "action": "LH",
+ "output": "Ctrl.+Traj.",
+ "category": "Dual-System: Explicit Action Guidance"
+ },
+ {
+ "id": 58,
+ "model": "FasionAD",
+ "venue": "arXiv'24",
+ "input": [
+ "camera",
+ "prompt",
+ "instruction",
+ "status"
+ ],
+ "dataset": [
+ "nuScenes",
+ "CARLA"
+ ],
+ "vision": "GenAD",
+ "language": "CLIP, QwenVL",
+ "action": "GEN",
+ "output": "Ctrl.+Traj.",
+ "category": "Dual-System: Explicit Action Guidance"
+ },
+ {
+ "id": 59,
+ "model": "Senna",
+ "venue": "arXiv'24",
+ "input": [
+ "camera",
+ "prompt",
+ "instruction"
+ ],
+ "dataset": [
+ "nuScenes"
+ ],
+ "vision": "VADv2, ViT",
+ "language": "Vicuna",
+ "action": "REG",
+ "output": "Desc.+Traj.",
+ "category": "Dual-System: Explicit Action Guidance"
+ },
+ {
+ "id": 60,
+ "model": "DualAD",
+ "venue": "IROS'25",
+ "input": [
+ "camera",
+ "prompt",
+ "scene",
+ "status"
+ ],
+ "dataset": [
+ "nuPlan"
+ ],
+ "vision": "-",
+ "language": "GPT-4o, GLM-4",
+ "action": "SEL",
+ "output": "Ctrl.+Traj.",
+ "category": "Dual-System: Explicit Action Guidance"
+ },
+ {
+ "id": 61,
+ "model": "DME-Driver",
+ "venue": "AAAI'25",
+ "input": [
+ "camera",
+ "prompt",
+ "scene",
+ "instruction",
+ "status"
+ ],
+ "dataset": [
+ "HBD"
+ ],
+ "vision": "UniAD",
+ "language": "LLaVA",
+ "action": "REG",
+ "output": "Desc.+Traj.",
+ "category": "Dual-System: Explicit Action Guidance"
+ },
+ {
+ "id": 62,
+ "model": "SOLVE",
+ "venue": "CVPR'25",
+ "input": [
+ "camera",
+ "prompt",
+ "status"
+ ],
+ "dataset": [
+ "nuScenes"
+ ],
+ "vision": "EVA-02",
+ "language": "LLaVA-1.5",
+ "action": "REG",
+ "output": "Ctrl.+Traj.",
+ "category": "Dual-System: Explicit Action Guidance"
+ },
+ {
+ "id": 63,
+ "model": "ReAL-AD",
+ "venue": "ICCV'25",
+ "input": [
+ "camera",
+ "prompt",
+ "instruction"
+ ],
+ "dataset": [
+ "nuScenes",
+ "Bench2Drive"
+ ],
+ "vision": "UniAD, VAD",
+ "language": "MiniCPM-2.5",
+ "action": "REG",
+ "output": "Ctrl.+Traj.",
+ "category": "Dual-System: Explicit Action Guidance"
+ },
+ {
+ "id": 64,
+ "model": "LeapVAD",
+ "venue": "TNNLS'25",
+ "input": [
+ "camera",
+ "prompt",
+ "scene",
+ "status"
+ ],
+ "dataset": [
+ "DriveLM",
+ "CARLA"
+ ],
+ "vision": "QwenVL, InternVL2",
+ "language": "GPT-4o",
+ "action": "LH",
+ "output": "Ctrl.+Traj.",
+ "category": "Dual-System: Explicit Action Guidance"
+ },
+ {
+ "id": 65,
+ "model": "DiffVLA",
+ "venue": "arXiv'25",
+ "input": [
+ "camera",
+ "prompt",
+ "instruction",
+ "status"
+ ],
+ "dataset": [
+ "NAVSIM"
+ ],
+ "vision": "CLIP",
+ "language": "Vicuna-1.5",
+ "action": "GEN",
+ "output": "Traj.",
+ "category": "Dual-System: Explicit Action Guidance"
+ },
+ {
+ "id": 66,
+ "model": "FasionAD++",
+ "venue": "arXiv'25",
+ "input": [
+ "camera",
+ "prompt",
+ "scene",
+ "instruction",
+ "status"
+ ],
+ "dataset": [
+ "nuScenes",
+ "CARLA"
+ ],
+ "vision": "BEVFormer",
+ "language": "Vicuna-1.5, QwenVL",
+ "action": "GEN",
+ "output": "Ctrl.+Traj.",
+ "category": "Dual-System: Explicit Action Guidance"
+ },
+ {
+ "id": 67,
+ "model": "VLP",
+ "venue": "CVPR'24",
+ "input": [
+ "camera",
+ "prompt",
+ "scene",
+ "instruction",
+ "status"
+ ],
+ "dataset": [
+ "nuScenes"
+ ],
+ "vision": "UniAD, VAD",
+ "language": "CLIP",
+ "action": "REG",
+ "output": "Traj.",
+ "category": "Dual-System: Implicit Representations Transfer"
+ },
+ {
+ "id": 68,
+ "model": "VLM-AD",
+ "venue": "CoRL'25",
+ "input": [
+ "camera",
+ "prompt",
+ "scene",
+ "status"
+ ],
+ "dataset": [
+ "nuScenes"
+ ],
+ "vision": "UniAD, VAD",
+ "language": "CLIP",
+ "action": "REG",
+ "output": "Traj.",
+ "category": "Dual-System: Implicit Representations Transfer"
+ },
+ {
+ "id": 69,
+ "model": "DiMA",
+ "venue": "CVPR'25",
+ "input": [
+ "camera",
+ "prompt",
+ "status"
+ ],
+ "dataset": [
+ "nuScenes"
+ ],
+ "vision": "UniAD, VAD",
+ "language": "LLaVA-1.5",
+ "action": "REG",
+ "output": "Ctrl.+Traj.",
+ "category": "Dual-System: Implicit Representations Transfer"
+ },
+ {
+ "id": 70,
+ "model": "ALN-P3",
+ "venue": "arXiv'25",
+ "input": [
+ "camera",
+ "prompt",
+ "scene"
+ ],
+ "dataset": [
+ "nuScenes"
+ ],
+ "vision": "VAD",
+ "language": "LLaMA-2",
+ "action": "REG",
+ "output": "Desc.+Traj.",
+ "category": "Dual-System: Implicit Representations Transfer"
+ },
+ {
+ "id": 71,
+ "model": "VERDI",
+ "venue": "arXiv'25",
+ "input": [
+ "camera",
+ "prompt",
+ "scene",
+ "status"
+ ],
+ "dataset": [
+ "nuScenes"
+ ],
+ "vision": "VAD",
+ "language": "Qwen2.5-VL",
+ "action": "REG",
+ "output": "Ctrl.+Traj.",
+ "category": "Dual-System: Implicit Representations Transfer"
+ },
+ {
+ "id": 72,
+ "model": "VLM-E2E",
+ "venue": "arXiv'25",
+ "input": [
+ "camera",
+ "prompt",
+ "scene"
+ ],
+ "dataset": [
+ "nuScenes"
+ ],
+ "vision": "EfficientNet",
+ "language": "CLIP",
+ "action": "REG",
+ "output": "Traj.",
+ "category": "Dual-System: Implicit Representations Transfer"
+ },
+ {
+ "id": 73,
+ "model": "ReCogDrive",
+ "venue": "arXiv'25",
+ "input": [
+ "camera",
+ "prompt",
+ "status"
+ ],
+ "dataset": [
+ "NAVSIM"
+ ],
+ "vision": "InternViT",
+ "language": "Qwen2.5",
+ "action": "GEN",
+ "output": "Desc.+Traj.",
+ "category": "Dual-System: Implicit Representations Transfer"
+ },
+ {
+ "id": 74,
+ "model": "InsightDrive",
+ "venue": "arXiv'25",
+ "input": [
+ "camera",
+ "prompt",
+ "scene",
+ "status"
+ ],
+ "dataset": [
+ "nuScenes"
+ ],
+ "vision": "ResNet",
+ "language": "BERT",
+ "action": "REG",
+ "output": "Traj.",
+ "category": "Dual-System: Implicit Representations Transfer"
+ },
+ {
+ "id": 75,
+ "model": "NetRoller",
+ "venue": "arXiv'25",
+ "input": [
+ "camera",
+ "prompt"
+ ],
+ "dataset": [
+ "nuScenes",
+ "DriveLM"
+ ],
+ "vision": "CLIP",
+ "language": "LLaMA-2",
+ "action": "REG",
+ "output": "Traj.",
+ "category": "Dual-System: Implicit Representations Transfer"
+ },
+ {
+ "id": 76,
+ "model": "ViLaD",
+ "venue": "arXiv'25",
+ "input": [
+ "camera",
+ "prompt",
+ "context"
+ ],
+ "dataset": [
+ "nuScenes"
+ ],
+ "vision": "SigLIP-2",
+ "language": "LLaDA-V",
+ "action": "GEN",
+ "output": "Ctrl.+Traj.",
+ "category": "Dual-System: Implicit Representations Transfer"
+ },
+ {
+ "id": 77,
+ "model": "OmniScene",
+ "venue": "arXiv'25",
+ "input": [
+ "camera",
+ "prompt",
+ "scene",
+ "status"
+ ],
+ "dataset": [
+ "nuScenes"
+ ],
+ "vision": "ResNet",
+ "language": "Qwen2.5-VL",
+ "action": "REG",
+ "output": "Traj.",
+ "category": "Dual-System: Implicit Representations Transfer"
+ },
+ {
+ "id": 78,
+ "model": "LMAD",
+ "venue": "arXiv'25",
+ "input": [
+ "camera",
+ "prompt"
+ ],
+ "dataset": [
+ "nuScenes",
+ "DriveLM"
+ ],
+ "vision": "VAD",
+ "language": "LLaVA-1.5",
+ "action": "LH",
+ "output": "Desc.+Meta.",
+ "category": "Dual-System: Implicit Representations Transfer"
+ }
+ ]
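Since every entry shares the same schema (`model`, `venue`, `input`, `dataset`, `vision`, `language`, `action`, `output`, `category`), the survey table closed by the `]` above can be sliced programmatically. A minimal sketch; the file path is an assumption, substitute the actual survey JSON added in this commit:

```python
import json
from collections import Counter

# Hypothetical path to the survey file whose tail is shown above.
with open("results/model_results.json") as fp:
    models = json.load(fp)

# Tally surveyed models by category, e.g. "Numerical Action Generator".
print(Counter(m["category"] for m in models))

# List every model evaluated on NAVSIM.
print([m["model"] for m in models if "NAVSIM" in m["dataset"]])
```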
results/wod_results.json ADDED
@@ -0,0 +1,12 @@
+ [
+ {"id": 1, "model": "Waymo Baseline", "year": 2025, "input": "camera", "vision": "-", "language": "-", "action": "-", "rfs_overall": 7.53, "rfs_spotlight": 6.60, "ade_5s": 3.02, "ade_3s": 1.32, "category": "Vision-Action"},
+ {"id": 2, "model": "Swin-Trajectory", "year": 2025, "input": "camera", "vision": "SwinT", "language": "-", "action": "REG", "rfs_overall": 7.54, "rfs_spotlight": 6.68, "ade_5s": 2.81, "ade_3s": 1.21, "category": "Vision-Action"},
+ {"id": 3, "model": "DiffusionDrive", "year": 2025, "input": "camera", "vision": "ResNet", "language": "-", "action": "GEN", "rfs_overall": 7.69, "rfs_spotlight": 6.65, "ade_5s": 2.99, "ade_3s": 1.31, "category": "Vision-Action"},
+ {"id": 4, "model": "RAP-DINO", "year": 2025, "input": "camera", "vision": "DINO", "language": "-", "action": "REG", "rfs_overall": 8.04, "rfs_spotlight": 7.20, "ade_5s": 2.65, "ade_3s": 1.17, "category": "Vision-Action"},
+ {"id": 5, "model": "OpenEMMA", "year": 2025, "input": "camera, prompt, status", "vision": "Qwen2-VL", "language": "Qwen2-VL", "action": "LH", "rfs_overall": 5.16, "rfs_spotlight": 4.71, "ade_5s": 12.74, "ade_3s": 6.68, "category": "Vision-Language-Action"},
+ {"id": 6, "model": "HMVLM", "year": 2025, "input": "camera, prompt, command, status", "vision": "ViT", "language": "Qwen2.5-VL", "action": "LH", "rfs_overall": 7.74, "rfs_spotlight": 6.73, "ade_5s": 3.07, "ade_3s": 1.33, "category": "Vision-Language-Action"},
+ {"id": 7, "model": "AutoVLA", "year": 2025, "input": "camera, prompt, command, status", "vision": "Qwen2.5-VL", "language": "Qwen2.5-VL", "action": "LH", "rfs_overall": 7.56, "rfs_spotlight": 6.94, "ade_5s": 2.96, "ade_3s": 1.35, "category": "Vision-Language-Action"},
+ {"id": 8, "model": "Poutine", "year": 2025, "input": "camera, prompt, status, command", "vision": "ViT", "language": "Qwen2.5-VL", "action": "LH", "rfs_overall": 7.99, "rfs_spotlight": 6.89, "ade_5s": 2.74, "ade_3s": 1.21, "category": "Vision-Language-Action"},
+ {"id": 9, "model": "LightEMMA", "year": 2025, "input": "camera, prompt", "vision": "Qwen2.5-VL", "language": "Qwen2.5-VL", "action": "LH", "rfs_overall": 6.52, "rfs_spotlight": 5.71, "ade_5s": 3.73, "ade_3s": 1.71, "category": "Vision-Language-Action"},
+ {"id": 10, "model": "dVLM-AD", "year": 2025, "input": "camera, prompt, status", "vision": "SigLIP2", "language": "LLaDA-V", "action": "LH", "rfs_overall": 7.63, "rfs_spotlight": null, "ade_5s": 3.02, "ade_3s": 1.29, "category": "Vision-Language-Action"}
+ ]
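For a quick sanity check, this flat results file loads straight into pandas; a minimal sketch (field names are taken from the rows above; `rfs_spotlight` can be `null`, which pandas reads as `NaN`):

```python
import json

import pandas as pd

with open("results/wod_results.json") as fp:
    rows = json.load(fp)

# Rank by overall RFS (higher is better); ADE columns are meters, lower is better.
df = pd.DataFrame(rows).sort_values("rfs_overall", ascending=False)
print(df[["model", "category", "rfs_overall", "ade_5s", "ade_3s"]].to_string(index=False))
```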
src/UNKNOWN.egg-info/PKG-INFO ADDED
@@ -0,0 +1,3 @@
+ Metadata-Version: 2.4
+ Name: UNKNOWN
+ Version: 0.0.0
src/UNKNOWN.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,15 @@
+ README.md
+ pyproject.toml
+ src/about.py
+ src/envs.py
+ src/populate.py
+ src/UNKNOWN.egg-info/PKG-INFO
+ src/UNKNOWN.egg-info/SOURCES.txt
+ src/UNKNOWN.egg-info/dependency_links.txt
+ src/UNKNOWN.egg-info/top_level.txt
+ src/display/css_html_js.py
+ src/display/formatting.py
+ src/display/utils.py
+ src/leaderboard/read_evals.py
+ src/submission/check_validity.py
+ src/submission/submit.py
src/UNKNOWN.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
+
src/UNKNOWN.egg-info/top_level.txt ADDED
@@ -0,0 +1,6 @@
+ about
+ display
+ envs
+ leaderboard
+ populate
+ submission
src/about.py ADDED
@@ -0,0 +1,72 @@
+ from dataclasses import dataclass
+ from enum import Enum
+
+
+ @dataclass
+ class Task:
+     benchmark: str
+     metric: str
+     col_name: str
+
+
+ # Select your tasks here
+ # ---------------------------------------------------
+ class Tasks(Enum):
+     # task_key in the json file, metric_key in the json file, name to display in the leaderboard
+     task0 = Task("anli_r1", "acc", "ANLI")
+     task1 = Task("logiqa", "acc_norm", "LogiQA")
+
+
+ NUM_FEWSHOT = 0  # Change with your few-shot setting
+ # ---------------------------------------------------
+
+
+ # Your leaderboard name
+ TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
+
+ # What does your leaderboard evaluate?
+ INTRODUCTION_TEXT = """
+ Intro text
+ """
+
+ # Which evaluations are you running? How can people reproduce what you have?
+ LLM_BENCHMARKS_TEXT = f"""
+ ## How it works
+
+ ## Reproducibility
+ To reproduce our results, here are the commands you can run:
+
+ """
+
+ EVALUATION_QUEUE_TEXT = """
+ ## Some good practices before submitting a model
+
+ ### 1) Make sure you can load your model and tokenizer using AutoClasses:
+ ```python
+ from transformers import AutoConfig, AutoModel, AutoTokenizer
+ config = AutoConfig.from_pretrained("your model name", revision=revision)
+ model = AutoModel.from_pretrained("your model name", revision=revision)
+ tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
+ ```
+ If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
+
+ Note: make sure your model is public!
+ Note: if your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it. Stay posted!
+
+ ### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
+ It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
+
+ ### 3) Make sure your model has an open license!
+ This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
+
+ ### 4) Fill up your model card
+ When we add extra information about models to the leaderboard, it will be automatically taken from the model card.
+
+ ## In case of model failure
+ If your model is displayed in the `FAILED` category, its execution stopped.
+ Make sure you have followed the above steps first.
+ If everything is done, check that you can launch the EleutherAI Harness on your model locally, using the command above without modifications (you can add `--limit` to limit the number of examples per task).
+ """
+
+ CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
+ CITATION_BUTTON_TEXT = r"""
+ """
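The `Task` entries are what tie the leaderboard to the uploaded results files: `benchmark` and `metric` must equal the `task_name` and `metric_name` keys that `src/leaderboard/read_evals.py` looks up. A minimal sketch of swapping in leaderboard-specific tasks; the task and metric keys below are hypothetical placeholders, not ones this Space ships with:

```python
from enum import Enum

from src.about import Task


# Hypothetical keys; they must match the "results" section of the submitted json files.
class MyTasks(Enum):
    task0 = Task("nuscenes_planning", "l2_error", "nuScenes L2")
    task1 = Task("navsim_pdm", "pdm_score", "NAVSIM PDMS")
```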
src/display/css_html_js.py ADDED
@@ -0,0 +1,105 @@
+ custom_css = """
+
+ .markdown-text {
+     font-size: 16px !important;
+ }
+
+ #models-to-add-text {
+     font-size: 18px !important;
+ }
+
+ #citation-button span {
+     font-size: 16px !important;
+ }
+
+ #citation-button textarea {
+     font-size: 16px !important;
+ }
+
+ #citation-button > label > button {
+     margin: 6px;
+     transform: scale(1.3);
+ }
+
+ #leaderboard-table {
+     margin-top: 15px
+ }
+
+ #leaderboard-table-lite {
+     margin-top: 15px
+ }
+
+ #search-bar-table-box > div:first-child {
+     background: none;
+     border: none;
+ }
+
+ #search-bar {
+     padding: 0px;
+ }
+
+ /* Limit the width of the first AutoEvalColumn so that names don't expand too much */
+ #leaderboard-table td:nth-child(2),
+ #leaderboard-table th:nth-child(2) {
+     max-width: 400px;
+     overflow: auto;
+     white-space: nowrap;
+ }
+
+ .tab-buttons button {
+     font-size: 20px;
+ }
+
+ #scale-logo {
+     border-style: none !important;
+     box-shadow: none;
+     display: block;
+     margin-left: auto;
+     margin-right: auto;
+     max-width: 600px;
+ }
+
+ #scale-logo .download {
+     display: none;
+ }
+ #filter_type {
+     border: 0;
+     padding-left: 0;
+     padding-top: 0;
+ }
+ #filter_type label {
+     display: flex;
+ }
+ #filter_type label > span {
+     margin-top: var(--spacing-lg);
+     margin-right: 0.5em;
+ }
+ #filter_type label > .wrap {
+     width: 103px;
+ }
+ #filter_type label > .wrap .wrap-inner {
+     padding: 2px;
+ }
+ #filter_type label > .wrap .wrap-inner input {
+     width: 1px
+ }
+ #filter-columns-type {
+     border: 0;
+     padding: 0.5;
+ }
+ #filter-columns-size {
+     border: 0;
+     padding: 0.5;
+ }
+ #box-filter > .form {
+     border: 0
+ }
+ """
+
+ get_window_url_params = """
+ function(url_params) {
+     const params = new URLSearchParams(window.location.search);
+     url_params = Object.fromEntries(params);
+     return url_params;
+ }
+ """
src/display/formatting.py ADDED
@@ -0,0 +1,27 @@
+ def model_hyperlink(link, model_name):
+     return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
+
+
+ def make_clickable_model(model_name):
+     link = f"https://huggingface.co/{model_name}"
+     return model_hyperlink(link, model_name)
+
+
+ def styled_error(error):
+     return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
+
+
+ def styled_warning(warn):
+     return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
+
+
+ def styled_message(message):
+     return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
+
+
+ def has_no_nan_values(df, columns):
+     return df[columns].notna().all(axis=1)
+
+
+ def has_nan_values(df, columns):
+     return df[columns].isna().any(axis=1)
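These helpers return raw HTML strings that Gradio renders directly; a short usage sketch (run from the repo root so `src` is importable; the repo id is hypothetical):

```python
from src.display.formatting import make_clickable_model, styled_message

# Renders as a dotted-underline link to https://huggingface.co/org/model.
print(make_clickable_model("org/model"))
print(styled_message("Submission received."))
```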
src/display/utils.py ADDED
@@ -0,0 +1,110 @@
+ from dataclasses import dataclass, make_dataclass
+ from enum import Enum
+
+ import pandas as pd
+
+ from src.about import Tasks
+
+
+ def fields(raw_class):
+     return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
+
+
+ # These classes are for user-facing column names,
+ # to avoid having to change them all around the code
+ # when a modification is needed
+ @dataclass
+ class ColumnContent:
+     name: str
+     type: str
+     displayed_by_default: bool
+     hidden: bool = False
+     never_hidden: bool = False
+
+
+ ## Leaderboard columns
+ auto_eval_column_dict = []
+ # Init
+ auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
+ auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
+ # Scores
+ auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
+ for task in Tasks:
+     auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
+ # Model information
+ auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
+ auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
+ auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
+ auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
+ auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
+ auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
+ auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
+ auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
+ auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
+
+ # We use make_dataclass to dynamically fill the scores from Tasks
+ AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
+
+
+ ## For the queue columns in the submission tab
+ @dataclass(frozen=True)
+ class EvalQueueColumn:  # Queue column
+     model = ColumnContent("model", "markdown", True)
+     revision = ColumnContent("revision", "str", True)
+     private = ColumnContent("private", "bool", True)
+     precision = ColumnContent("precision", "str", True)
+     weight_type = ColumnContent("weight_type", "str", True)
+     status = ColumnContent("status", "str", True)
+
+
+ ## All the model information that we might need
+ @dataclass
+ class ModelDetails:
+     name: str
+     display_name: str = ""
+     symbol: str = ""  # emoji
+
+
+ class ModelType(Enum):
+     PT = ModelDetails(name="pretrained", symbol="🟢")
+     FT = ModelDetails(name="fine-tuned", symbol="🔶")
+     IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
+     RL = ModelDetails(name="RL-tuned", symbol="🟦")
+     Unknown = ModelDetails(name="", symbol="?")
+
+     def to_str(self, separator=" "):
+         return f"{self.value.symbol}{separator}{self.value.name}"
+
+     @staticmethod
+     def from_str(type):
+         if "fine-tuned" in type or "🔶" in type:
+             return ModelType.FT
+         if "pretrained" in type or "🟢" in type:
+             return ModelType.PT
+         if "RL-tuned" in type or "🟦" in type:
+             return ModelType.RL
+         if "instruction-tuned" in type or "⭕" in type:
+             return ModelType.IFT
+         return ModelType.Unknown
+
+
+ class WeightType(Enum):
+     Adapter = ModelDetails("Adapter")
+     Original = ModelDetails("Original")
+     Delta = ModelDetails("Delta")
+
+
+ class Precision(Enum):
+     float16 = ModelDetails("float16")
+     bfloat16 = ModelDetails("bfloat16")
+     Unknown = ModelDetails("?")
+
+     @staticmethod
+     def from_str(precision):
+         if precision in ["torch.float16", "float16"]:
+             return Precision.float16
+         if precision in ["torch.bfloat16", "bfloat16"]:
+             return Precision.bfloat16
+         return Precision.Unknown
+
+
+ # Column selection
+ COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
+
+ EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
+ EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
+
+ BENCHMARK_COLS = [t.value.col_name for t in Tasks]
+
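Because `AutoEvalColumn` is generated with `make_dataclass`, the visible columns track whatever is declared in `Tasks` with no further wiring; a sketch of inspecting the derived column lists:

```python
from src.display.utils import AutoEvalColumn, BENCHMARK_COLS, COLS

print(AutoEvalColumn.model.name)  # "Model"
print(BENCHMARK_COLS)             # one display column per Task, e.g. ["ANLI", "LogiQA"]
print(COLS)                       # all user-facing columns whose hidden flag is False
```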
src/envs.py ADDED
@@ -0,0 +1,25 @@
+ import os
+
+ from huggingface_hub import HfApi
+
+ # Info to change for your repository
+ # ----------------------------------
+ TOKEN = os.environ.get("HF_TOKEN")  # A read/write token for your org
+
+ OWNER = "demo-leaderboard-backend"  # Change to your org - don't forget to create a results and request dataset, with the correct format!
+ # ----------------------------------
+
+ REPO_ID = f"{OWNER}/leaderboard"
+ QUEUE_REPO = f"{OWNER}/requests"
+ RESULTS_REPO = f"{OWNER}/results"
+
+ # If you set up a cache later, just change HF_HOME
+ CACHE_PATH = os.getenv("HF_HOME", ".")
+
+ # Local caches
+ EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
+ EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
+ EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
+ EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
+
+ API = HfApi(token=TOKEN)
src/leaderboard/read_evals.py ADDED
@@ -0,0 +1,196 @@
+ import glob
+ import json
+ import math
+ import os
+ from dataclasses import dataclass
+
+ import dateutil
+ import numpy as np
+
+ from src.display.formatting import make_clickable_model
+ from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
+ from src.submission.check_validity import is_model_on_hub
+
+
+ @dataclass
+ class EvalResult:
+     """Represents one full evaluation. Built from a combination of the result and request file for a given run."""
+
+     eval_name: str  # org_model_precision (uid)
+     full_model: str  # org/model (path on hub)
+     org: str
+     model: str
+     revision: str  # commit hash, "" if main
+     results: dict
+     precision: Precision = Precision.Unknown
+     model_type: ModelType = ModelType.Unknown  # Pretrained, fine-tuned, ...
+     weight_type: WeightType = WeightType.Original  # Original or Adapter
+     architecture: str = "Unknown"
+     license: str = "?"
+     likes: int = 0
+     num_params: int = 0
+     date: str = ""  # submission date of request file
+     still_on_hub: bool = False
+
+     @classmethod
+     def init_from_json_file(cls, json_filepath):
+         """Inits the result from the specific model result file"""
+         with open(json_filepath) as fp:
+             data = json.load(fp)
+
+         config = data.get("config")
+
+         # Precision
+         precision = Precision.from_str(config.get("model_dtype"))
+
+         # Get model and org
+         org_and_model = config.get("model_name", config.get("model_args", None))
+         org_and_model = org_and_model.split("/", 1)
+
+         if len(org_and_model) == 1:
+             org = None
+             model = org_and_model[0]
+             result_key = f"{model}_{precision.value.name}"
+         else:
+             org = org_and_model[0]
+             model = org_and_model[1]
+             result_key = f"{org}_{model}_{precision.value.name}"
+         full_model = "/".join(org_and_model)
+
+         still_on_hub, _, model_config = is_model_on_hub(
+             full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
+         )
+         architecture = "?"
+         if model_config is not None:
+             architectures = getattr(model_config, "architectures", None)
+             if architectures:
+                 architecture = ";".join(architectures)
+
+         # Extract results available in this file (some results are split in several files)
+         results = {}
+         for task in Tasks:
+             task = task.value
+
+             # We average all scores of a given metric (not all metrics are present in all files)
+             accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
+             if accs.size == 0 or any([acc is None for acc in accs]):
+                 continue
+
+             mean_acc = np.mean(accs) * 100.0
+             results[task.benchmark] = mean_acc
+
+         return cls(
+             eval_name=result_key,
+             full_model=full_model,
+             org=org,
+             model=model,
+             results=results,
+             precision=precision,
+             revision=config.get("model_sha", ""),
+             still_on_hub=still_on_hub,
+             architecture=architecture,
+         )
+
+     def update_with_request_file(self, requests_path):
+         """Finds the relevant request file for the current model and updates info with it"""
+         request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
+
+         try:
+             with open(request_file, "r") as f:
+                 request = json.load(f)
+             self.model_type = ModelType.from_str(request.get("model_type", ""))
+             self.weight_type = WeightType[request.get("weight_type", "Original")]
+             self.license = request.get("license", "?")
+             self.likes = request.get("likes", 0)
+             self.num_params = request.get("params", 0)
+             self.date = request.get("submitted_time", "")
+         except Exception:
+             print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
+
+     def to_dict(self):
+         """Converts the Eval Result to a dict compatible with our dataframe display"""
+         average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
+         data_dict = {
+             "eval_name": self.eval_name,  # not a column, just a save name
+             AutoEvalColumn.precision.name: self.precision.value.name,
+             AutoEvalColumn.model_type.name: self.model_type.value.name,
+             AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
+             AutoEvalColumn.weight_type.name: self.weight_type.value.name,
+             AutoEvalColumn.architecture.name: self.architecture,
+             AutoEvalColumn.model.name: make_clickable_model(self.full_model),
+             AutoEvalColumn.revision.name: self.revision,
+             AutoEvalColumn.average.name: average,
+             AutoEvalColumn.license.name: self.license,
+             AutoEvalColumn.likes.name: self.likes,
+             AutoEvalColumn.params.name: self.num_params,
+             AutoEvalColumn.still_on_hub.name: self.still_on_hub,
+         }
+
+         for task in Tasks:
+             data_dict[task.value.col_name] = self.results[task.value.benchmark]
+
+         return data_dict
+
+
+ def get_request_file_for_model(requests_path, model_name, precision):
+     """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
+     request_files = os.path.join(
+         requests_path,
+         f"{model_name}_eval_request_*.json",
+     )
+     request_files = glob.glob(request_files)
+
+     # Select correct request file (precision)
+     request_file = ""
+     request_files = sorted(request_files, reverse=True)
+     for tmp_request_file in request_files:
+         with open(tmp_request_file, "r") as f:
+             req_content = json.load(f)
+             if (
+                 req_content["status"] in ["FINISHED"]
+                 and req_content["precision"] == precision.split(".")[-1]
+             ):
+                 request_file = tmp_request_file
+     return request_file
+
+
+ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
+     """From the path of the results folder root, extract all needed info for results"""
+     model_result_filepaths = []
+
+     for root, _, files in os.walk(results_path):
+         # We should only have json files in model results
+         if len(files) == 0 or any([not f.endswith(".json") for f in files]):
+             continue
+
+         # Sort the files by date
+         try:
+             files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
+         except dateutil.parser._parser.ParserError:
+             files = [files[-1]]
+
+         for file in files:
+             model_result_filepaths.append(os.path.join(root, file))
+
+     eval_results = {}
+     for model_result_filepath in model_result_filepaths:
+         # Creation of result
+         eval_result = EvalResult.init_from_json_file(model_result_filepath)
+         eval_result.update_with_request_file(requests_path)
+
+         # Store results of same eval together
+         eval_name = eval_result.eval_name
+         if eval_name in eval_results.keys():
+             eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
+         else:
+             eval_results[eval_name] = eval_result
+
+     results = []
+     for v in eval_results.values():
+         try:
+             v.to_dict()  # we test if the dict version is complete
+             results.append(v)
+         except KeyError:  # not all eval values present
+             continue
+
+     return results
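A hedged sketch of the parsing path end to end: write a results file in the template schema, then let `EvalResult` pick it up. The repo id is hypothetical; without Hub access the `is_model_on_hub` check fails soft and `still_on_hub` simply stays `False`:

```python
import json
import tempfile

from src.leaderboard.read_evals import EvalResult

payload = {
    "config": {
        "model_dtype": "torch.float16",
        "model_name": "org/model",  # hypothetical repo id
        "model_sha": "main",
    },
    # Keys must match the Task benchmark/metric pairs declared in src/about.py.
    "results": {
        "anli_r1": {"acc": 0.5},
        "logiqa": {"acc_norm": 0.25},
    },
}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fp:
    json.dump(payload, fp)

result = EvalResult.init_from_json_file(fp.name)
print(result.eval_name)  # org_model_float16
print(result.results)    # {'anli_r1': 50.0, 'logiqa': 25.0}, scores scaled to percentages
```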
src/populate.py ADDED
@@ -0,0 +1,58 @@
+ import json
+ import os
+
+ import pandas as pd
+
+ from src.display.formatting import has_no_nan_values, make_clickable_model
+ from src.display.utils import AutoEvalColumn, EvalQueueColumn
+ from src.leaderboard.read_evals import get_raw_eval_results
+
+
+ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
+     """Creates a dataframe from all the individual experiment results"""
+     raw_data = get_raw_eval_results(results_path, requests_path)
+     all_data_json = [v.to_dict() for v in raw_data]
+
+     df = pd.DataFrame.from_records(all_data_json)
+     df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
+     df = df[cols].round(decimals=2)
+
+     # filter out if any of the benchmarks have not been produced
+     df = df[has_no_nan_values(df, benchmark_cols)]
+     return df
+
+
+ def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
+     """Creates the different dataframes for the evaluation queue requests"""
+     entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
+     all_evals = []
+
+     for entry in entries:
+         if ".json" in entry:
+             file_path = os.path.join(save_path, entry)
+             with open(file_path) as fp:
+                 data = json.load(fp)
+
+             data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
+             data[EvalQueueColumn.revision.name] = data.get("revision", "main")
+
+             all_evals.append(data)
+         elif ".md" not in entry:
+             # this is a folder; check against the full path, not just the bare file name
+             sub_entries = [
+                 e
+                 for e in os.listdir(f"{save_path}/{entry}")
+                 if os.path.isfile(os.path.join(save_path, entry, e)) and not e.startswith(".")
+             ]
+             for sub_entry in sub_entries:
+                 file_path = os.path.join(save_path, entry, sub_entry)
+                 with open(file_path) as fp:
+                     data = json.load(fp)
+
+                 data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
+                 data[EvalQueueColumn.revision.name] = data.get("revision", "main")
+                 all_evals.append(data)
+
+     pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
+     running_list = [e for e in all_evals if e["status"] == "RUNNING"]
+     finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
+     df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
+     df_running = pd.DataFrame.from_records(running_list, columns=cols)
+     df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
+     return df_finished[cols], df_running[cols], df_pending[cols]
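This is how the template's `app.py` typically wires the pieces together; a minimal sketch, assuming local `eval-results`/`eval-queue` folders in the layout `read_evals` expects:

```python
from src.display.utils import BENCHMARK_COLS, COLS, EVAL_COLS
from src.populate import get_evaluation_queue_df, get_leaderboard_df

# Paths are assumptions; in the Space they come from src/envs.py.
leaderboard_df = get_leaderboard_df("eval-results", "eval-queue", COLS, BENCHMARK_COLS)
finished_df, running_df, pending_df = get_evaluation_queue_df("eval-queue", EVAL_COLS)

print(leaderboard_df.head())
print(len(pending_df), "pending submissions")
```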
src/submission/check_validity.py ADDED
@@ -0,0 +1,99 @@
+ import json
+ import os
+ import re
+ from collections import defaultdict
+ from datetime import datetime, timedelta, timezone
+
+ import huggingface_hub
+ from huggingface_hub import ModelCard
+ from huggingface_hub.hf_api import ModelInfo
+ from transformers import AutoConfig
+ from transformers.models.auto.tokenization_auto import AutoTokenizer
+
+
+ def check_model_card(repo_id: str) -> tuple[bool, str]:
+     """Checks if the model card and license exist and have been filled"""
+     try:
+         card = ModelCard.load(repo_id)
+     except huggingface_hub.utils.EntryNotFoundError:
+         return False, "Please add a model card to your model to explain how you trained/fine-tuned it."
+
+     # Enforce license metadata
+     if card.data.license is None:
+         if not ("license_name" in card.data and "license_link" in card.data):
+             return False, (
+                 "License not found. Please add a license to your model card using the `license` metadata or a"
+                 " `license_name`/`license_link` pair."
+             )
+
+     # Enforce card content
+     if len(card.text) < 200:
+         return False, "Please add a description to your model card, it is too short."
+
+     return True, ""
+
+
+ def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str, AutoConfig]:
+     """Checks if the model model_name is on the hub, and whether it (and its tokenizer) can be loaded with AutoClasses."""
+     try:
+         config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
+         if test_tokenizer:
+             try:
+                 tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
+             except ValueError as e:
+                 return (
+                     False,
+                     f"uses a tokenizer which is not in a transformers release: {e}",
+                     None,
+                 )
+             except Exception:
+                 return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
+         return True, None, config
+
+     except ValueError:
+         return (
+             False,
+             "needs to be launched with `trust_remote_code=True`. For safety reasons, we do not allow these models to be automatically submitted to the leaderboard.",
+             None,
+         )
+
+     except Exception:
+         return False, "was not found on hub!", None
+
+
+ def get_model_size(model_info: ModelInfo, precision: str):
+     """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
+     try:
+         model_size = round(model_info.safetensors["total"] / 1e9, 3)
+     except (AttributeError, TypeError):
+         return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
+
+     size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
+     model_size = size_factor * model_size
+     return model_size
+
+
+ def get_model_arch(model_info: ModelInfo):
+     """Gets the model architecture from the configuration"""
+     return model_info.config.get("architectures", "Unknown")
+
+
+ def already_submitted_models(requested_models_dir: str) -> tuple[set[str], dict[str, list[str]]]:
+     """Gather a list of already submitted models to avoid duplicates"""
+     depth = 1
+     file_names = []
+     users_to_submission_dates = defaultdict(list)
+
+     for root, _, files in os.walk(requested_models_dir):
+         current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
+         if current_depth == depth:
+             for file in files:
+                 if not file.endswith(".json"):
+                     continue
+                 with open(os.path.join(root, file), "r") as f:
+                     info = json.load(f)
+                     file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
+
+                     # Select organisation
+                     if info["model"].count("/") == 0 or "submitted_time" not in info:
+                         continue
+                     organisation, _ = info["model"].split("/")
+                     users_to_submission_dates[organisation].append(info["submitted_time"])
+
+     return set(file_names), users_to_submission_dates
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ from datetime import datetime, timezone
4
+
5
+ from src.display.formatting import styled_error, styled_message, styled_warning
6
+ from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
7
+ from src.submission.check_validity import (
8
+ already_submitted_models,
9
+ check_model_card,
10
+ get_model_size,
11
+ is_model_on_hub,
12
+ )
13
+
14
+ REQUESTED_MODELS = None
15
+ USERS_TO_SUBMISSION_DATES = None
16
+
17
+ def add_new_eval(
18
+ model: str,
19
+ base_model: str,
20
+ revision: str,
21
+ precision: str,
22
+ weight_type: str,
23
+ model_type: str,
24
+ ):
25
+ global REQUESTED_MODELS
26
+ global USERS_TO_SUBMISSION_DATES
27
+ if not REQUESTED_MODELS:
28
+ REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
29
+
30
+ user_name = ""
31
+ model_path = model
32
+ if "/" in model:
33
+ user_name = model.split("/")[0]
34
+ model_path = model.split("/")[1]
35
+
36
+ precision = precision.split(" ")[0]
37
+ current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
38
+
39
+ if model_type is None or model_type == "":
40
+ return styled_error("Please select a model type.")
41
+
42
+ # Does the model actually exist?
43
+ if revision == "":
44
+ revision = "main"
45
+
46
+ # Is the model on the hub?
47
+ if weight_type in ["Delta", "Adapter"]:
48
+ base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
49
+ if not base_model_on_hub:
50
+ return styled_error(f'Base model "{base_model}" {error}')
51
+
52
+ if not weight_type == "Adapter":
53
+ model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
54
+ if not model_on_hub:
55
+ return styled_error(f'Model "{model}" {error}')
56
+
57
+ # Is the model info correctly filled?
58
+ try:
59
+ model_info = API.model_info(repo_id=model, revision=revision)
60
+ except Exception:
61
+ return styled_error("Could not get your model information. Please fill it up properly.")
62
+
63
+ model_size = get_model_size(model_info=model_info, precision=precision)
64
+
65
+ # Were the model card and license filled?
66
+ try:
67
+ license = model_info.cardData["license"]
68
+ except Exception:
69
+ return styled_error("Please select a license for your model")
70
+
71
+ modelcard_OK, error_msg = check_model_card(model)
72
+ if not modelcard_OK:
73
+ return styled_error(error_msg)
74
+
75
+ # Seems good, creating the eval
76
+ print("Adding new eval")
77
+
78
+ eval_entry = {
79
+ "model": model,
80
+ "base_model": base_model,
81
+ "revision": revision,
82
+ "precision": precision,
83
+ "weight_type": weight_type,
84
+ "status": "PENDING",
85
+ "submitted_time": current_time,
86
+ "model_type": model_type,
87
+ "likes": model_info.likes,
88
+ "params": model_size,
89
+ "license": license,
90
+ "private": False,
91
+ }
92
+
93
+ # Check for duplicate submission
94
+ if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
95
+ return styled_warning("This model has been already submitted.")
96
+
97
+ print("Creating eval file")
98
+ OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
99
+ os.makedirs(OUT_DIR, exist_ok=True)
100
+ out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
101
+
102
+ with open(out_path, "w") as f:
103
+ f.write(json.dumps(eval_entry))
104
+
105
+ print("Uploading eval file")
106
+ API.upload_file(
107
+ path_or_fileobj=out_path,
108
+ path_in_repo=out_path.split("eval-queue/")[1],
109
+ repo_id=QUEUE_REPO,
110
+ repo_type="dataset",
111
+ commit_message=f"Add {model} to eval queue",
112
+ )
113
+
114
+ # Remove the local file
115
+ os.remove(out_path)
116
+
117
+ return styled_message(
118
+ "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
119
+ )
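End to end, this is the function the Gradio submit button calls; a hedged sketch of invoking it directly (all argument values are illustrative, and the model must exist on the Hub with a filled model card for the submission to pass validation; note that `precision` is split on the first space, so a labelled UI choice such as `float16 (recommended)` would reduce to `float16`):

```python
from src.submission.submit import add_new_eval

# All values are illustrative; replace with a real public repo id.
message = add_new_eval(
    model="org/model",
    base_model="",
    revision="main",
    precision="float16",
    weight_type="Original",
    model_type="🔶 fine-tuned",
)
print(message)  # styled HTML string: success, warning, or error
```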