freesky commited on
Commit
17eab04
·
verified ·
1 Parent(s): ccfd04e

Add files using upload-large-folder tool

Browse files
Files changed (50) hide show
  1. distributed_evaluate_ovobench.py +278 -0
  2. ovo_bench_formatted.json +0 -0
  3. ovo_bench_formatted_part.json +1658 -0
  4. ovo_bench_new.json +0 -0
  5. transfer_annotation_format.py +78 -0
  6. transform.py +49 -0
  7. videos/AutoEvalMetaData/14.mp4 +3 -0
  8. videos/AutoEvalMetaData/274.mp4 +3 -0
  9. videos/AutoEvalMetaData/294.mp4 +3 -0
  10. videos/AutoEvalMetaData/48.mp4 +3 -0
  11. videos/AutoEvalMetaData/89.mp4 +3 -0
  12. videos/COIN/2vzC30892IE.mp4 +3 -0
  13. videos/COIN/8tZUVfEyytY.mp4 +3 -0
  14. videos/COIN/BM8H9eE6jhI.mp4 +3 -0
  15. videos/COIN/Hf2AisK1wHY.mp4 +3 -0
  16. videos/COIN/N_qj_iSrF64.mp4 +3 -0
  17. videos/COIN/Q-uCII9Rtz0.mp4 +3 -0
  18. videos/COIN/QInuE1KwCZM.mp4 +3 -0
  19. videos/COIN/UBq35JWbBG0.mp4 +3 -0
  20. videos/COIN/V7SKv1tHWZ4.mp4 +3 -0
  21. videos/COIN/fHv947jR_6U.mp4 +3 -0
  22. videos/COIN/xCJmXGMl54I.mp4 +3 -0
  23. videos/COIN/xcpffGBVi8Y.mp4 +3 -0
  24. videos/YouTube_Games/8gtpP2Toigg&list=PL3bIgEVl_gDzIfuotDjHjcXzUkB6ZvMXk&index=2.mp4 +3 -0
  25. videos/YouTube_Games/PLJ3VIGhVd3r8Int6IZT_v3S_BzG9RVfiG&index=2.mp4 +3 -0
  26. videos/YouTube_Games/PLJ3VIGhVd3r8Int6IZT_v3S_BzG9RVfiG&index=3.mp4 +3 -0
  27. videos/YouTube_Games/PLJ3VIGhVd3r8Int6IZT_v3S_BzG9RVfiG&index=4.mp4 +3 -0
  28. videos/YouTube_Games/PLJ3VIGhVd3r8Int6IZT_v3S_BzG9RVfiG&index=7.mp4 +3 -0
  29. videos/YouTube_Games/QRJDDR_z3Lk&list=PL3bIgEVl_gDzIfuotDjHjcXzUkB6ZvMXk&index=5.mp4 +3 -0
  30. videos/YouTube_Games/RiIYSrrwjuU&list=PL3bIgEVl_gDzIfuotDjHjcXzUkB6ZvMXk&index=6.mp4 +3 -0
  31. videos/YouTube_Games/m-7k8KfT9s8&list=PL3bIgEVl_gDzIfuotDjHjcXzUkB6ZvMXk&index=8.mp4 +3 -0
  32. videos/cross_task/VS4zWF9hQpQ.mp4 +3 -0
  33. videos/cross_task/bQBNnsExUlg.mp4 +3 -0
  34. videos/cross_task/k5_89KhvpK4.mp4 +3 -0
  35. videos/ovo-bench-formatted.jsonl +0 -0
  36. videos/perception_test/video_10889.mp4 +3 -0
  37. videos/perception_test/video_5670.mp4 +3 -0
  38. videos/perception_test/video_9789.mp4 +3 -0
  39. videos/star/WRW74.mp4 +3 -0
  40. videos/thumos/thumos15_video_validation_0000405.mp4 +3 -0
  41. videos/thumos/thumos15_video_validation_0000435.mp4 +3 -0
  42. videos/thumos/thumos15_video_validation_0000442.mp4 +3 -0
  43. videos/thumos/thumos15_video_validation_0000889.mp4 +3 -0
  44. videos/thumos/thumos15_video_validation_0001752.mp4 +3 -0
  45. videos/thumos/thumos15_video_validation_0001932.mp4 +3 -0
  46. videos/thumos/video_test_0000324.mp4 +3 -0
  47. videos/thumos/video_test_0000357.mp4 +3 -0
  48. videos/thumos/video_test_0001072.mp4 +3 -0
  49. videos/thumos/video_test_0001129.mp4 +3 -0
  50. videos/thumos/video_test_0001452.mp4 +3 -0
distributed_evaluate_ovobench.py ADDED
@@ -0,0 +1,278 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json, os, torch, functools, tqdm, random, sys, argparse
import contextlib
import numpy as np
import decord
from torch.utils.data import Dataset
from transformers import Trainer, TrainingArguments, logging, Qwen2VLForConditionalGeneration, AutoProcessor, Qwen2_5_VLForConditionalGeneration
from livecc_utils import _read_video_decord_plus, _spatial_resize_video
from qwen_vl_utils.vision_process import process_vision_info, smart_nframes, FPS
8
+
9
+
10
+ logger = logging.get_logger(__name__)
11
+ # HF-style logger
12
+
13
def _read_may1fps_video_decord(ele: dict):
    """Read a video clip with decord, tolerating very short or very low-FPS videos.

    Compared to a plain decord read, the frame/fps numbers fed to
    `smart_nframes` are padded so that videos with fewer than 2 frames or an
    average fps below the target `FPS` still yield a sensible sample count.

    Args:
        ele (dict): video configuration. Supported keys:
            - video: local path of the video (must exist on disk).
            - video_start: optional clip start (pts units; presumably seconds
              — confirm against the annotation format).
            - video_end: optional clip end.

    Returns:
        tuple:
            - torch.Tensor: sampled clip with shape (T, C, H, W), uint8.
            - float: effective sampling fps of the returned clip.

    Raises:
        ValueError: if the video path does not exist.
    """
    video_path = ele["video"]
    if not os.path.exists(video_path):
        raise ValueError(f'video_path {video_path} not found')
    vr = decord.VideoReader(video_path, num_threads=2)

    video_start = ele.get('video_start', None)
    video_end = ele.get('video_end', None)
    video_fps = vr.get_avg_fps()

    clip_idxs = None
    if video_start is not None or video_end is not None:
        # Force decord to populate the per-frame pts table, then clamp the
        # requested [start, end] window into the valid pts range.
        vr.get_frame_timestamp(0)
        video_pts = vr._frame_pts[:, 1]
        # `is None` (not truthiness) so an explicit start/end of 0 is honored;
        # the clamp below makes this equivalent for non-negative pts anyway.
        video_start = video_pts[0] if video_start is None else video_start
        video_end = video_pts[-1] if video_end is None else video_end
        video_start = min(max(video_pts[0], video_start), video_pts[-1])
        video_end = min(max(video_pts[0], video_end), video_pts[-1])
        # Guarantee a window of at least ~1 pts unit so the clip is non-degenerate.
        video_end = max(video_start + 1, video_end)
        clip_idxs = ((video_start <= video_pts) & (video_pts <= video_end)).nonzero()[0]
        total_frames = len(clip_idxs)
    else:
        total_frames = len(vr)

    # smart_nframes misbehaves on degenerate inputs; pad only the numbers it
    # sees (not the actual frame data). Note the low-fps branch deliberately
    # overrides the <2-frames branch, matching the original ordering.
    total_frames_for_smart_nframes = total_frames
    video_fps_for_smart_nframes = video_fps
    if total_frames < 2:
        total_frames_for_smart_nframes = 2
    if video_fps < FPS:
        total_frames_for_smart_nframes = int(total_frames * FPS / video_fps)
        video_fps_for_smart_nframes = FPS

    nframes = smart_nframes(ele, total_frames=total_frames_for_smart_nframes, video_fps=video_fps_for_smart_nframes)

    # Uniformly sample `nframes` indices over the (possibly windowed) clip.
    nframes_idxs = np.linspace(0, total_frames - 1, nframes).round().astype(int)
    clip_idxs = nframes_idxs if clip_idxs is None else clip_idxs[nframes_idxs]

    clip = torch.from_numpy(vr.get_batch(clip_idxs).asnumpy()).permute(0, 3, 1, 2)  # THWC -> TCHW
    sample_fps = len(clip_idxs) / max(total_frames, 1e-6) * video_fps
    return clip, sample_fps
79
+
80
+
81
def save_function_print(function: callable, save_path: str, *args, **kwargs):
    """Run `function(*args, **kwargs)` and capture everything it prints into a file.

    Args:
        function: callable whose stdout output should be captured.
        save_path: path of the text file the captured output is written to.
        *args, **kwargs: forwarded to `function`.
    """
    # redirect_stdout restores sys.stdout even if `function` raises,
    # replacing the manual save/assign/finally dance.
    with open(save_path, 'w') as f, contextlib.redirect_stdout(f):
        function(*args, **kwargs)
89
+
90
+
91
class OvoBenchMCQDataset(Dataset):
    """OVO-Bench multiple-choice dataset yielding (conversation, video) pairs.

    Each JSONL line holds one benchmark datum with at least the keys
    'task', 'question', 'video', 'video_start', 'video_end' and, for
    non-chunked tasks, 'options'.
    """

    def __init__(self, path, question_prefix, question_postfix, answer_prefix, sample: int = None):
        """Load (and optionally subsample) the benchmark JSONL at `path`.

        Args:
            path: JSONL file; one JSON object (possibly doubly-encoded) per line.
            question_prefix: text prepended to the question for MCQ tasks.
            question_postfix: text appended after the options for MCQ tasks.
            answer_prefix: string appended to the chat template at collate time.
            sample: if given, randomly keep only this many lines (fixed seed
                so the subsample is reproducible).
        """
        lines = open(path).readlines()

        if sample is not None:
            random.seed(42)
            lines = random.sample(lines, sample)

        self.datums = [json.loads(line) for line in tqdm.tqdm(lines, desc='load datums')]

        # Some dumps are doubly JSON-encoded (a JSON string per line); decode again.
        if isinstance(self.datums[0], str):
            self.datums = [json.loads(datum) for datum in tqdm.tqdm(self.datums, desc='load datumsx2')]

        self.question_prefix = question_prefix
        self.question_postfix = question_postfix
        self.answer_prefix = answer_prefix

        # Video paths inside the datums are relative to the JSONL's directory.
        self.data_dir = os.path.dirname(path)

    def __len__(self):
        return len(self.datums)

    def __getitem__(self, i):
        """Return (conversation, video_tensor) for datum `i`."""
        datum = self.datums[i]
        conversation = [{"role": "user", "content": []}]

        video_inputs = None

        if datum['task'] in ['REC', 'SSR', 'CRR']:  # 'REC', 'SSR', 'CRR' have already been chunked
            query = datum['question']
        else:
            query = self.question_prefix + datum['question'] + '\n' + '\n'.join(datum['options']) + self.question_postfix

        video, _ = _read_may1fps_video_decord({
            'video': os.path.join(self.data_dir, datum['video']),
            'video_start': datum['video_start'],
            'video_end': datum['video_end']
        })

        video = _spatial_resize_video(video)

        conversation[0]['content'].append({"type": "video", "video": video})

        video_inputs = [video]

        conversation[0]['content'].append({"type": "text", "text": query})

        # NOTE(review): `video_inputs` is always assigned above, so this
        # fallback is currently unreachable; kept in case the direct decord
        # read path is removed later.
        if video_inputs is None:
            for attempt in range(10):
                try:
                    _, video_inputs = process_vision_info(conversation)
                    break
                except Exception:  # was a bare except: keep Ctrl-C responsive
                    print(f"{attempt}-th process_vision_info failed. retry...")
        return conversation, video_inputs[0]

    def data_collator(self, batch, processor):
        """Collate (conversation, video) pairs into processor tensors.

        Applies the chat template with a generation prompt, then appends
        `answer_prefix` so the next-token logits can be scored against the
        option ids directly.
        """
        conversations, video_inputs = zip(*batch)

        texts = processor.apply_chat_template(conversations, tokenize=False, add_generation_prompt=True)

        texts = [text + self.answer_prefix for text in texts]

        inputs = processor(
            text=texts,
            images=None,
            videos=list(video_inputs),
            padding=True,
            return_tensors="pt",
        )

        return inputs
163
+
164
+
165
def preprocess_logits_for_metrics(logits, labels, strict_option_ids):
    """Reduce per-token logits to one predicted option index per sample.

    For each sample, find the last "valid" position (where the logit for
    vocab id 0 is not -100, i.e. not masked), restrict the vocabulary to
    `strict_option_ids`, and take the argmax over those options.

    Args:
        logits: iterable of (seq_len, vocab_size) tensors, one per sample.
        labels: unused; kept to satisfy the HF Trainer callback signature.
        strict_option_ids: token ids of the candidate answer options.

    Returns:
        torch.Tensor: shape (batch,), index into `strict_option_ids`.
    """
    picks = []
    for logit in logits:
        # nonzero(as_tuple=True) keeps a 1-D index tensor even when exactly
        # one position is valid; the previous .nonzero().squeeze()[-1]
        # collapsed to a 0-dim tensor in that case and crashed on [-1].
        valid_positions = (logit[:, 0] != -100).nonzero(as_tuple=True)[0]
        picks.append(logit[valid_positions[-1], strict_option_ids])
    return torch.stack(picks).argmax(dim=-1)
167
+
168
+
169
def mcq_predict(
    model,
    processor,
    benchmark_path: str,
    options: list[str],
    question_prefix: str = '',
    question_postfix: str = '\nPlease select the correct answer.',
    answer_prefix: str = 'Answer:',
    abcd_previous_str: str = ': ',
    use_liger_kernel: bool = True,
    per_device_eval_batch_size: int = 1,
    dataloader_num_workers: int = 4,
):
    """Run (possibly distributed) MCQ prediction over an OVO-Bench JSONL.

    Uses HF `Trainer.predict` for free multi-process sharding/gathering; the
    per-sample logits are reduced to option indices by
    `preprocess_logits_for_metrics`.

    Args:
        model: a loaded Qwen2-VL / Qwen2.5-VL conditional-generation model.
        processor: matching AutoProcessor (provides tokenizer + chat template).
        benchmark_path: path to the formatted benchmark JSONL.
        options: candidate answer strings; predictions index into this list.
        question_prefix / question_postfix: wrap the question for MCQ tasks.
        answer_prefix: appended to the prompt at collate time.
        abcd_previous_str: separator assumed to precede each option string
            when extracting its token id.
        use_liger_kernel: forwarded to TrainingArguments.
        per_device_eval_batch_size / dataloader_num_workers: eval loader knobs.

    Returns:
        tuple: (predicted option indices, the dataset's datums, process index).
    """
    # Token id of each option when it follows `abcd_previous_str`
    # ([-1] takes the option's own final token, not the separator's).
    strict_option_ids = [processor.tokenizer(f'{abcd_previous_str}{_}').input_ids[-1] for _ in options]

    dataset = OvoBenchMCQDataset(benchmark_path, question_prefix=question_prefix, question_postfix=question_postfix, answer_prefix=answer_prefix)

    trainer = Trainer(
        model=model,
        args=TrainingArguments(
            output_dir='outputs/', do_predict=True,
            per_device_eval_batch_size=per_device_eval_batch_size,
            dataloader_num_workers=dataloader_num_workers,
            report_to='none', use_liger_kernel=use_liger_kernel
        ),
        data_collator=functools.partial(dataset.data_collator, processor=processor),
        processing_class=processor,
        preprocess_logits_for_metrics=functools.partial(preprocess_logits_for_metrics, strict_option_ids=strict_option_ids),
    )

    # ignore_keys avoids gathering large / non-tensor model outputs across ranks.
    letter_idxs_predictions = trainer.predict(dataset, ignore_keys=['past_key_values', 'hidden_states', 'attentions', 'rope_deltas']).predictions

    return letter_idxs_predictions, dataset.datums, trainer.args.process_index
202
+
203
+
204
def evaluate_ovobench_results(results: list):
    """Print per-task accuracy plus the three OVO-Bench category averages.

    A response counts as correct when it starts with the ground-truth answer.
    Results are grouped as Real-Time Visual Perception / Backward Tracing /
    Forward Tracing (everything else).
    """
    task_to_counts = {}
    for entry in results:
        counts = task_to_counts.setdefault(entry['task'], {'correct': 0, 'total': 0})
        counts['total'] += 1
        if entry['response'][:len(entry['answer'])] == entry['answer']:
            counts['correct'] += 1

    realtime_tasks = {'OCR', 'ACR', 'ATR', 'STU', 'FPD', 'OJR'}
    backward_tasks = {'EPM', 'ASI', 'HLD'}

    rt_accs, bt_accs, fr_accs = [], [], []
    for task, counts in task_to_counts.items():
        acc = counts['correct'] / counts['total']
        print(f'{task}: {counts["correct"]}/{counts["total"]}={acc}')
        if task in realtime_tasks:
            rt_accs.append(acc)
        elif task in backward_tasks:
            bt_accs.append(acc)
        else:
            fr_accs.append(acc)

    categories = (
        ('Real-Time Visual Perception', rt_accs),
        ('Backward Tracing', bt_accs),
        ('Forward Tracing', fr_accs),
    )
    for label, accs in categories:
        if accs:
            print(f'{label} avg.: {sum(accs)}/{len(accs)}={sum(accs)/len(accs)}')
230
+
231
+
232
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Format OVO-Bench dataset JSONL file.")

    parser.add_argument("--benchmark_dir", type=str, required=True, help="Path to ovobench dir.")
    parser.add_argument("--model_path", type=str, required=True, help="Path to model dir.")

    args = parser.parse_args()
    benchmark_path = os.path.join(args.benchmark_dir, 'ovo-bench-formatted.jsonl')

    model_path = args.model_path
    # Prefer the Qwen2.5-VL class; fall back to Qwen2-VL for older checkpoints.
    try:
        model = Qwen2_5_VLForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto", attn_implementation='flash_attention_2')
    except Exception:  # was a bare except; keep KeyboardInterrupt uncaught
        model = Qwen2VLForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto", attn_implementation='flash_attention_2')

    # NOTE(review): left padding presumably keeps the answer logits at the end
    # of each batched sequence (preprocess_logits_for_metrics reads the last
    # unmasked position) — confirm.
    processor = AutoProcessor.from_pretrained(model_path, padding_side='left')

    # Candidate answer tokens across all OVO-Bench task formats.
    options = ['No', 'Yes', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E']

    letter_idxs_predictions, benchmark_datums, process_index = mcq_predict(
        model=model, processor=processor, benchmark_path=benchmark_path,
        options=options, use_liger_kernel='LiveCC' in model_path,
        answer_prefix='The answer is:\n',
        abcd_previous_str='\n',
    )

    # Only the main process aggregates and writes results.
    if process_index == 0:
        results = []
        for datum, letter_idx_prediction in zip(benchmark_datums, letter_idxs_predictions):
            results.append({
                'id': datum['id'],
                "task": datum['task'],
                "question": datum['question'],
                "answer": datum['answer'],
                "response": options[letter_idx_prediction],
            })

        save_json_path = f'results/ovobench/{os.path.basename(model_path)}.json'
        os.makedirs(os.path.dirname(save_json_path), exist_ok=True)
        # Context manager guarantees the handle is flushed and closed
        # (previously json.dump(..., open(...)) leaked the file object).
        with open(save_json_path, 'w') as f:
            json.dump(results, f)

        save_txt_path = save_json_path.replace('.json', '.txt')
        save_function_print(
            evaluate_ovobench_results,
            save_txt_path,
            results
        )
ovo_bench_formatted.json ADDED
The diff for this file is too large to render. See raw diff
 
ovo_bench_formatted_part.json ADDED
@@ -0,0 +1,1658 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "video_id": 1193,
4
+ "video_path": "data/ovobench/videos/Ego4D/video/ccf8a14a-e815-4a2d-8ddc-8863b4e158a6.mp4",
5
+ "task": "FPD",
6
+ "conversations": [
7
+ {
8
+ "question": "What is this person about to do?",
9
+ "answer": "The individual appears ready to apply a coating or color to a surface using a brush or similar tool.",
10
+ "choices": [
11
+ "The person seems to be getting ready to write or draw using a pencil or pen.",
12
+ "The person is preparing to attach something using a stapler or similar device.",
13
+ "The individual appears ready to apply a coating or color to a surface using a brush or similar tool.",
14
+ "The person is about to measure something using a ruler or tape measure."
15
+ ],
16
+ "end_time": 38.7
17
+ }
18
+ ]
19
+ },
20
+ {
21
+ "video_id": 1194,
22
+ "video_path": "data/ovobench/videos/Ego4D/video/ebc98edd-f56e-431a-acb5-9d40fea9d4c6.mp4",
23
+ "task": "FPD",
24
+ "conversations": [
25
+ {
26
+ "question": "What is this person about to do with the objects around them?",
27
+ "answer": "The person is going to clean the cooker.",
28
+ "choices": [
29
+ "The person is about to start a cooking lesson.",
30
+ "The person is going to clean the cooker.",
31
+ "The person is arranging the utensils.",
32
+ "The person is about to prepare a meal."
33
+ ],
34
+ "end_time": 69.03
35
+ }
36
+ ]
37
+ },
38
+ {
39
+ "video_id": 1195,
40
+ "video_path": "data/ovobench/videos/Ego4D/video/ebc98edd-f56e-431a-acb5-9d40fea9d4c6.mp4",
41
+ "task": "FPD",
42
+ "conversations": [
43
+ {
44
+ "question": "What is this person preparing to do?",
45
+ "answer": "The person is preparing to pour water from a jug.",
46
+ "choices": [
47
+ "The person is preparing to mix a cocktail.",
48
+ "The person is preparing to set the table for dinner.",
49
+ "The person is preparing to brew coffee.",
50
+ "The person is preparing to pour water from a jug."
51
+ ],
52
+ "end_time": 237.5
53
+ }
54
+ ]
55
+ },
56
+ {
57
+ "video_id": 1196,
58
+ "video_path": "data/ovobench/videos/Ego4D/video/f4e2d43a-9fd2-48c0-b1d3-ca32c82ec2c4.mp4",
59
+ "task": "FPD",
60
+ "conversations": [
61
+ {
62
+ "question": "What is this person about to do after finishing with the equipment?",
63
+ "answer": "The person is going to open a cabinet.",
64
+ "choices": [
65
+ "The person is going to check the paperwork.",
66
+ "The person is going to make a phone call.",
67
+ "The person is going to leave the room.",
68
+ "The person is going to open a cabinet."
69
+ ],
70
+ "end_time": 261.97
71
+ }
72
+ ]
73
+ },
74
+ {
75
+ "video_id": 1197,
76
+ "video_path": "data/ovobench/videos/Ego4D/video/f4e2d43a-9fd2-48c0-b1d3-ca32c82ec2c4.mp4",
77
+ "task": "FPD",
78
+ "conversations": [
79
+ {
80
+ "question": "What is the person about to do with the object?",
81
+ "answer": "The person is about to take a pen.",
82
+ "choices": [
83
+ "The person is preparing to put the pen back into its case.",
84
+ "The person is about to hand over a pen to someone else.",
85
+ "The person is about to take a pen.",
86
+ "The person is about to use the pen to take notes."
87
+ ],
88
+ "end_time": 50.17
89
+ }
90
+ ]
91
+ },
92
+ {
93
+ "video_id": 1198,
94
+ "video_path": "data/ovobench/videos/Ego4D/video/f4e2d43a-9fd2-48c0-b1d3-ca32c82ec2c4.mp4",
95
+ "task": "FPD",
96
+ "conversations": [
97
+ {
98
+ "question": "What is this person about to do?",
99
+ "answer": "The person is about to make cuts with the saw.",
100
+ "choices": [
101
+ "The person is about to paint the wall with a brush.",
102
+ "The person is about to drill a hole with the power drill.",
103
+ "The person is about to assemble the furniture with a screwdriver.",
104
+ "The person is about to make cuts with the saw."
105
+ ],
106
+ "end_time": 251.53
107
+ }
108
+ ]
109
+ },
110
+ {
111
+ "video_id": 1199,
112
+ "video_path": "data/ovobench/videos/Ego4D/video/f93e485e-9ec3-4856-809d-f050c6439b7b.mp4",
113
+ "task": "FPD",
114
+ "conversations": [
115
+ {
116
+ "question": "What is the person getting ready to do?",
117
+ "answer": "The person is preparing to screw something.",
118
+ "choices": [
119
+ "The person is getting ready to go for a run.",
120
+ "The person is preparing to bake a cake.",
121
+ "The person is preparing to paint a wall.",
122
+ "The person is preparing to screw something."
123
+ ],
124
+ "end_time": 113.57
125
+ }
126
+ ]
127
+ },
128
+ {
129
+ "video_id": 1200,
130
+ "video_path": "data/ovobench/videos/Ego4D/video/f93e485e-9ec3-4856-809d-f050c6439b7b.mp4",
131
+ "task": "FPD",
132
+ "conversations": [
133
+ {
134
+ "question": "What is this person about to do?",
135
+ "answer": "The person is about to use a screwdriver.",
136
+ "choices": [
137
+ "The person is about to hammer a nail.",
138
+ "The person is about to start painting a wall.",
139
+ "The person is about to plant a tree.",
140
+ "The person is about to use a screwdriver."
141
+ ],
142
+ "end_time": 164.03
143
+ }
144
+ ]
145
+ },
146
+ {
147
+ "video_id": 1201,
148
+ "video_path": "data/ovobench/videos/Ego4D/video/f93e485e-9ec3-4856-809d-f050c6439b7b.mp4",
149
+ "task": "FPD",
150
+ "conversations": [
151
+ {
152
+ "question": "What is this person about to do?",
153
+ "answer": "The person holds a screwdriver near a screw, indicating they are about to remove the screw.",
154
+ "choices": [
155
+ "The person is about to tighten the screw.",
156
+ "The person is about to adjust the position of the screwdriver.",
157
+ "The person holds a screwdriver near a screw, indicating they are about to remove the screw.",
158
+ "The person is about to clean the area around the screw."
159
+ ],
160
+ "end_time": 159.07
161
+ }
162
+ ]
163
+ },
164
+ {
165
+ "video_id": 1202,
166
+ "video_path": "data/ovobench/videos/Ego4D/video/08a7fe30-3f6c-42f7-8b92-88df11e59175.mp4",
167
+ "task": "FPD",
168
+ "conversations": [
169
+ {
170
+ "question": "What is this person going to do next?",
171
+ "answer": "The person is going to wash their hands.",
172
+ "choices": [
173
+ "The person is going to make a phone call.",
174
+ "The person is going to read a book.",
175
+ "The person is going to wash their hands.",
176
+ "The person is going to write an email."
177
+ ],
178
+ "end_time": 144.57
179
+ }
180
+ ]
181
+ },
182
+ {
183
+ "video_id": 1203,
184
+ "video_path": "data/ovobench/videos/Ego4D/video/08a7fe30-3f6c-42f7-8b92-88df11e59175.mp4",
185
+ "task": "FPD",
186
+ "conversations": [
187
+ {
188
+ "question": "What is this person about to do with the object in their hand?",
189
+ "answer": "The person is about to use a knife to cut.",
190
+ "choices": [
191
+ "The person is about to swing a bat to hit a ball.",
192
+ "The person is about to use a screwdriver to tighten a screw.",
193
+ "The person is about to use a knife to cut.",
194
+ "The person is about to light a match to start a fire."
195
+ ],
196
+ "end_time": 194.13
197
+ }
198
+ ]
199
+ },
200
+ {
201
+ "video_id": 1204,
202
+ "video_path": "data/ovobench/videos/Ego4D/video/08a7fe30-3f6c-42f7-8b92-88df11e59175.mp4",
203
+ "task": "FPD",
204
+ "conversations": [
205
+ {
206
+ "question": "What is this person preparing to do?",
207
+ "answer": "The person is preparing to wash their hands.",
208
+ "choices": [
209
+ "The person is preparing to cook a meal.",
210
+ "The person is preparing to wash their hands.",
211
+ "The person is preparing to write a letter.",
212
+ "The person is preparing to take a photograph."
213
+ ],
214
+ "end_time": 176.33
215
+ }
216
+ ]
217
+ },
218
+ {
219
+ "video_id": 1205,
220
+ "video_path": "data/ovobench/videos/Ego4D/video/0bc8996b-85d5-4800-b840-f7ef647a21ee.mp4",
221
+ "task": "FPD",
222
+ "conversations": [
223
+ {
224
+ "question": "What is this person preparing to do next?",
225
+ "answer": "The person is going to screw in a bolt on the engine.",
226
+ "choices": [
227
+ "The person is going to screw in a bolt on the engine.",
228
+ "The person is setting up a toolkit for an engine repair task.",
229
+ "The person is preparing to check the oil level of the engine.",
230
+ "The person is about to replace a worn-out fan belt in the engine compartment."
231
+ ],
232
+ "end_time": 107.23
233
+ }
234
+ ]
235
+ },
236
+ {
237
+ "video_id": 1206,
238
+ "video_path": "data/ovobench/videos/Ego4D/video/16ba692f-cb3a-4998-be66-f7e5e74b49f2.mp4",
239
+ "task": "FPD",
240
+ "conversations": [
241
+ {
242
+ "question": "What is this person about to do?",
243
+ "answer": "The person is reaching for a napkin on the table.",
244
+ "choices": [
245
+ "The person is reaching for a napkin on the table.",
246
+ "The person is preparing to stand up from their chair.",
247
+ "The person is reaching to adjust their glasses.",
248
+ "The person is about to take a sip of their drink."
249
+ ],
250
+ "end_time": 27.13
251
+ }
252
+ ]
253
+ },
254
+ {
255
+ "video_id": 1207,
256
+ "video_path": "data/ovobench/videos/Ego4D/video/16ba692f-cb3a-4998-be66-f7e5e74b49f2.mp4",
257
+ "task": "FPD",
258
+ "conversations": [
259
+ {
260
+ "question": "What is this person going to do with the small object in their hands?",
261
+ "answer": "The person is going to place the screw on the table.",
262
+ "choices": [
263
+ "The person is going to place the screw on the table.",
264
+ "The person is going to put the screw in a toolbox.",
265
+ "The person is going to throw the screw into the trash.",
266
+ "The person is going to use the screw to fix a chair."
267
+ ],
268
+ "end_time": 134.5
269
+ }
270
+ ]
271
+ },
272
+ {
273
+ "video_id": 1208,
274
+ "video_path": "data/ovobench/videos/Ego4D/video/16ba692f-cb3a-4998-be66-f7e5e74b49f2.mp4",
275
+ "task": "FPD",
276
+ "conversations": [
277
+ {
278
+ "question": "What is this person going to do?",
279
+ "answer": "The person is going to pick up a screwdriver.",
280
+ "choices": [
281
+ "The person is going to assemble a new piece of furniture.",
282
+ "The person is going to fix a leak under the sink.",
283
+ "The person is going to change the batteries in a toy.",
284
+ "The person is going to pick up a screwdriver."
285
+ ],
286
+ "end_time": 200.43
287
+ }
288
+ ]
289
+ },
290
+ {
291
+ "video_id": 1209,
292
+ "video_path": "data/ovobench/videos/Ego4D/video/22969066-24e1-4711-b808-bb89e0a84c9c.mp4",
293
+ "task": "FPD",
294
+ "conversations": [
295
+ {
296
+ "question": "What is this person about to do next?",
297
+ "answer": "The person is going to put the drawer in place.",
298
+ "choices": [
299
+ "The person is going to put the drawer in place.",
300
+ "The person is going to open a drawer nearby.",
301
+ "The person is going to close the drawer.",
302
+ "The person is going to adjust the contents of the drawer."
303
+ ],
304
+ "end_time": 142.73
305
+ }
306
+ ]
307
+ },
308
+ {
309
+ "video_id": 1210,
310
+ "video_path": "data/ovobench/videos/Ego4D/video/e1fa5de2-eb21-41a3-85b3-882a3652df42.mp4",
311
+ "task": "ACR",
312
+ "conversations": [
313
+ {
314
+ "question": "What is he doing with that tool?",
315
+ "answer": "He is inflating a tire.",
316
+ "choices": [
317
+ "He is repairing a bicycle.",
318
+ "She is using a jack to lift a car.",
319
+ "He is tightening bolts on a wheel.",
320
+ "He is inflating a tire."
321
+ ],
322
+ "end_time": 201.93
323
+ }
324
+ ]
325
+ },
326
+ {
327
+ "video_id": 1211,
328
+ "video_path": "data/ovobench/videos/Ego4D/video/e1fa5de2-eb21-41a3-85b3-882a3652df42.mp4",
329
+ "task": "ACR",
330
+ "conversations": [
331
+ {
332
+ "question": "What is he doing?",
333
+ "answer": "He is opening the window.",
334
+ "choices": [
335
+ "He is using a gauge to measure the pressure.",
336
+ "He is repairing a bicycle.",
337
+ "He is inflating a tire.",
338
+ "He is opening the window."
339
+ ],
340
+ "end_time": 228.93
341
+ }
342
+ ]
343
+ },
344
+ {
345
+ "video_id": 1212,
346
+ "video_path": "data/ovobench/videos/Ego4D/video/e1fa5de2-eb21-41a3-85b3-882a3652df42.mp4",
347
+ "task": "ACR",
348
+ "conversations": [
349
+ {
350
+ "question": "What action is being done to the tire?",
351
+ "answer": "He is pressing down on the tire to check its firmness.",
352
+ "choices": [
353
+ "She is lubricating the tire to improve performance.",
354
+ "They are rotating the tire to maintain even wear.",
355
+ "He is adjusting the alignment of the tire for better handling.",
356
+ "He is pressing down on the tire to check its firmness."
357
+ ],
358
+ "end_time": 210.93
359
+ }
360
+ ]
361
+ },
362
+ {
363
+ "video_id": 1213,
364
+ "video_path": "data/ovobench/videos/Ego4D/video/f43e2a97-b81b-4271-befd-569552d8129c.mp4",
365
+ "task": "ACR",
366
+ "conversations": [
367
+ {
368
+ "question": "What is he doing with the tool in his left hand?",
369
+ "answer": "He is adjusting a bolt with a tool",
370
+ "choices": [
371
+ "He is adjusting a bolt with a tool",
372
+ "He is tightening a loose connection with a wrench",
373
+ "He is aligning two components precisely with a spanner",
374
+ "He is connecting the pipe to the interface."
375
+ ],
376
+ "end_time": 201.0
377
+ }
378
+ ]
379
+ },
380
+ {
381
+ "video_id": 1214,
382
+ "video_path": "data/ovobench/videos/Ego4D/video/f43e2a97-b81b-4271-befd-569552d8129c.mp4",
383
+ "task": "ACR",
384
+ "conversations": [
385
+ {
386
+ "question": "What is he doing?",
387
+ "answer": "He is wiping something with a rag.",
388
+ "choices": [
389
+ "He is wiping something with a rag.",
390
+ "He is hitting objects with a tool.",
391
+ "He is inspecting an object closely.",
392
+ "He is connecting the pipe to the interface."
393
+ ],
394
+ "end_time": 287.0
395
+ }
396
+ ]
397
+ },
398
+ {
399
+ "video_id": 1215,
400
+ "video_path": "data/ovobench/videos/Ego4D/video/f43e2a97-b81b-4271-befd-569552d8129c.mp4",
401
+ "task": "ACR",
402
+ "conversations": [
403
+ {
404
+ "question": "What is he doing?",
405
+ "answer": "He is connecting the pipe to the interface.",
406
+ "choices": [
407
+ "He is connecting the pipe to the interface.",
408
+ "He is disconnecting the pipe from the interface.",
409
+ "He is inspecting the pipe for any damage.",
410
+ "He is cleaning the pipe with a cloth."
411
+ ],
412
+ "end_time": 103.0
413
+ }
414
+ ]
415
+ },
416
+ {
417
+ "video_id": 1216,
418
+ "video_path": "data/ovobench/videos/Ego4D/video/80c835c8-d7ec-4c6d-b1af-58315c333cbf.mp4",
419
+ "task": "ACR",
420
+ "conversations": [
421
+ {
422
+ "question": "What is being done with the object in his hands?",
423
+ "answer": "He is holding a yellow card.",
424
+ "choices": [
425
+ "He is holding a yellow card.",
426
+ "He is grasping a white notebook.",
427
+ "He is spinning a small globe.",
428
+ "He is examining a blue envelope."
429
+ ],
430
+ "end_time": 113.87
431
+ }
432
+ ]
433
+ },
434
+ {
435
+ "video_id": 1217,
436
+ "video_path": "data/ovobench/videos/Ego4D/video/80c835c8-d7ec-4c6d-b1af-58315c333cbf.mp4",
437
+ "task": "ACR",
438
+ "conversations": [
439
+ {
440
+ "question": "How does he interact with the object on the floor?",
441
+ "answer": "He picks up the object.",
442
+ "choices": [
443
+ "He sets the object aside carefully.",
444
+ "He examines the object closely.",
445
+ "He picks up the object.",
446
+ "He steps on the object."
447
+ ],
448
+ "end_time": 115.87
449
+ }
450
+ ]
451
+ },
452
+ {
453
+ "video_id": 1218,
454
+ "video_path": "data/ovobench/videos/Ego4D/video/80c835c8-d7ec-4c6d-b1af-58315c333cbf.mp4",
455
+ "task": "ACR",
456
+ "conversations": [
457
+ {
458
+ "question": "What is he adjusting?",
459
+ "answer": "adjusting the brakes of his motorcycle.",
460
+ "choices": [
461
+ "adjusting the brakes of his motorcycle.",
462
+ "adjusting the Front wheel screw.",
463
+ "adjusting the thermostat in his home.",
464
+ "adjusting the headset on his bicycle."
465
+ ],
466
+ "end_time": 140.87
467
+ }
468
+ ]
469
+ },
470
+ {
471
+ "video_id": 1219,
472
+ "video_path": "data/ovobench/videos/Ego4D/video/68e54307-c0d1-4ed3-9c67-45827293c21e.mp4",
473
+ "task": "ACR",
474
+ "conversations": [
475
+ {
476
+ "question": "What is this person doing with the basket?",
477
+ "answer": "Adjusting the basket attached to the bike.",
478
+ "choices": [
479
+ "Removing items from the basket on the bike.",
480
+ "Removing the basket from the bike.",
481
+ "Fitting a new basket onto the handlebars.",
482
+ "Adjusting the basket attached to the bike."
483
+ ],
484
+ "end_time": 134.8
485
+ }
486
+ ]
487
+ },
488
+ {
489
+ "video_id": 1220,
490
+ "video_path": "data/ovobench/videos/Ego4D/video/68e54307-c0d1-4ed3-9c67-45827293c21e.mp4",
491
+ "task": "ACR",
492
+ "conversations": [
493
+ {
494
+ "question": "What is this man doing?",
495
+ "answer": "They organize tools on the workbench.",
496
+ "choices": [
497
+ "They pick up a tool from the workbench.",
498
+ "They place a tool back on the workbench.",
499
+ "They inspect a tool closely.",
500
+ "They organize tools on the workbench."
501
+ ],
502
+ "end_time": 164.8
503
+ }
504
+ ]
505
+ },
506
+ {
507
+ "video_id": 1221,
508
+ "video_path": "data/ovobench/videos/Ego4D/video/68e54307-c0d1-4ed3-9c67-45827293c21e.mp4",
509
+ "task": "ACR",
510
+ "conversations": [
511
+ {
512
+ "question": "What action does this individual perform with the tool?",
513
+ "answer": "They approach the bicycle with the tool.",
514
+ "choices": [
515
+ "They use the tool to adjust the bicycle brakes.",
516
+ "They grasp the tool as they examine the bicycle's tire.",
517
+ "They approach the bicycle with the tool.",
518
+ "They apply the tool to tighten the bicycle's seat."
519
+ ],
520
+ "end_time": 166.8
521
+ }
522
+ ]
523
+ },
524
+ {
525
+ "video_id": 1222,
526
+ "video_path": "data/ovobench/videos/Ego4D/video/6a7972a1-4984-4680-b91c-3326dbbe97aa.mp4",
527
+ "task": "ACR",
528
+ "conversations": [
529
+ {
530
+ "question": "What action is being performed with the mushrooms?",
531
+ "answer": "They are being prepared for cooking.",
532
+ "choices": [
533
+ "They are being examined for quality.",
534
+ "They are being mixed into compost.",
535
+ "They are being arranged for display.",
536
+ "They are being prepared for cooking."
537
+ ],
538
+ "end_time": 142.77
539
+ }
540
+ ]
541
+ },
542
+ {
543
+ "video_id": 1223,
544
+ "video_path": "data/ovobench/videos/Ego4D/video/6a7972a1-4984-4680-b91c-3326dbbe97aa.mp4",
545
+ "task": "ACR",
546
+ "conversations": [
547
+ {
548
+ "question": "What is being added to the pan?",
549
+ "answer": "Oil.",
550
+ "choices": [
551
+ "Salt.",
552
+ "Butter.",
553
+ "Water.",
554
+ "Oil."
555
+ ],
556
+ "end_time": 153.77
557
+ }
558
+ ]
559
+ },
560
+ {
561
+ "video_id": 1224,
562
+ "video_path": "data/ovobench/videos/Ego4D/video/6a7972a1-4984-4680-b91c-3326dbbe97aa.mp4",
563
+ "task": "ACR",
564
+ "conversations": [
565
+ {
566
+ "question": "What is done after retrieving the bottle cap from the floor?",
567
+ "answer": "It is placed near the sink.",
568
+ "choices": [
569
+ "It is thrown into the recycling bin.",
570
+ "It is placed near the sink.",
571
+ "It is set on the bottle.",
572
+ "It is dropped into the trash can."
573
+ ],
574
+ "end_time": 170.77
575
+ }
576
+ ]
577
+ },
578
+ {
579
+ "video_id": 1225,
580
+ "video_path": "data/ovobench/videos/Ego4D/video/1b2f1101-b3d9-4be5-bc92-393c983be9b4.mp4",
581
+ "task": "ACR",
582
+ "conversations": [
583
+ {
584
+ "question": "What is he doing with his hands above the table?",
585
+ "answer": "He is placing nuts on the table.",
586
+ "choices": [
587
+ "He is stacking coins on the table.",
588
+ "He is decorating cupcakes on the table.",
589
+ "He is sorting beads on the table.",
590
+ "He is placing nuts on the table."
591
+ ],
592
+ "end_time": 94.47
593
+ }
594
+ ]
595
+ },
596
+ {
597
+ "video_id": 1226,
598
+ "video_path": "data/ovobench/videos/Ego4D/video/1b2f1101-b3d9-4be5-bc92-393c983be9b4.mp4",
599
+ "task": "ACR",
600
+ "conversations": [
601
+ {
602
+ "question": "What is he doing?",
603
+ "answer": "take out screws.",
604
+ "choices": [
605
+ "carry small parts.",
606
+ "take out screws.",
607
+ "tape the bag.",
608
+ "organize bolts."
609
+ ],
610
+ "end_time": 135.47
611
+ }
612
+ ]
613
+ },
614
+ {
615
+ "video_id": 1227,
616
+ "video_path": "data/ovobench/videos/Ego4D/video/1b2f1101-b3d9-4be5-bc92-393c983be9b4.mp4",
617
+ "task": "ACR",
618
+ "conversations": [
619
+ {
620
+ "question": "What action is he doing as he moves to the side?",
621
+ "answer": "He is holding a cardboard piece.",
622
+ "choices": [
623
+ "He is pushing a shopping cart.",
624
+ "He is swinging a rope back and forth.",
625
+ "He is guiding a toddler by the hand.",
626
+ "He is holding a cardboard piece."
627
+ ],
628
+ "end_time": 118.47
629
+ }
630
+ ]
631
+ },
632
+ {
633
+ "video_id": 1228,
634
+ "video_path": "data/ovobench/videos/Ego4D/video/6a7972a1-4984-4680-b91c-3326dbbe97aa.mp4",
635
+ "task": "ACR",
636
+ "conversations": [
637
+ {
638
+ "question": "What is the person doing with the vegetables?",
639
+ "answer": "They are chopping vegetables.",
640
+ "choices": [
641
+ "They are eating vegetables.",
642
+ "They are washing vegetables.",
643
+ "They are chopping vegetables.",
644
+ "They are cooking vegetables."
645
+ ],
646
+ "end_time": 119.3
647
+ }
648
+ ]
649
+ },
650
+ {
651
+ "video_id": 1229,
652
+ "video_path": "data/ovobench/videos/Ego4D/video/6a7972a1-4984-4680-b91c-3326dbbe97aa.mp4",
653
+ "task": "ACR",
654
+ "conversations": [
655
+ {
656
+ "question": "What does the person do after chopping the onion?",
657
+ "answer": "They start opening the package of mushrooms.",
658
+ "choices": [
659
+ "They grab a skillet to stir-fry the vegetables.",
660
+ "They take out the carrots to chop next.",
661
+ "They reach for the cutting board to slice tomatoes.",
662
+ "They start opening the package of mushrooms."
663
+ ],
664
+ "end_time": 135.3
665
+ }
666
+ ]
667
+ },
668
+ {
669
+ "video_id": 1230,
670
+ "video_path": "data/ovobench/videos/Ego4D/video/6a7972a1-4984-4680-b91c-3326dbbe97aa.mp4",
671
+ "task": "ACR",
672
+ "conversations": [
673
+ {
674
+ "question": "What action takes place with the pan on the stove?",
675
+ "answer": "Oil is poured into the pan.",
676
+ "choices": [
677
+ "Pasta is boiled in the pan.",
678
+ "Oil is poured into the pan.",
679
+ "Water is heated in the pan.",
680
+ "Vegetables are sautéd in the pan."
681
+ ],
682
+ "end_time": 153.3
683
+ }
684
+ ]
685
+ },
686
+ {
687
+ "video_id": 1231,
688
+ "video_path": "data/ovobench/videos/Ego4D/video/151dbdab-9705-4641-a8f7-9f432cec8c2e.mp4",
689
+ "task": "ACR",
690
+ "conversations": [
691
+ {
692
+ "question": "What is this person doing with the bike?",
693
+ "answer": "Adjusting the brake of the bike.",
694
+ "choices": [
695
+ "Repairing a flat tire on the bike.",
696
+ "Cleaning the frame of the bike.",
697
+ "Adjusting the seat height of the bike.",
698
+ "Adjusting the brake of the bike."
699
+ ],
700
+ "end_time": 230.97
701
+ }
702
+ ]
703
+ },
704
+ {
705
+ "video_id": 1232,
706
+ "video_path": "data/ovobench/videos/Ego4D/video/151dbdab-9705-4641-a8f7-9f432cec8c2e.mp4",
707
+ "task": "ACR",
708
+ "conversations": [
709
+ {
710
+ "question": "What is this person doing?",
711
+ "answer": "Walking towards the bike.",
712
+ "choices": [
713
+ "Stepping outside the room.",
714
+ "Walking towards the bike.",
715
+ "Checking the tire pressure.",
716
+ "Organizing electrical wires."
717
+ ],
718
+ "end_time": 107.37
719
+ }
720
+ ]
721
+ },
722
+ {
723
+ "video_id": 1233,
724
+ "video_path": "data/ovobench/videos/Ego4D/video/8ccb4f1d-d0ce-48a4-8eca-339084de7366.mp4",
725
+ "task": "ACR",
726
+ "conversations": [
727
+ {
728
+ "question": "What is he doing with the carrot?",
729
+ "answer": "He is grating the carrot.",
730
+ "choices": [
731
+ "He is dicing the carrot.",
732
+ "He is eating the carrot.",
733
+ "He is grating the carrot.",
734
+ "He is washing the carrot."
735
+ ],
736
+ "end_time": 74.0
737
+ }
738
+ ]
739
+ },
740
+ {
741
+ "video_id": 1234,
742
+ "video_path": "data/ovobench/videos/Ego4D/video/67dce8e0-ea5f-44dd-a2fe-a8f8ed157625.mp4",
743
+ "task": "ACR",
744
+ "conversations": [
745
+ {
746
+ "question": "What is the man holding while using his phone?",
747
+ "answer": "The man is holding an iron while using his phone.",
748
+ "choices": [
749
+ "The man is holding a briefcase while using his phone.",
750
+ "The man is holding a cup of coffee while using his phone.",
751
+ "The man is holding an iron while using his phone.",
752
+ "The man is holding a tennis racket while using his phone."
753
+ ],
754
+ "end_time": 201.97
755
+ }
756
+ ]
757
+ },
758
+ {
759
+ "video_id": 1235,
760
+ "video_path": "data/ovobench/videos/Ego4D/video/6774ba16-a7dc-4104-8d95-6feb6058b3e1.mp4",
761
+ "task": "ACR",
762
+ "conversations": [
763
+ {
764
+ "question": "What is this person doing?",
765
+ "answer": "shake a bottle in his hand.",
766
+ "choices": [
767
+ "inspect the tire for punctures.",
768
+ "shake a bottle in his hand.",
769
+ "tightens the wheel nuts.",
770
+ "clean the wheel rim."
771
+ ],
772
+ "end_time": 257.47
773
+ }
774
+ ]
775
+ },
776
+ {
777
+ "video_id": 1236,
778
+ "video_path": "data/ovobench/videos/Ego4D/video/9c20c227-5f30-45c3-8c7c-4d556c24eaed.mp4",
779
+ "task": "ACR",
780
+ "conversations": [
781
+ {
782
+ "question": "What action is he performing with the blue checkered cloth?",
783
+ "answer": "He is folding the cloth.",
784
+ "choices": [
785
+ "He is wiping the tabletop with the cloth.",
786
+ "He is folding the cloth.",
787
+ "He is covering a basket with the cloth.",
788
+ "He is tying the cloth around his neck."
789
+ ],
790
+ "end_time": 258.97
791
+ }
792
+ ]
793
+ },
794
+ {
795
+ "video_id": 1237,
796
+ "video_path": "data/ovobench/videos/Ego4D/video/9c20c227-5f30-45c3-8c7c-4d556c24eaed.mp4",
797
+ "task": "ACR",
798
+ "conversations": [
799
+ {
800
+ "question": "How does he do with the laundry basket.",
801
+ "answer": "He puts folded clothes in the laundry basket.",
802
+ "choices": [
803
+ "He lifts the laundry basket to take it to the washing machine.",
804
+ "He puts folded clothes in the laundry basket.",
805
+ "He throws dirty clothes into the laundry basket.",
806
+ "He adds freshly washed clothes to the laundry basket."
807
+ ],
808
+ "end_time": 269.97
809
+ }
810
+ ]
811
+ },
812
+ {
813
+ "video_id": 1238,
814
+ "video_path": "data/ovobench/videos/Ego4D/video/80c835c8-d7ec-4c6d-b1af-58315c333cbf.mp4",
815
+ "task": "ACR",
816
+ "conversations": [
817
+ {
818
+ "question": "What is the man doing with the motorcycle?",
819
+ "answer": "He is fastening a bolt on the motorcycle.",
820
+ "choices": [
821
+ "He is fastening a bolt on the motorcycle.",
822
+ "He is adjusting the handlebars of the motorcycle.",
823
+ "He is inspecting the brakes on the motorcycle.",
824
+ "He is checking the tire pressure on the motorcycle."
825
+ ],
826
+ "end_time": 61.43
827
+ }
828
+ ]
829
+ },
830
+ {
831
+ "video_id": 1239,
832
+ "video_path": "data/ovobench/videos/Ego4D/video/6a7972a1-4984-4680-b91c-3326dbbe97aa.mp4",
833
+ "task": "ACR",
834
+ "conversations": [
835
+ {
836
+ "question": "What action is being performed with the objects on her hands?",
837
+ "answer": "They are being worn.",
838
+ "choices": [
839
+ "They are being folded.",
840
+ "They are being bought.",
841
+ "They are being worn.",
842
+ "They are being sorted."
843
+ ],
844
+ "end_time": 9.5
845
+ }
846
+ ]
847
+ },
848
+ {
849
+ "video_id": 1240,
850
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/171.mp4",
851
+ "task": "ACR",
852
+ "conversations": [
853
+ {
854
+ "question": "What action is being performed with this tool?",
855
+ "answer": "The tool is being displayed.",
856
+ "choices": [
857
+ "The tool is being purchased.",
858
+ "The tool is being examined.",
859
+ "The tool is being displayed.",
860
+ "The tool is being cleaned."
861
+ ],
862
+ "end_time": 30.06
863
+ }
864
+ ]
865
+ },
866
+ {
867
+ "video_id": 1241,
868
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/171.mp4",
869
+ "task": "ACR",
870
+ "conversations": [
871
+ {
872
+ "question": "What action is he performing with the device in his hand?",
873
+ "answer": "He is testing the electrical current.",
874
+ "choices": [
875
+ "He is testing the electrical current.",
876
+ "He is diagnosing electrical faults.",
877
+ "He is measuring the voltage level.",
878
+ "He is calibrating the device settings."
879
+ ],
880
+ "end_time": 105.94
881
+ }
882
+ ]
883
+ },
884
+ {
885
+ "video_id": 1242,
886
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/171.mp4",
887
+ "task": "ACR",
888
+ "conversations": [
889
+ {
890
+ "question": "What action does the device indicate when it lights up?",
891
+ "answer": "It indicates the presence of electrical current.",
892
+ "choices": [
893
+ "It signifies that the device is powered on.",
894
+ "It shows that the temperature is above the set limit.",
895
+ "It confirms successful data transmission.",
896
+ "It indicates the presence of electrical current."
897
+ ],
898
+ "end_time": 124.96
899
+ }
900
+ ]
901
+ },
902
+ {
903
+ "video_id": 1243,
904
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/274.mp4",
905
+ "task": "ACR",
906
+ "conversations": [
907
+ {
908
+ "question": "What is the person in the blue raincoat doing?",
909
+ "answer": "He is holding the railing during the hurricane.",
910
+ "choices": [
911
+ "He is photographing the waves from the bridge during the storm.",
912
+ "He is walking his dog in the park while it rains heavily.",
913
+ "He is climbing a hill under the heavy rain for an adventure.",
914
+ "He is holding the railing during the hurricane."
915
+ ],
916
+ "end_time": 87.47
917
+ }
918
+ ]
919
+ },
920
+ {
921
+ "video_id": 1244,
922
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/274.mp4",
923
+ "task": "ACR",
924
+ "conversations": [
925
+ {
926
+ "question": "How is the water flowing?",
927
+ "answer": "The water is flowing turbulently.",
928
+ "choices": [
929
+ "The water is flowing smoothly downstream.",
930
+ "The water is flowing turbulently.",
931
+ "The water is trickling gently over the rocks.",
932
+ "The water is cascading forcefully over the falls."
933
+ ],
934
+ "end_time": 188.47
935
+ }
936
+ ]
937
+ },
938
+ {
939
+ "video_id": 1245,
940
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/117.mp4",
941
+ "task": "ACR",
942
+ "conversations": [
943
+ {
944
+ "question": "What is the player in the burgundy jersey doing?",
945
+ "answer": "He is taking a free kick.",
946
+ "choices": [
947
+ "He is preparing to take a corner kick.",
948
+ "He is taking a free kick.",
949
+ "He is getting ready to throw-in the ball.",
950
+ "He is dribbling the ball towards the goal."
951
+ ],
952
+ "end_time": 3.21
953
+ }
954
+ ]
955
+ },
956
+ {
957
+ "video_id": 1246,
958
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/117.mp4",
959
+ "task": "ACR",
960
+ "conversations": [
961
+ {
962
+ "question": "What is the player in the blue jersey doing with the ball near the goal?",
963
+ "answer": "The player in the blue jersey is dribbling the ball.",
964
+ "choices": [
965
+ "The player in the blue jersey is juggling the ball.",
966
+ "The player in the blue jersey is dribbling the ball.",
967
+ "The player in the blue jersey is passing the ball.",
968
+ "The player in the blue jersey is shooting the ball."
969
+ ],
970
+ "end_time": 24.59
971
+ }
972
+ ]
973
+ },
974
+ {
975
+ "video_id": 1247,
976
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/117.mp4",
977
+ "task": "ACR",
978
+ "conversations": [
979
+ {
980
+ "question": "What is the player with blue jersey doing with the ball?",
981
+ "answer": "The player is faking out an opponent and then crossing the ball.",
982
+ "choices": [
983
+ "The player is preparing to take a corner kick to set up a scoring opportunity.",
984
+ "The player is shooting the ball toward the goal from outside the box.",
985
+ "The player is faking out an opponent and then crossing the ball.",
986
+ "The player is dribbling past a defender while controlling the ball."
987
+ ],
988
+ "end_time": 81.91
989
+ }
990
+ ]
991
+ },
992
+ {
993
+ "video_id": 1248,
994
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/117.mp4",
995
+ "task": "ACR",
996
+ "conversations": [
997
+ {
998
+ "question": "How is the player in red interacting with the player in blue near the goal?",
999
+ "answer": "The player in red is tackling the player in blue.",
1000
+ "choices": [
1001
+ "The player in red is tackling the player in blue.",
1002
+ "The player in red is pushing the player in blue away from the goal.",
1003
+ "The player in red is trying to steal the ball from the player in blue.",
1004
+ "The player in red is blocking the player in blue."
1005
+ ],
1006
+ "end_time": 80.91
1007
+ }
1008
+ ]
1009
+ },
1010
+ {
1011
+ "video_id": 1249,
1012
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/117.mp4",
1013
+ "task": "ACR",
1014
+ "conversations": [
1015
+ {
1016
+ "question": "What is the goalkeeper doing?",
1017
+ "answer": "The goalkeeper is diving.",
1018
+ "choices": [
1019
+ "The goalkeeper is diving.",
1020
+ "The goalkeeper is vaulting.",
1021
+ "The goalkeeper is stretching.",
1022
+ "The goalkeeper is plunging."
1023
+ ],
1024
+ "end_time": 81.92
1025
+ }
1026
+ ]
1027
+ },
1028
+ {
1029
+ "video_id": 1250,
1030
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/97.mp4",
1031
+ "task": "ACR",
1032
+ "conversations": [
1033
+ {
1034
+ "question": "What is being done with the black mesh?",
1035
+ "answer": "It is being secured over the crate.",
1036
+ "choices": [
1037
+ "It is being laid flat on the ground.",
1038
+ "It is being secured over the crate.",
1039
+ "It is being wrapped around a piece of equipment.",
1040
+ "It is being used to cover a garden bed."
1041
+ ],
1042
+ "end_time": 207.17
1043
+ }
1044
+ ]
1045
+ },
1046
+ {
1047
+ "video_id": 1251,
1048
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/97.mp4",
1049
+ "task": "ACR",
1050
+ "conversations": [
1051
+ {
1052
+ "question": "How is the wire being used?",
1053
+ "answer": "The wire is wrapped and tied to secure the mesh.",
1054
+ "choices": [
1055
+ "The wire is twisted to form a decorative pattern.",
1056
+ "The wire is wrapped and tied to secure the mesh.",
1057
+ "The wire is strung along the fence to act as a deterrent.",
1058
+ "The wire is connected to the battery to power the device."
1059
+ ],
1060
+ "end_time": 209.18
1061
+ }
1062
+ ]
1063
+ },
1064
+ {
1065
+ "video_id": 1252,
1066
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/97.mp4",
1067
+ "task": "ACR",
1068
+ "conversations": [
1069
+ {
1070
+ "question": "What is he doing with the crate?",
1071
+ "answer": "He is opening the crate to release the animal.",
1072
+ "choices": [
1073
+ "He is sealing the crate shut.",
1074
+ "He is moving the crate to a different spot.",
1075
+ "He is examining the contents of the crate carefully.",
1076
+ "He is opening the crate to release the animal."
1077
+ ],
1078
+ "end_time": 432.36
1079
+ }
1080
+ ]
1081
+ },
1082
+ {
1083
+ "video_id": 1253,
1084
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/97.mp4",
1085
+ "task": "ACR",
1086
+ "conversations": [
1087
+ {
1088
+ "question": "What is he doing with the animal?",
1089
+ "answer": "He is holding and petting the animal.",
1090
+ "choices": [
1091
+ "He is training the animal to follow commands.",
1092
+ "He is observing and sketching the animal.",
1093
+ "He is feeding and talking to the animal.",
1094
+ "He is holding and petting the animal."
1095
+ ],
1096
+ "end_time": 441.37
1097
+ }
1098
+ ]
1099
+ },
1100
+ {
1101
+ "video_id": 1254,
1102
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/309.mp4",
1103
+ "task": "ACR",
1104
+ "conversations": [
1105
+ {
1106
+ "question": "What is he doing?",
1107
+ "answer": "He is looking at the paper.",
1108
+ "choices": [
1109
+ "He is preparing the copier.",
1110
+ "He is adjusting the copier.",
1111
+ "He is looking at the paper.",
1112
+ "He is operating the copier."
1113
+ ],
1114
+ "end_time": 58.97
1115
+ }
1116
+ ]
1117
+ },
1118
+ {
1119
+ "video_id": 1255,
1120
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/309.mp4",
1121
+ "task": "ACR",
1122
+ "conversations": [
1123
+ {
1124
+ "question": "What is he doing?",
1125
+ "answer": "He is drinking water.",
1126
+ "choices": [
1127
+ "He is drinking water.",
1128
+ "He is looking at the paper.",
1129
+ "He is adjusting the copier.",
1130
+ "He is talking on the phone."
1131
+ ],
1132
+ "end_time": 66.97
1133
+ }
1134
+ ]
1135
+ },
1136
+ {
1137
+ "video_id": 1256,
1138
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/12.mp4",
1139
+ "task": "ACR",
1140
+ "conversations": [
1141
+ {
1142
+ "question": "What is the boy with glasses concentrating on?",
1143
+ "answer": "He is drawing on a large piece of paper.",
1144
+ "choices": [
1145
+ "He is drawing on a large piece of paper.",
1146
+ "He is writing in his notebook.",
1147
+ "He is painting a colorful landscape.",
1148
+ "He is solving a complex puzzle."
1149
+ ],
1150
+ "end_time": 209.64
1151
+ }
1152
+ ]
1153
+ },
1154
+ {
1155
+ "video_id": 1257,
1156
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/12.mp4",
1157
+ "task": "ACR",
1158
+ "conversations": [
1159
+ {
1160
+ "question": "What is he doing?",
1161
+ "answer": "He is playing football.",
1162
+ "choices": [
1163
+ "He is playing football.",
1164
+ "He is playing basketball.",
1165
+ "He is playing baseball.",
1166
+ "He is playing tennis."
1167
+ ],
1168
+ "end_time": 60.64
1169
+ }
1170
+ ]
1171
+ },
1172
+ {
1173
+ "video_id": 1258,
1174
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/12.mp4",
1175
+ "task": "ACR",
1176
+ "conversations": [
1177
+ {
1178
+ "question": "What is he doing?",
1179
+ "answer": "He is playing guitar.",
1180
+ "choices": [
1181
+ "He is playing football.",
1182
+ "He is playing guitar.",
1183
+ "He is playing baseball.",
1184
+ "He is playing tennis."
1185
+ ],
1186
+ "end_time": 97.64
1187
+ }
1188
+ ]
1189
+ },
1190
+ {
1191
+ "video_id": 1259,
1192
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/12.mp4",
1193
+ "task": "ACR",
1194
+ "conversations": [
1195
+ {
1196
+ "question": "What is he doing?",
1197
+ "answer": "He is playing Electronic organ.",
1198
+ "choices": [
1199
+ "He is playing football.",
1200
+ "He is playing guitar.",
1201
+ "He is playing Electronic organ.",
1202
+ "He is playing tennis."
1203
+ ],
1204
+ "end_time": 101.64
1205
+ }
1206
+ ]
1207
+ },
1208
+ {
1209
+ "video_id": 1260,
1210
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/12.mp4",
1211
+ "task": "ACR",
1212
+ "conversations": [
1213
+ {
1214
+ "question": "What is he doing?",
1215
+ "answer": "He is playing chess.",
1216
+ "choices": [
1217
+ "He is playing football.",
1218
+ "He is playing guitar.",
1219
+ "He is playing Electronic organ.",
1220
+ "He is playing chess."
1221
+ ],
1222
+ "end_time": 115.64
1223
+ }
1224
+ ]
1225
+ },
1226
+ {
1227
+ "video_id": 1261,
1228
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/64.mp4",
1229
+ "task": "ACR",
1230
+ "conversations": [
1231
+ {
1232
+ "question": "What is he doing?",
1233
+ "answer": "He is celebrating the goal they just scored.",
1234
+ "choices": [
1235
+ "He is preparing to shoot.",
1236
+ "He is celebrating the goal they just scored.",
1237
+ "He is looking for an open teammate to pass to.",
1238
+ "He is dribbling the ball with skill and control."
1239
+ ],
1240
+ "end_time": 64.12
1241
+ }
1242
+ ]
1243
+ },
1244
+ {
1245
+ "video_id": 1262,
1246
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/64.mp4",
1247
+ "task": "ACR",
1248
+ "conversations": [
1249
+ {
1250
+ "question": "What gesture is he making to the goalkeeper?",
1251
+ "answer": "He is sidestepping.",
1252
+ "choices": [
1253
+ "He is extending his arms for balance.",
1254
+ "He is leaning forward in an aggressive stance.",
1255
+ "He is raising his foot for a kick.",
1256
+ "He is sidestepping."
1257
+ ],
1258
+ "end_time": 65.12
1259
+ }
1260
+ ]
1261
+ },
1262
+ {
1263
+ "video_id": 1263,
1264
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/64.mp4",
1265
+ "task": "ACR",
1266
+ "conversations": [
1267
+ {
1268
+ "question": "What is he doing with the ball?",
1269
+ "answer": "He's driving forward with the ball.",
1270
+ "choices": [
1271
+ "He's driving forward with the ball.",
1272
+ "He's passing the ball to a teammate.",
1273
+ "He's shooting the ball towards the goal.",
1274
+ "He's juggling the ball to maintain control."
1275
+ ],
1276
+ "end_time": 100.12
1277
+ }
1278
+ ]
1279
+ },
1280
+ {
1281
+ "video_id": 1264,
1282
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/64.mp4",
1283
+ "task": "ACR",
1284
+ "conversations": [
1285
+ {
1286
+ "question": "How does this player score a goal?",
1287
+ "answer": "He scored with a long shot.",
1288
+ "choices": [
1289
+ "He scored with a long shot.",
1290
+ "He found the net with a header.",
1291
+ "He slotted it home from close range.",
1292
+ "He curled it into the corner."
1293
+ ],
1294
+ "end_time": 135.82
1295
+ }
1296
+ ]
1297
+ },
1298
+ {
1299
+ "video_id": 1265,
1300
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/64.mp4",
1301
+ "task": "ACR",
1302
+ "conversations": [
1303
+ {
1304
+ "question": "How does this player score a goal?",
1305
+ "answer": "He scored with a free kick.",
1306
+ "choices": [
1307
+ "He scored with a penalty kick.",
1308
+ "He scored with a free kick.",
1309
+ "He scored after a counter-attack.",
1310
+ "He scored with a header from a cross."
1311
+ ],
1312
+ "end_time": 222.12
1313
+ }
1314
+ ]
1315
+ },
1316
+ {
1317
+ "video_id": 1266,
1318
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/129.mp4",
1319
+ "task": "ACR",
1320
+ "conversations": [
1321
+ {
1322
+ "question": "What is the person in the white outfit doing?",
1323
+ "answer": "Hiding behind a snowdrift,having a drink.",
1324
+ "choices": [
1325
+ "Lying down on a frozen lake, adjusting the sights on their rifle.",
1326
+ "Standing on top of a snowy hill, peering through binoculars towards the horizon.",
1327
+ "Hiding behind a snowdrift,having a drink.",
1328
+ "Hiding behind a snowdrift, setting up a camera on a tripod."
1329
+ ],
1330
+ "end_time": 193.85
1331
+ }
1332
+ ]
1333
+ },
1334
+ {
1335
+ "video_id": 1267,
1336
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/129.mp4",
1337
+ "task": "ACR",
1338
+ "conversations": [
1339
+ {
1340
+ "question": "What is the person in the white outfit doing?",
1341
+ "answer": "The person is sniping the enemy opposite.",
1342
+ "choices": [
1343
+ "The individual is walking briskly.",
1344
+ "The person is tiptoeing quietly.",
1345
+ "The person is jogging energetically.",
1346
+ "The person is sniping the enemy opposite."
1347
+ ],
1348
+ "end_time": 280.85
1349
+ }
1350
+ ]
1351
+ },
1352
+ {
1353
+ "video_id": 1268,
1354
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/89.mp4",
1355
+ "task": "ACR",
1356
+ "conversations": [
1357
+ {
1358
+ "question": "What is he doing with the object in his hand?",
1359
+ "answer": "He is unwrapping the chocolate bar.",
1360
+ "choices": [
1361
+ "He is unwrapping the chocolate bar.",
1362
+ "He is reading the book.",
1363
+ "He is sketching with the pencil.",
1364
+ "He is texting on his phone."
1365
+ ],
1366
+ "end_time": 439.94
1367
+ }
1368
+ ]
1369
+ },
1370
+ {
1371
+ "video_id": 1269,
1372
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/89.mp4",
1373
+ "task": "ACR",
1374
+ "conversations": [
1375
+ {
1376
+ "question": "What are they doing?",
1377
+ "answer": "Arm wrestle.",
1378
+ "choices": [
1379
+ "holding the object towards the camera while nodding his head.",
1380
+ "displaying the object palm upwards with a slight tilt towards the lens.",
1381
+ "raising the object with one hand while circling it with the other.",
1382
+ "Arm wrestle."
1383
+ ],
1384
+ "end_time": 539.94
1385
+ }
1386
+ ]
1387
+ },
1388
+ {
1389
+ "video_id": 1270,
1390
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/89.mp4",
1391
+ "task": "ACR",
1392
+ "conversations": [
1393
+ {
1394
+ "question": "In this moment, what is his posture while interacting with the person opposite him?",
1395
+ "answer": "He is lying in bed, leaning against the headboard.",
1396
+ "choices": [
1397
+ "He is sitting on the rug, facing the other person directly.",
1398
+ "He is sitting cross-legged on the floor.",
1399
+ "He is lying in bed, leaning against the headboard.",
1400
+ "He is reclining on the sofa, propping himself up with one elbow."
1401
+ ],
1402
+ "end_time": 445.95
1403
+ }
1404
+ ]
1405
+ },
1406
+ {
1407
+ "video_id": 1271,
1408
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/49.mp4",
1409
+ "task": "ACR",
1410
+ "conversations": [
1411
+ {
1412
+ "question": "What is he doing with the mug?",
1413
+ "answer": "He is stirring the mug quickly.",
1414
+ "choices": [
1415
+ "He is washing the mug under the tap.",
1416
+ "He is stirring the mug quickly.",
1417
+ "He is filling the mug with coffee.",
1418
+ "He is drying the mug with a towel."
1419
+ ],
1420
+ "end_time": 402.23
1421
+ }
1422
+ ]
1423
+ },
1424
+ {
1425
+ "video_id": 1272,
1426
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/49.mp4",
1427
+ "task": "ACR",
1428
+ "conversations": [
1429
+ {
1430
+ "question": "What is he doing?",
1431
+ "answer": "He is taking a sip from the mug.",
1432
+ "choices": [
1433
+ "He is typing on a laptop keyboard.",
1434
+ "He is sketching a portrait with a pencil.",
1435
+ "He is reading a book intently.",
1436
+ "He is taking a sip from the mug."
1437
+ ],
1438
+ "end_time": 425.76
1439
+ }
1440
+ ]
1441
+ },
1442
+ {
1443
+ "video_id": 1273,
1444
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/115.mp4",
1445
+ "task": "ACR",
1446
+ "conversations": [
1447
+ {
1448
+ "question": "What is he doing with the jalapeños?",
1449
+ "answer": "He is grilling them.",
1450
+ "choices": [
1451
+ "He is roasting them.",
1452
+ "He is pickling them.",
1453
+ "He is grilling them.",
1454
+ "He is stuffing them."
1455
+ ],
1456
+ "end_time": 278.69
1457
+ }
1458
+ ]
1459
+ },
1460
+ {
1461
+ "video_id": 1274,
1462
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/115.mp4",
1463
+ "task": "ACR",
1464
+ "conversations": [
1465
+ {
1466
+ "question": "What is he doing?",
1467
+ "answer": "He is cutting chili peppers.",
1468
+ "choices": [
1469
+ "He is lifting it with a pair of tongs.",
1470
+ "He is cutting chili peppers.",
1471
+ "He is using a spatula to transfer it.",
1472
+ "He is cradling it gently with both hands."
1473
+ ],
1474
+ "end_time": 152.69
1475
+ }
1476
+ ]
1477
+ },
1478
+ {
1479
+ "video_id": 1275,
1480
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/110.mp4",
1481
+ "task": "ACR",
1482
+ "conversations": [
1483
+ {
1484
+ "question": "What is he doing with his hand?",
1485
+ "answer": "He is gesturing.",
1486
+ "choices": [
1487
+ "He is gesturing.",
1488
+ "He is shaking hands.",
1489
+ "He is clapping.",
1490
+ "He is snapping his fingers."
1491
+ ],
1492
+ "end_time": 47.8
1493
+ }
1494
+ ]
1495
+ },
1496
+ {
1497
+ "video_id": 1276,
1498
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/110.mp4",
1499
+ "task": "ACR",
1500
+ "conversations": [
1501
+ {
1502
+ "question": "What is she doing?",
1503
+ "answer": "She is standing still.",
1504
+ "choices": [
1505
+ "She is standing still.",
1506
+ "She is dancing while sipping.",
1507
+ "She is jogging slowly.",
1508
+ "She is pacing back and forth."
1509
+ ],
1510
+ "end_time": 49.8
1511
+ }
1512
+ ]
1513
+ },
1514
+ {
1515
+ "video_id": 1277,
1516
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/110.mp4",
1517
+ "task": "ACR",
1518
+ "conversations": [
1519
+ {
1520
+ "question": "How does he hold the glass while drinking?",
1521
+ "answer": "He holds the glass with one hand near his mouth, the other hand is on the phone.",
1522
+ "choices": [
1523
+ "He clasps the glass with a firm hold around the top, bringing it to his lips.",
1524
+ "He holds the glass with one hand near his mouth, the other hand is on the phone.",
1525
+ "He holds the glass at mid-height with a relaxed grip as he drinks.",
1526
+ "He grips the glass with both hands at the base while sipping."
1527
+ ],
1528
+ "end_time": 55.8
1529
+ }
1530
+ ]
1531
+ },
1532
+ {
1533
+ "video_id": 1278,
1534
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/150.mp4",
1535
+ "task": "ACR",
1536
+ "conversations": [
1537
+ {
1538
+ "question": "What is he doing?",
1539
+ "answer": "Aiming an M416 rifle at the target",
1540
+ "choices": [
1541
+ "Sketching a landscape with charcoal",
1542
+ "Throwing a baseball towards the batter",
1543
+ "He is driving the car.",
1544
+ "Aiming an M416 rifle at the target"
1545
+ ],
1546
+ "end_time": 97.85
1547
+ }
1548
+ ]
1549
+ },
1550
+ {
1551
+ "video_id": 1279,
1552
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/150.mp4",
1553
+ "task": "ACR",
1554
+ "conversations": [
1555
+ {
1556
+ "question": "What is he doing?",
1557
+ "answer": "He is driving the car.",
1558
+ "choices": [
1559
+ "He is driving the car.",
1560
+ "He is aiming an M416 rifle at the target.",
1561
+ "He is playing the guitar.",
1562
+ "He is cooking in the kitchen."
1563
+ ],
1564
+ "end_time": 77.85
1565
+ }
1566
+ ]
1567
+ },
1568
+ {
1569
+ "video_id": 1280,
1570
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/150.mp4",
1571
+ "task": "ACR",
1572
+ "conversations": [
1573
+ {
1574
+ "question": "How is he positioned while being shot at from the left?",
1575
+ "answer": "He is crouching behind a vehicle.",
1576
+ "choices": [
1577
+ "He is standing behind a dumpster.",
1578
+ "He is partially hidden in a doorway.",
1579
+ "He is crouching behind a bush.",
1580
+ "He is crouching behind a vehicle."
1581
+ ],
1582
+ "end_time": 103.84
1583
+ }
1584
+ ]
1585
+ },
1586
+ {
1587
+ "video_id": 1281,
1588
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/217.mp4",
1589
+ "task": "ACR",
1590
+ "conversations": [
1591
+ {
1592
+ "question": "What is he doing with the bottle?",
1593
+ "answer": "He puts the bottle on the table.",
1594
+ "choices": [
1595
+ "He shakes the bottle vigorously.",
1596
+ "He puts the bottle on the table.",
1597
+ "He picks up the bottle from the floor.",
1598
+ "He hands the bottle to a friend."
1599
+ ],
1600
+ "end_time": 333.0
1601
+ }
1602
+ ]
1603
+ },
1604
+ {
1605
+ "video_id": 1282,
1606
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/217.mp4",
1607
+ "task": "ACR",
1608
+ "conversations": [
1609
+ {
1610
+ "question": "What did he do with the clothes?",
1611
+ "answer": "He picked up the clothes.",
1612
+ "choices": [
1613
+ "He sorted the clothes by color.",
1614
+ "He hung the clothes back up.",
1615
+ "He folded the clothes.",
1616
+ "He picked up the clothes."
1617
+ ],
1618
+ "end_time": 354.0
1619
+ }
1620
+ ]
1621
+ },
1622
+ {
1623
+ "video_id": 1283,
1624
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/217.mp4",
1625
+ "task": "ACR",
1626
+ "conversations": [
1627
+ {
1628
+ "question": "What is he doing with his shoe?",
1629
+ "answer": "He is tying his shoelaces.",
1630
+ "choices": [
1631
+ "He is examining a small scratch on the sole.",
1632
+ "He is tying his shoelaces.",
1633
+ "He is polishing his shoe with a cloth.",
1634
+ "He is removing a stone from the tread."
1635
+ ],
1636
+ "end_time": 358.48
1637
+ }
1638
+ ]
1639
+ },
1640
+ {
1641
+ "video_id": 1284,
1642
+ "video_path": "data/ovobench/videos/AutoEvalMetaData/256.mp4",
1643
+ "task": "ACR",
1644
+ "conversations": [
1645
+ {
1646
+ "question": "What is she doing?",
1647
+ "answer": "She is working with computer.",
1648
+ "choices": [
1649
+ "She is working with computer.",
1650
+ "She is holding her hands together.",
1651
+ "She is waving with both hands.",
1652
+ "She is forming a heart shape with her fingers."
1653
+ ],
1654
+ "end_time": 87.75
1655
+ }
1656
+ ]
1657
+ }
1658
+ ]
ovo_bench_new.json ADDED
The diff for this file is too large to render. See raw diff
 
transfer_annotation_format.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json, argparse
2
+
3
class Transfer:
    """Formatters that flatten raw OVO-Bench records into per-query annotations.

    Each ``format_*`` static method handles one task type (CRR / REC / SSR);
    every other task falls through to :meth:`format_other`.  The ``__main__``
    dispatcher selects the method from the record's ``task`` field.
    """

    @staticmethod
    def format_crr(datum: dict) -> list:
        """Expand a CRR record into one yes/no annotation per ``test_info`` entry.

        ``test_info[i]['type']`` is 0 ("No") or 1 ("Yes"); the clip runs from
        the record's ``ask_time`` up to each query's ``realtime``.
        """
        question = f"""You're responsible of answering questions based on the video content. The following question are relevant to the latest frames, i.e. the end of the video.\n\n{datum['question']}\n\nDecide whether existing visual content, especially latest frames, i.e frames that near the end of the video, provide enough information for answering the question.\nReturn "Yes" if existing visual content has provided enough information;\nReturn "No" otherwise."""
        options = ["No", "Yes"]
        video_start = datum['ask_time']
        return [dict(
            id=datum['id'],
            task=datum['task'],
            question=question,
            video_start=video_start,
            video_end=test_info['realtime'],
            answer=options[test_info['type']],
            video=datum['video'],
        ) for test_info in datum['test_info']]

    @staticmethod
    def format_rec(datum: dict) -> list:
        """Expand a REC (repetition-count) record into one annotation per query.

        The answer is the integer count rendered as a string.  (The original
        indexed into a hard-coded list of "0".."10", which raised IndexError
        for counts above 10; ``str(count)`` is identical for 0-10 and works
        for any non-negative count.)
        """
        question = f"""You're watching a video in which people may perform a certaintype of action repetitively. The person performing are referred to as 'they' in the following statement. You're task is to count how many times did different people in the video perform this kind of action in total.\nNow, answer the following question:\n\nHow many times did they {datum['activity']}?\n\nYour response type should be INT, for example, 0/1/2/3.."""
        return [dict(
            id=datum['id'],
            task=datum['task'],
            question=question,
            video_start=0,
            video_end=test_info['realtime'],
            answer=str(test_info['count']),
            video=datum['video'],
        ) for test_info in datum['test_info']]

    @staticmethod
    def format_ssr(datum: dict) -> list:
        """Expand an SSR (step-recognition) record into yes/no annotations.

        Each ``test_info`` entry carries its own ``step`` text, so the
        question is rebuilt per entry; ``type`` is 0 ("No") or 1 ("Yes").
        """
        options = ["No", "Yes"]
        return [dict(
            id=datum['id'],
            task=datum['task'],
            question=f"""You're watching a tutorial video which contain a sequential of steps. The following is one step from the whole procedures:\n\n{test_info['step']}\n\nYour task is to decide: Is the man/woman in the video currently carrying out this step?\nReturn "Yes" if the man/woman in the video is currently performing this step;\nReturn "No" if not.""",
            video_start=0,
            video_end=test_info['realtime'],
            answer=options[test_info['type']],
            video=datum['video'],
        ) for test_info in datum['test_info']]

    @staticmethod
    def format_other(datum: dict) -> dict:
        """Reformat a multiple-choice record in place and return it.

        Renames ``realtime`` -> ``video_end``, prefixes each option with an
        "A."/"B."/... letter, and replaces the integer ``gt`` index with the
        corresponding letter in ``answer``.  NOTE: mutates ``datum``.
        """
        datum['video_start'] = 0
        datum['video_end'] = datum.pop('realtime')
        choices = ['A', 'B', 'C', 'D', 'E']
        datum['options'] = [f'{choices[i]}. {option}' for i, option in enumerate(datum['options'])]
        datum['answer'] = choices[datum.pop('gt')]
        return datum
60
+
61
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Format OVO-Bench dataset JSONL file.")
    parser.add_argument("--input", "-i", type=str, required=True, help="Path to input JSONL file.")
    parser.add_argument("--output", "-o", type=str, required=True, help="Path to save formatted JSONL file.")
    args = parser.parse_args()

    # `with` guarantees the handle is closed (the original passed a bare
    # `open(args.input)` to json.load and never closed it).
    with open(args.input, encoding='utf-8') as f:
        data = json.load(f)

    annos = []
    for datum in data:
        # Dispatch on the task name; tasks without a dedicated formatter
        # (the plain multiple-choice ones) fall back to `format_other`,
        # which yields a single record instead of a list.
        formatter = getattr(Transfer, 'format_' + datum['task'].lower(), None)
        if formatter is not None:
            annos.extend(formatter(datum))
        else:
            annos.append(Transfer.format_other(datum))

    # One JSON object per line (JSONL).
    with open(args.output, 'w', encoding='utf-8') as f:
        for anno in annos:
            f.write(json.dumps(anno) + '\n')
transform.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from collections import defaultdict
3
+
4
def transform_ovo_to_movienet_format(input_file, output_file):
    """Convert ``ovo_bench_new.json`` into a movienet_oe.json-like layout.

    Rules:
    1. Samples whose ``task`` is "REC", "SSR" or "CRR" are dropped.
    2. Each remaining sample becomes one output record whose
       ``conversations`` list holds a single question/answer entry.
       NOTE: no grouping is performed — samples sharing the same video
       stay as separate records (the original docstring claimed
       per-video merging, which the code never did).

    Args:
        input_file: Path of the raw OVO-Bench JSON file (a list of dicts).
        output_file: Path where the reformatted JSON list is written.
    """
    # Load the raw annotation list.
    with open(input_file, 'r', encoding='utf-8') as f:
        data = json.load(f)

    # Drop the excluded task types.
    filtered_data = [
        item for item in data
        if item.get('task') not in ['REC', 'SSR', 'CRR']
    ]

    print(f"原始样本数: {len(data)}")  # original sample count
    print(f"过滤后样本数: {len(filtered_data)}")  # sample count after filtering

    new_data = []
    for sample_dict in filtered_data:
        new_data.append({
            'video_id': sample_dict['id'],
            'video_path': 'data/ovobench/videos/' + sample_dict['video'],
            'task': sample_dict['task'],
            'conversations': [{
                "question": sample_dict['question'],
                # `gt` is the index of the correct choice inside `options`.
                "answer": sample_dict['options'][sample_dict['gt']],
                "choices": sample_dict['options'],
                "end_time": sample_dict['realtime'],
            }],
        })

    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(new_data, f, ensure_ascii=False, indent=4)
44
+
45
if __name__ == "__main__":
    # Convert the raw OVO-Bench annotations into the MovieNet-style layout.
    transform_ovo_to_movienet_format("ovo_bench_new.json", "ovobench_formatted.json")
videos/AutoEvalMetaData/14.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:20f73aead75bd60ca51f93a76e0567eb84e75841e4a6c69981852019c4fc78a5
3
+ size 96229381
videos/AutoEvalMetaData/274.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7465815020539ec3ff0bf4a6edeb057e0951d00748064907a29c0ac95fb3f2d
3
+ size 71984960
videos/AutoEvalMetaData/294.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6f994ce9bcb79e5aa53a192d5b1b9ccb1ad283b4b70043c37c122a4145352a03
3
+ size 185614401
videos/AutoEvalMetaData/48.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c675181b556ed3e6348cd150b15ca218578d186bc21123c6f4c878792f62555
3
+ size 125771518
videos/AutoEvalMetaData/89.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:db96dbe518c137d6b74e26c03a38a29e8dbb9f255e47d033a429d4e8b188d60b
3
+ size 92348444
videos/COIN/2vzC30892IE.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb121f9f7042ee31ce37637bc957af5cf2762cf4718e499baff2202147703b75
3
+ size 49859144
videos/COIN/8tZUVfEyytY.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f39409d742528be3a15390e648d1e5cf74ac3cd626acde2c8d76cf7d0c9a393f
3
+ size 33376966
videos/COIN/BM8H9eE6jhI.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc56c2491754d124bc073fdff3b4fc534b8151dc77044ebecc73e7b61b4b1caf
3
+ size 23445126
videos/COIN/Hf2AisK1wHY.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6319c47a0fc6077921cecefe8b1333514b519ddbdd8288b99a55126231b89091
3
+ size 32396096
videos/COIN/N_qj_iSrF64.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:75ddb40a8165604f42b5e6133ce0a0fd1c779089c30f5194ae59d0ebc5047271
3
+ size 21505521
videos/COIN/Q-uCII9Rtz0.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dacb3603a0b9ae09ca4964424a2b512384db73c972eec64635b6e5c795d7c09e
3
+ size 16878250
videos/COIN/QInuE1KwCZM.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:08e83668372a0792b0968acce9bcd108b995186e13be49617ee497a2ee5aa87a
3
+ size 21811363
videos/COIN/UBq35JWbBG0.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:44204e9e51edb48ee6e7d6340a32a6956d6bbe52cbf109b4b88a1b9b17b8667a
3
+ size 60396817
videos/COIN/V7SKv1tHWZ4.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e536557941bc9d2c2a56b41ee7ef2993899795565088fa29a166f6137996222
3
+ size 56180064
videos/COIN/fHv947jR_6U.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa70e064b9f8ff3d7e3fc8d4755ff2a4c99aeb647a5305a979f4e03819ce51a9
3
+ size 5970468
videos/COIN/xCJmXGMl54I.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dbd6b239ff45b7a0d8ed2238277be5bb3bd324a8baf34e87620ef1cbdbc3cac6
3
+ size 33607450
videos/COIN/xcpffGBVi8Y.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:875822ca48b7771783069d27e2568173ef3b5cb7ac660126116b044412a4d98d
3
+ size 28683556
videos/YouTube_Games/8gtpP2Toigg&list=PL3bIgEVl_gDzIfuotDjHjcXzUkB6ZvMXk&index=2.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dbb703475b1f1066e477d2b146ed2c0f4427e617812f31c80db7d99b5f17546e
3
+ size 639647939
videos/YouTube_Games/PLJ3VIGhVd3r8Int6IZT_v3S_BzG9RVfiG&index=2.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:de33c92278cfadc500e4686fdeedd36b6a3731ceac31205fc2fa59a8caac4863
3
+ size 786663462
videos/YouTube_Games/PLJ3VIGhVd3r8Int6IZT_v3S_BzG9RVfiG&index=3.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c92af9256a41e75bdb2b6c0fc0f3e944d83fbc9c6522e2c98e995f9513629893
3
+ size 767301783
videos/YouTube_Games/PLJ3VIGhVd3r8Int6IZT_v3S_BzG9RVfiG&index=4.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af95249b84c79632609ce33a84e540a5cd250197ed12fa557685bb798749c8e6
3
+ size 736556851
videos/YouTube_Games/PLJ3VIGhVd3r8Int6IZT_v3S_BzG9RVfiG&index=7.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ddbcb6ddd2935deff6dc257822a59392df200d46d74d48639d5f74690a4730f4
3
+ size 937609758
videos/YouTube_Games/QRJDDR_z3Lk&list=PL3bIgEVl_gDzIfuotDjHjcXzUkB6ZvMXk&index=5.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d1717242e05754919d21c915aad4f42ef3508677dc46636c9e13cf7cf0d5f68
3
+ size 731996074
videos/YouTube_Games/RiIYSrrwjuU&list=PL3bIgEVl_gDzIfuotDjHjcXzUkB6ZvMXk&index=6.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dfbcff44eb03e5c940ae1f97e240efb1ce292281b4ebae3512eaa1c796f96403
3
+ size 424600325
videos/YouTube_Games/m-7k8KfT9s8&list=PL3bIgEVl_gDzIfuotDjHjcXzUkB6ZvMXk&index=8.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67e96100777e5d96443d6befab020c43ba16f9fb5e3136fc29bff6edae815da0
3
+ size 502886103
videos/cross_task/VS4zWF9hQpQ.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b94a00c61f23c6c3564368cf33bc3cb685484c22d9ed90040e846f1fad0b8515
3
+ size 18409845
videos/cross_task/bQBNnsExUlg.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:817dfc7f9ec67a19c962f41d7c5a84d29cbc7299f576d5238ee9bce338137608
3
+ size 14381897
videos/cross_task/k5_89KhvpK4.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:08d46e26b4e292e2702fffe8bfde030b556732ecb624dd0cccf2183b6515595b
3
+ size 25221972
videos/ovo-bench-formatted.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
videos/perception_test/video_10889.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0943399cfcf3f05add2daa5053e9c21a051b82d782a89324256e0007c87c3454
3
+ size 13431199
videos/perception_test/video_5670.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5596dbdbbd4c12f2312cbfdc56338c17587275cf3506b54eb4910844b53cb45
3
+ size 24188915
videos/perception_test/video_9789.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c7cdd237585a488933575d582baf9a94b5a36a274fe60d6990fb7099aa1367e9
3
+ size 25312173
videos/star/WRW74.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e015e4d850cc8a6337834db90db36140db15e684e7d7f3392211358b55273b3
3
+ size 21657021
videos/thumos/thumos15_video_validation_0000405.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d957198ed75bf84077445425db9ff736915feafe917765d8f22a39503e3b6068
3
+ size 23429173
videos/thumos/thumos15_video_validation_0000435.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0a881914e41a2cda83b89fc05b584018c3065ba5c7bec21eeddaba8d9d8d0c6a
3
+ size 27994004
videos/thumos/thumos15_video_validation_0000442.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2abe3b9e91ed0b39774d1a083cf849912679499cede9a8a82bd8b2a10ec66e8b
3
+ size 50201314
videos/thumos/thumos15_video_validation_0000889.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:305d2b14c8a3335090e64b4f9b074d57a6bf07bace084559a7edb2ae48da9b30
3
+ size 73703612
videos/thumos/thumos15_video_validation_0001752.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dffdffd836498fa3100a1c4d648dadc78d40c572147db1d078930553e8d4f14a
3
+ size 5200154
videos/thumos/thumos15_video_validation_0001932.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9ea0f695b4d11ef189366c6d8ac1be201102b07624754e696a06069ea3d3c29
3
+ size 75997974
videos/thumos/video_test_0000324.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cc61510f17bd488424125db15380d7c5ba7f296602f2453c96a6808206eb134e
3
+ size 75663244
videos/thumos/video_test_0000357.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6edb8505d0c5b836f6b22976ae368d8bfa1132d526ffde2115d8ddf1e2e630e8
3
+ size 33664068
videos/thumos/video_test_0001072.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d69082c0f054d7037425a331dd899a63106c50539722c8d182a32714361fbef
3
+ size 130956498
videos/thumos/video_test_0001129.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7aff6f33265b35df2343321a04a22731bce485240a5f1ced486028e446fa1d97
3
+ size 21328563
videos/thumos/video_test_0001452.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d17aae1328fb200259593a9ef0fb805cd25fc4fb4dafc5db104d5abf39447931
3
+ size 21648971