ibru committed on
Commit
a5b9e8e
·
verified ·
1 Parent(s): b57e4f8

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. meta/data_config.py +779 -0
meta/data_config.py ADDED
@@ -0,0 +1,779 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from abc import ABC, abstractmethod
17
+
18
+ from gr00t.data.dataset import ModalityConfig
19
+ from gr00t.data.transform.base import ComposedModalityTransform, ModalityTransform
20
+ from gr00t.data.transform.concat import ConcatTransform
21
+ from gr00t.data.transform.state_action import StateActionToTensor, StateActionTransform
22
+ from gr00t.data.transform.video import (
23
+ VideoColorJitter,
24
+ VideoCrop,
25
+ VideoResize,
26
+ VideoToNumpy,
27
+ VideoToTensor,
28
+ )
29
+ from gr00t.model.transforms import GR00TTransform
30
+
31
+
32
class BaseDataConfig(ABC):
    """Abstract interface for an embodiment-specific data configuration.

    Concrete subclasses declare the modality keys (video/state/action/language)
    for one robot embodiment and provide the matching preprocessing pipeline.
    """

    @abstractmethod
    def modality_config(self) -> dict[str, ModalityConfig]:
        """Return the per-modality sampling configuration, keyed by modality name."""
        pass

    @abstractmethod
    def transform(self) -> ModalityTransform:
        """Return the composed preprocessing transform for this embodiment."""
        pass
40
+
41
+
42
+ ###########################################################################################
43
+
44
+
45
class Gr1ArmsOnlyDataConfig(BaseDataConfig):
    """Data config for the GR-1 humanoid using only arms and hands (ego-view camera)."""

    video_keys = ["video.ego_view"]
    state_keys = [
        "state.left_arm",
        "state.right_arm",
        "state.left_hand",
        "state.right_hand",
    ]
    action_keys = [
        "action.left_arm",
        "action.right_arm",
        "action.left_hand",
        "action.right_hand",
    ]
    language_keys = ["annotation.human.action.task_description"]
    observation_indices = [0]
    action_indices = list(range(16))

    def modality_config(self) -> dict[str, ModalityConfig]:
        """Return the per-modality sampling configuration.

        Observations (video / state / language) are sampled at the observation
        indices; actions are sampled over the full action horizon.
        """
        return {
            "video": ModalityConfig(
                delta_indices=self.observation_indices,
                modality_keys=self.video_keys,
            ),
            "state": ModalityConfig(
                delta_indices=self.observation_indices,
                modality_keys=self.state_keys,
            ),
            "action": ModalityConfig(
                delta_indices=self.action_indices,
                modality_keys=self.action_keys,
            ),
            "language": ModalityConfig(
                delta_indices=self.observation_indices,
                modality_keys=self.language_keys,
            ),
        }

    def transform(self) -> ModalityTransform:
        """Compose the video, state and action preprocessing pipeline."""
        # Video: tensor -> crop -> resize -> color jitter -> numpy.
        video_stages = [
            VideoToTensor(apply_to=self.video_keys),
            VideoCrop(apply_to=self.video_keys, scale=0.95),
            VideoResize(apply_to=self.video_keys, height=224, width=224, interpolation="linear"),
            VideoColorJitter(
                apply_to=self.video_keys,
                brightness=0.3,
                contrast=0.4,
                saturation=0.5,
                hue=0.08,
            ),
            VideoToNumpy(apply_to=self.video_keys),
        ]
        # State: tensor conversion then min-max normalization on every key.
        state_stages = [
            StateActionToTensor(apply_to=self.state_keys),
            StateActionTransform(
                apply_to=self.state_keys,
                normalization_modes={key: "min_max" for key in self.state_keys},
            ),
        ]
        # Action: same treatment as state.
        action_stages = [
            StateActionToTensor(apply_to=self.action_keys),
            StateActionTransform(
                apply_to=self.action_keys,
                normalization_modes={key: "min_max" for key in self.action_keys},
            ),
        ]
        # Concatenate modalities, then apply the model-specific packing.
        final_stages = [
            ConcatTransform(
                video_concat_order=self.video_keys,
                state_concat_order=self.state_keys,
                action_concat_order=self.action_keys,
            ),
            GR00TTransform(
                state_horizon=len(self.observation_indices),
                action_horizon=len(self.action_indices),
                max_state_dim=64,
                max_action_dim=32,
            ),
        ]
        return ComposedModalityTransform(
            transforms=video_stages + state_stages + action_stages + final_stages
        )
134
+
135
+
136
+ ###########################################################################################
137
+
138
+
139
class So100DataConfig(BaseDataConfig):
    """Data config for the SO-100 single-arm robot with a webcam view."""

    video_keys = ["video.webcam"]
    state_keys = ["state.single_arm", "state.gripper"]
    action_keys = ["action.single_arm", "action.gripper"]
    language_keys = ["annotation.human.task_description"]
    observation_indices = [0]
    action_indices = list(range(16))

    def modality_config(self) -> dict[str, ModalityConfig]:
        """Return the per-modality sampling configuration.

        Built from a small spec table: each modality maps to the delta indices
        it is sampled at and the keys it carries.
        """
        spec = {
            "video": (self.observation_indices, self.video_keys),
            "state": (self.observation_indices, self.state_keys),
            "action": (self.action_indices, self.action_keys),
            "language": (self.observation_indices, self.language_keys),
        }
        return {
            name: ModalityConfig(delta_indices=indices, modality_keys=keys)
            for name, (indices, keys) in spec.items()
        }

    def transform(self) -> ModalityTransform:
        """Compose the video, state and action preprocessing pipeline."""
        pipeline = []
        # Video: tensor -> crop -> resize -> color jitter -> numpy.
        pipeline.append(VideoToTensor(apply_to=self.video_keys))
        pipeline.append(VideoCrop(apply_to=self.video_keys, scale=0.95))
        pipeline.append(
            VideoResize(apply_to=self.video_keys, height=224, width=224, interpolation="linear")
        )
        pipeline.append(
            VideoColorJitter(
                apply_to=self.video_keys,
                brightness=0.3,
                contrast=0.4,
                saturation=0.5,
                hue=0.08,
            )
        )
        pipeline.append(VideoToNumpy(apply_to=self.video_keys))
        # State and action: tensor conversion plus min-max normalization.
        for keys in (self.state_keys, self.action_keys):
            pipeline.append(StateActionToTensor(apply_to=keys))
            pipeline.append(
                StateActionTransform(
                    apply_to=keys,
                    normalization_modes={key: "min_max" for key in keys},
                )
            )
        # Concatenate modalities, then apply the model-specific packing.
        pipeline.append(
            ConcatTransform(
                video_concat_order=self.video_keys,
                state_concat_order=self.state_keys,
                action_concat_order=self.action_keys,
            )
        )
        pipeline.append(
            GR00TTransform(
                state_horizon=len(self.observation_indices),
                action_horizon=len(self.action_indices),
                max_state_dim=64,
                max_action_dim=32,
            )
        )
        return ComposedModalityTransform(transforms=pipeline)
218
+
219
+
220
+ ###########################################################################################
221
+
222
+
223
class Gr1FullUpperBodyDataConfig(BaseDataConfig):
    """Data config for the GR-1 humanoid using the full upper body (arms, hands, waist, neck).

    Fix/consistency: the original hand-maintained per-key literal dicts of
    ``"min_max"`` modes that duplicated the key lists (the action dict was even
    written in a different order than ``action_keys``, inviting drift when keys
    change). Since every key uses the same mode, the mappings are now derived
    from the key lists, matching the sibling configs. Return annotations were
    also added for consistency with the base class.
    """

    video_keys = ["video.front_view"]
    state_keys = [
        "state.left_arm",
        "state.right_arm",
        "state.left_hand",
        "state.right_hand",
        "state.waist",
        "state.neck",
    ]
    action_keys = [
        "action.left_arm",
        "action.right_arm",
        "action.left_hand",
        "action.right_hand",
        "action.waist",
        "action.neck",
    ]
    language_keys = ["annotation.human.action.task_description"]
    observation_indices = [0]
    action_indices = list(range(16))

    def modality_config(self) -> dict[str, ModalityConfig]:
        """Return the per-modality sampling configuration.

        Observations (video / state / language) are sampled at the observation
        indices; actions cover the full 16-step action horizon.
        """
        video_modality = ModalityConfig(
            delta_indices=self.observation_indices,
            modality_keys=self.video_keys,
        )
        state_modality = ModalityConfig(
            delta_indices=self.observation_indices,
            modality_keys=self.state_keys,
        )
        action_modality = ModalityConfig(
            delta_indices=self.action_indices,
            modality_keys=self.action_keys,
        )
        language_modality = ModalityConfig(
            delta_indices=self.observation_indices,
            modality_keys=self.language_keys,
        )
        return {
            "video": video_modality,
            "state": state_modality,
            "action": action_modality,
            "language": language_modality,
        }

    def transform(self) -> ModalityTransform:
        """Compose the video, state and action preprocessing pipeline."""
        transforms = [
            # video transforms
            VideoToTensor(apply_to=self.video_keys),
            VideoCrop(apply_to=self.video_keys, scale=0.95),
            VideoResize(apply_to=self.video_keys, height=224, width=224, interpolation="linear"),
            VideoColorJitter(
                apply_to=self.video_keys,
                brightness=0.3,
                contrast=0.4,
                saturation=0.5,
                hue=0.08,
            ),
            VideoToNumpy(apply_to=self.video_keys),
            # state transforms — every state key is min-max normalized, so
            # derive the mode mapping from the key list instead of a literal dict
            StateActionToTensor(apply_to=self.state_keys),
            StateActionTransform(
                apply_to=self.state_keys,
                normalization_modes={key: "min_max" for key in self.state_keys},
            ),
            # action transforms (same: all keys min-max)
            StateActionToTensor(apply_to=self.action_keys),
            StateActionTransform(
                apply_to=self.action_keys,
                normalization_modes={key: "min_max" for key in self.action_keys},
            ),
            # concat transforms
            ConcatTransform(
                video_concat_order=self.video_keys,
                state_concat_order=self.state_keys,
                action_concat_order=self.action_keys,
            ),
            # model-specific transform
            GR00TTransform(
                state_horizon=len(self.observation_indices),
                action_horizon=len(self.action_indices),
                max_state_dim=64,
                max_action_dim=32,
            ),
        ]
        return ComposedModalityTransform(transforms=transforms)
325
+
326
+
327
+ ###########################################################################################
328
+
329
+
330
class BimanualPandaGripperDataConfig(BaseDataConfig):
    """Data config for a bimanual Panda setup with parallel-jaw grippers and three cameras."""

    video_keys = [
        "video.right_wrist_view",
        "video.left_wrist_view",
        "video.front_view",
    ]
    state_keys = [
        "state.right_arm_eef_pos",
        "state.right_arm_eef_quat",
        "state.right_gripper_qpos",
        "state.left_arm_eef_pos",
        "state.left_arm_eef_quat",
        "state.left_gripper_qpos",
    ]
    action_keys = [
        "action.right_arm_eef_pos",
        "action.right_arm_eef_rot",
        "action.right_gripper_close",
        "action.left_arm_eef_pos",
        "action.left_arm_eef_rot",
        "action.left_gripper_close",
    ]

    language_keys = ["annotation.human.action.task_description"]
    observation_indices = [0]
    action_indices = list(range(16))

    def modality_config(self):
        """Return the per-modality sampling configuration."""
        obs = self.observation_indices
        return {
            "video": ModalityConfig(delta_indices=obs, modality_keys=self.video_keys),
            "state": ModalityConfig(delta_indices=obs, modality_keys=self.state_keys),
            "action": ModalityConfig(
                delta_indices=self.action_indices, modality_keys=self.action_keys
            ),
            "language": ModalityConfig(delta_indices=obs, modality_keys=self.language_keys),
        }

    def transform(self):
        """Compose the video, state and action preprocessing pipeline."""
        # Positions and gripper joint states are min-max normalized; the
        # orientation quaternions are instead re-expressed as 6D rotations.
        state_norm_modes = {
            "state.right_arm_eef_pos": "min_max",
            "state.right_gripper_qpos": "min_max",
            "state.left_arm_eef_pos": "min_max",
            "state.left_gripper_qpos": "min_max",
        }
        state_rotations = {
            "state.right_arm_eef_quat": "rotation_6d",
            "state.left_arm_eef_quat": "rotation_6d",
        }
        # Only the gripper-close channels get (binary) normalization.
        action_norm_modes = {
            "action.right_gripper_close": "binary",
            "action.left_gripper_close": "binary",
        }
        transforms = [
            # video transforms
            VideoToTensor(apply_to=self.video_keys),
            VideoCrop(apply_to=self.video_keys, scale=0.95),
            VideoResize(apply_to=self.video_keys, height=224, width=224, interpolation="linear"),
            VideoColorJitter(
                apply_to=self.video_keys,
                brightness=0.3,
                contrast=0.4,
                saturation=0.5,
                hue=0.08,
            ),
            VideoToNumpy(apply_to=self.video_keys),
            # state transforms
            StateActionToTensor(apply_to=self.state_keys),
            StateActionTransform(
                apply_to=self.state_keys,
                normalization_modes=state_norm_modes,
                target_rotations=state_rotations,
            ),
            # action transforms
            StateActionToTensor(apply_to=self.action_keys),
            StateActionTransform(
                apply_to=self.action_keys,
                normalization_modes=action_norm_modes,
            ),
            # concat transforms
            ConcatTransform(
                video_concat_order=self.video_keys,
                state_concat_order=self.state_keys,
                action_concat_order=self.action_keys,
            ),
            # model-specific transform
            GR00TTransform(
                state_horizon=len(self.observation_indices),
                action_horizon=len(self.action_indices),
                max_state_dim=64,
                max_action_dim=32,
            ),
        ]
        return ComposedModalityTransform(transforms=transforms)
435
+
436
+
437
+ ###########################################################################################
438
+
439
+
440
class BimanualPandaHandDataConfig(BaseDataConfig):
    """Data config for a bimanual Panda setup with dexterous hands and three cameras."""

    video_keys = [
        "video.right_wrist_view",
        "video.left_wrist_view",
        "video.ego_view",
    ]
    state_keys = [
        "state.right_arm_eef_pos",
        "state.right_arm_eef_quat",
        "state.right_hand",
        "state.left_arm_eef_pos",
        "state.left_arm_eef_quat",
        "state.left_hand",
    ]
    action_keys = [
        "action.right_arm_eef_pos",
        "action.right_arm_eef_rot",
        "action.right_hand",
        "action.left_arm_eef_pos",
        "action.left_arm_eef_rot",
        "action.left_hand",
    ]
    language_keys = ["annotation.human.action.task_description"]
    observation_indices = [0]
    action_indices = list(range(16))

    def modality_config(self):
        """Return the per-modality sampling configuration."""
        obs = self.observation_indices
        return {
            "video": ModalityConfig(delta_indices=obs, modality_keys=self.video_keys),
            "state": ModalityConfig(delta_indices=obs, modality_keys=self.state_keys),
            "action": ModalityConfig(
                delta_indices=self.action_indices, modality_keys=self.action_keys
            ),
            "language": ModalityConfig(delta_indices=obs, modality_keys=self.language_keys),
        }

    def transform(self):
        """Compose the video, state and action preprocessing pipeline."""
        # EEF positions and hand joints are min-max normalized; the
        # orientation quaternions are re-expressed as 6D rotations.
        state_norm_modes = {
            "state.right_arm_eef_pos": "min_max",
            "state.right_hand": "min_max",
            "state.left_arm_eef_pos": "min_max",
            "state.left_hand": "min_max",
        }
        state_rotations = {
            "state.right_arm_eef_quat": "rotation_6d",
            "state.left_arm_eef_quat": "rotation_6d",
        }
        # Only the hand channels are normalized on the action side.
        action_norm_modes = {
            "action.right_hand": "min_max",
            "action.left_hand": "min_max",
        }
        transforms = [
            # video transforms
            VideoToTensor(apply_to=self.video_keys),
            VideoCrop(apply_to=self.video_keys, scale=0.95),
            VideoResize(apply_to=self.video_keys, height=224, width=224, interpolation="linear"),
            VideoColorJitter(
                apply_to=self.video_keys,
                brightness=0.3,
                contrast=0.4,
                saturation=0.5,
                hue=0.08,
            ),
            VideoToNumpy(apply_to=self.video_keys),
            # state transforms
            StateActionToTensor(apply_to=self.state_keys),
            StateActionTransform(
                apply_to=self.state_keys,
                normalization_modes=state_norm_modes,
                target_rotations=state_rotations,
            ),
            # action transforms
            StateActionToTensor(apply_to=self.action_keys),
            StateActionTransform(
                apply_to=self.action_keys,
                normalization_modes=action_norm_modes,
            ),
            # concat transforms
            ConcatTransform(
                video_concat_order=self.video_keys,
                state_concat_order=self.state_keys,
                action_concat_order=self.action_keys,
            ),
            # model-specific transform
            GR00TTransform(
                state_horizon=len(self.observation_indices),
                action_horizon=len(self.action_indices),
                max_state_dim=64,
                max_action_dim=32,
            ),
        ]
        return ComposedModalityTransform(transforms=transforms)
544
+
545
+
546
+ ###########################################################################################
547
+
548
+
549
class SinglePandaGripperDataConfig(BaseDataConfig):
    """Data config for a single Panda arm with gripper on a mobile base, three cameras."""

    video_keys = [
        "video.left_view",
        "video.right_view",
        "video.wrist_view",
    ]
    state_keys = [
        "state.end_effector_position_relative",
        "state.end_effector_rotation_relative",
        "state.gripper_qpos",
        "state.base_position",
        "state.base_rotation",
    ]
    action_keys = [
        "action.end_effector_position",
        "action.end_effector_rotation",
        "action.gripper_close",
        "action.base_motion",
        "action.control_mode",
    ]

    language_keys = ["annotation.human.action.task_description"]
    observation_indices = [0]
    action_indices = list(range(16))

    def modality_config(self):
        """Return the per-modality sampling configuration."""
        spec = {
            "video": (self.observation_indices, self.video_keys),
            "state": (self.observation_indices, self.state_keys),
            "action": (self.action_indices, self.action_keys),
            "language": (self.observation_indices, self.language_keys),
        }
        return {
            name: ModalityConfig(delta_indices=indices, modality_keys=keys)
            for name, (indices, keys) in spec.items()
        }

    def transform(self):
        """Compose the video, state and action preprocessing pipeline."""
        # All state channels are min-max normalized; the two rotation channels
        # are additionally re-expressed as 6D rotations.
        state_norm_modes = {
            "state.end_effector_position_relative": "min_max",
            "state.end_effector_rotation_relative": "min_max",
            "state.gripper_qpos": "min_max",
            "state.base_position": "min_max",
            "state.base_rotation": "min_max",
        }
        state_rotations = {
            "state.end_effector_rotation_relative": "rotation_6d",
            "state.base_rotation": "rotation_6d",
        }
        # Continuous action channels are min-max; gripper-close and
        # control-mode are binary flags.
        action_norm_modes = {
            "action.end_effector_position": "min_max",
            "action.end_effector_rotation": "min_max",
            "action.gripper_close": "binary",
            "action.base_motion": "min_max",
            "action.control_mode": "binary",
        }
        transforms = [
            # video transforms
            VideoToTensor(apply_to=self.video_keys),
            VideoCrop(apply_to=self.video_keys, scale=0.95),
            VideoResize(apply_to=self.video_keys, height=224, width=224, interpolation="linear"),
            VideoColorJitter(
                apply_to=self.video_keys,
                brightness=0.3,
                contrast=0.4,
                saturation=0.5,
                hue=0.08,
            ),
            VideoToNumpy(apply_to=self.video_keys),
            # state transforms
            StateActionToTensor(apply_to=self.state_keys),
            StateActionTransform(
                apply_to=self.state_keys,
                normalization_modes=state_norm_modes,
                target_rotations=state_rotations,
            ),
            # action transforms
            StateActionToTensor(apply_to=self.action_keys),
            StateActionTransform(
                apply_to=self.action_keys,
                normalization_modes=action_norm_modes,
            ),
            # concat transforms
            ConcatTransform(
                video_concat_order=self.video_keys,
                state_concat_order=self.state_keys,
                action_concat_order=self.action_keys,
            ),
            # model-specific transform
            GR00TTransform(
                state_horizon=len(self.observation_indices),
                action_horizon=len(self.action_indices),
                max_state_dim=64,
                max_action_dim=32,
            ),
        ]
        return ComposedModalityTransform(transforms=transforms)
656
+
657
+
658
+ ###########################################################################################
659
+
660
+
661
class Gr1ArmsWaistDataConfig(Gr1ArmsOnlyDataConfig):
    """GR-1 arms + waist variant of :class:`Gr1ArmsOnlyDataConfig`.

    Only the key sets differ from the parent: the waist joint is added to the
    state/action keys and the language annotation uses the coarse-action key.
    The parent's ``modality_config()`` / ``transform()`` read these class
    attributes via ``self``, so they work unchanged; the original pass-through
    overrides that merely called ``super()`` were redundant and are removed.
    """

    video_keys = ["video.ego_view"]
    state_keys = [
        "state.left_arm",
        "state.right_arm",
        "state.left_hand",
        "state.right_hand",
        "state.waist",
    ]
    action_keys = [
        "action.left_arm",
        "action.right_arm",
        "action.left_hand",
        "action.right_hand",
        "action.waist",
    ]
    language_keys = ["annotation.human.coarse_action"]
    observation_indices = [0]
    action_indices = list(range(16))
686
+
687
+
688
+ ###########################################################################################
689
class LekiwiDataConfig(BaseDataConfig):
    """Data config for the LeKiwi mobile manipulator (arm + three-wheel base, two cameras)."""

    video_keys = ["video.wrist", "video.front"]
    state_keys = [
        "state.shoulder_pan",
        "state.shoulder_lift",
        "state.elbow_flex",
        "state.wrist_flex",
        "state.wrist_roll",
        "state.gripper",
        "state.left_wheel",
        "state.back_wheel",
        "state.right_wheel",
    ]
    action_keys = [
        "action.shoulder_pan",
        "action.shoulder_lift",
        "action.elbow_flex",
        "action.wrist_flex",
        "action.wrist_roll",
        "action.gripper",
        "action.left_wheel",
        "action.back_wheel",
        "action.right_wheel",
    ]
    language_keys = ["annotation.human.task_description"]
    observation_indices = [0]
    action_indices = list(range(16))

    def modality_config(self) -> dict[str, ModalityConfig]:
        """Return the per-modality sampling configuration.

        Observations (video / state / language) are sampled at the observation
        indices; actions cover the full 16-step action horizon.
        """
        return {
            "video": ModalityConfig(
                delta_indices=self.observation_indices,
                modality_keys=self.video_keys,
            ),
            "state": ModalityConfig(
                delta_indices=self.observation_indices,
                modality_keys=self.state_keys,
            ),
            "action": ModalityConfig(
                delta_indices=self.action_indices,
                modality_keys=self.action_keys,
            ),
            "language": ModalityConfig(
                delta_indices=self.observation_indices,
                modality_keys=self.language_keys,
            ),
        }

    def transform(self) -> ModalityTransform:
        """Compose the video, state and action preprocessing pipeline."""
        # Video: tensor -> crop -> resize -> color jitter -> numpy.
        stages = [
            VideoToTensor(apply_to=self.video_keys),
            VideoCrop(apply_to=self.video_keys, scale=0.95),
            VideoResize(apply_to=self.video_keys, height=224, width=224, interpolation="linear"),
            VideoColorJitter(
                apply_to=self.video_keys,
                brightness=0.3,
                contrast=0.4,
                saturation=0.5,
                hue=0.08,
            ),
            VideoToNumpy(apply_to=self.video_keys),
        ]
        # State then action: tensor conversion plus min-max normalization on every key.
        for keys in (self.state_keys, self.action_keys):
            stages.append(StateActionToTensor(apply_to=keys))
            stages.append(
                StateActionTransform(
                    apply_to=keys,
                    normalization_modes={key: "min_max" for key in keys},
                )
            )
        # Concatenate modalities, then apply the model-specific packing.
        stages.append(
            ConcatTransform(
                video_concat_order=self.video_keys,
                state_concat_order=self.state_keys,
                action_concat_order=self.action_keys,
            )
        )
        stages.append(
            GR00TTransform(
                state_horizon=len(self.observation_indices),
                action_horizon=len(self.action_indices),
                max_state_dim=64,
                max_action_dim=32,
            )
        )
        return ComposedModalityTransform(transforms=stages)
768
+
769
+
770
# Registry: embodiment tag -> ready-to-use data config instance.
DATA_CONFIG_MAP = {
    "gr1_arms_waist": Gr1ArmsWaistDataConfig(),
    "gr1_arms_only": Gr1ArmsOnlyDataConfig(),
    "gr1_full_upper_body": Gr1FullUpperBodyDataConfig(),
    "bimanual_panda_gripper": BimanualPandaGripperDataConfig(),
    "bimanual_panda_hand": BimanualPandaHandDataConfig(),
    "single_panda_gripper": SinglePandaGripperDataConfig(),
    "so100": So100DataConfig(),
    "lekiwi": LekiwiDataConfig(),
}