# Hugging Face upload metadata (not source code) — kept as comments so the
# module remains valid Python:
# ibru's picture
# Upload folder using huggingface_hub
# a5b9e8e verified
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from gr00t.data.dataset import ModalityConfig
from gr00t.data.transform.base import ComposedModalityTransform, ModalityTransform
from gr00t.data.transform.concat import ConcatTransform
from gr00t.data.transform.state_action import StateActionToTensor, StateActionTransform
from gr00t.data.transform.video import (
VideoColorJitter,
VideoCrop,
VideoResize,
VideoToNumpy,
VideoToTensor,
)
from gr00t.model.transforms import GR00TTransform
class BaseDataConfig(ABC):
    """Abstract interface for per-embodiment dataset configurations.

    A concrete config supplies two things: the modality configuration
    (which keys and delta indices each modality uses) and the transform
    pipeline applied to every sample.
    """

    @abstractmethod
    def modality_config(self) -> dict[str, ModalityConfig]:
        """Return a mapping from modality name to its ModalityConfig."""
        ...

    @abstractmethod
    def transform(self) -> ModalityTransform:
        """Return the composed transform pipeline for this embodiment."""
        ...
###########################################################################################
class Gr1ArmsOnlyDataConfig(BaseDataConfig):
    """Data configuration for GR-1 arms-only data (ego camera, arm/hand
    states and actions, single observation frame, 16-step action horizon)."""

    video_keys = ["video.ego_view"]
    state_keys = [
        "state.left_arm",
        "state.right_arm",
        "state.left_hand",
        "state.right_hand",
    ]
    action_keys = [
        "action.left_arm",
        "action.right_arm",
        "action.left_hand",
        "action.right_hand",
    ]
    language_keys = ["annotation.human.action.task_description"]
    observation_indices = [0]
    action_indices = list(range(16))

    def modality_config(self) -> dict[str, ModalityConfig]:
        """Pair each modality's key list with its delta indices."""
        obs = self.observation_indices
        return {
            "video": ModalityConfig(delta_indices=obs, modality_keys=self.video_keys),
            "state": ModalityConfig(delta_indices=obs, modality_keys=self.state_keys),
            "action": ModalityConfig(
                delta_indices=self.action_indices, modality_keys=self.action_keys
            ),
            "language": ModalityConfig(delta_indices=obs, modality_keys=self.language_keys),
        }

    def transform(self) -> ModalityTransform:
        """Compose the preprocessing pipeline: video augmentation, state and
        action tensorization + min-max normalization, concatenation, and the
        model-specific GR00T transform."""
        # Video: tensor -> crop -> resize -> color jitter -> numpy.
        video_stage = [
            VideoToTensor(apply_to=self.video_keys),
            VideoCrop(apply_to=self.video_keys, scale=0.95),
            VideoResize(
                apply_to=self.video_keys, height=224, width=224, interpolation="linear"
            ),
            VideoColorJitter(
                apply_to=self.video_keys,
                brightness=0.3,
                contrast=0.4,
                saturation=0.5,
                hue=0.08,
            ),
            VideoToNumpy(apply_to=self.video_keys),
        ]
        # States: every key is min-max normalized.
        state_stage = [
            StateActionToTensor(apply_to=self.state_keys),
            StateActionTransform(
                apply_to=self.state_keys,
                normalization_modes=dict.fromkeys(self.state_keys, "min_max"),
            ),
        ]
        # Actions: same treatment as states.
        action_stage = [
            StateActionToTensor(apply_to=self.action_keys),
            StateActionTransform(
                apply_to=self.action_keys,
                normalization_modes=dict.fromkeys(self.action_keys, "min_max"),
            ),
        ]
        # Concatenate per-modality keys, then apply the model transform.
        tail_stage = [
            ConcatTransform(
                video_concat_order=self.video_keys,
                state_concat_order=self.state_keys,
                action_concat_order=self.action_keys,
            ),
            GR00TTransform(
                state_horizon=len(self.observation_indices),
                action_horizon=len(self.action_indices),
                max_state_dim=64,
                max_action_dim=32,
            ),
        ]
        return ComposedModalityTransform(
            transforms=video_stage + state_stage + action_stage + tail_stage
        )
###########################################################################################
class So100DataConfig(BaseDataConfig):
    """Data configuration for the SO-100 single-arm robot with a webcam."""

    video_keys = ["video.webcam"]
    state_keys = ["state.single_arm", "state.gripper"]
    action_keys = ["action.single_arm", "action.gripper"]
    language_keys = ["annotation.human.task_description"]
    observation_indices = [0]
    action_indices = list(range(16))

    def modality_config(self) -> dict[str, ModalityConfig]:
        """Build the per-modality ModalityConfig mapping."""

        def _cfg(delta_indices, modality_keys):
            # Small helper so each entry below stays on one line.
            return ModalityConfig(delta_indices=delta_indices, modality_keys=modality_keys)

        return {
            "video": _cfg(self.observation_indices, self.video_keys),
            "state": _cfg(self.observation_indices, self.state_keys),
            "action": _cfg(self.action_indices, self.action_keys),
            "language": _cfg(self.observation_indices, self.language_keys),
        }

    def transform(self) -> ModalityTransform:
        """Compose video augmentation, state/action normalization,
        concatenation, and the model-specific GR00T transform."""
        # All state and action channels are min-max normalized.
        state_modes = {key: "min_max" for key in self.state_keys}
        action_modes = {key: "min_max" for key in self.action_keys}
        steps = [
            # Video: tensor -> crop -> resize -> color jitter -> numpy.
            VideoToTensor(apply_to=self.video_keys),
            VideoCrop(apply_to=self.video_keys, scale=0.95),
            VideoResize(
                apply_to=self.video_keys, height=224, width=224, interpolation="linear"
            ),
            VideoColorJitter(
                apply_to=self.video_keys,
                brightness=0.3,
                contrast=0.4,
                saturation=0.5,
                hue=0.08,
            ),
            VideoToNumpy(apply_to=self.video_keys),
            # States.
            StateActionToTensor(apply_to=self.state_keys),
            StateActionTransform(apply_to=self.state_keys, normalization_modes=state_modes),
            # Actions.
            StateActionToTensor(apply_to=self.action_keys),
            StateActionTransform(apply_to=self.action_keys, normalization_modes=action_modes),
            # Concatenation in the declared key order.
            ConcatTransform(
                video_concat_order=self.video_keys,
                state_concat_order=self.state_keys,
                action_concat_order=self.action_keys,
            ),
            # Model-specific transform.
            GR00TTransform(
                state_horizon=len(self.observation_indices),
                action_horizon=len(self.action_indices),
                max_state_dim=64,
                max_action_dim=32,
            ),
        ]
        return ComposedModalityTransform(transforms=steps)
###########################################################################################
class Gr1FullUpperBodyDataConfig(BaseDataConfig):
    """Data config for the GR-1 full upper body (arms, hands, waist, neck).

    Same pipeline shape as the other GR-1 configs: one observation frame,
    16-step action horizon, min-max normalization of every state/action key.
    """

    video_keys = ["video.front_view"]
    state_keys = [
        "state.left_arm",
        "state.right_arm",
        "state.left_hand",
        "state.right_hand",
        "state.waist",
        "state.neck",
    ]
    action_keys = [
        "action.left_arm",
        "action.right_arm",
        "action.left_hand",
        "action.right_hand",
        "action.waist",
        "action.neck",
    ]
    language_keys = ["annotation.human.action.task_description"]
    observation_indices = [0]
    action_indices = list(range(16))

    def modality_config(self):
        """Return the ModalityConfig for video/state/action/language."""
        video_modality = ModalityConfig(
            delta_indices=self.observation_indices,
            modality_keys=self.video_keys,
        )
        state_modality = ModalityConfig(
            delta_indices=self.observation_indices,
            modality_keys=self.state_keys,
        )
        action_modality = ModalityConfig(
            delta_indices=self.action_indices,
            modality_keys=self.action_keys,
        )
        language_modality = ModalityConfig(
            delta_indices=self.observation_indices,
            modality_keys=self.language_keys,
        )
        modality_configs = {
            "video": video_modality,
            "state": state_modality,
            "action": action_modality,
            "language": language_modality,
        }
        return modality_configs

    def transform(self):
        """Compose video augmentation, state/action normalization,
        concatenation, and the model-specific GR00T transform."""
        transforms = [
            # video transforms
            VideoToTensor(apply_to=self.video_keys),
            VideoCrop(apply_to=self.video_keys, scale=0.95),
            VideoResize(apply_to=self.video_keys, height=224, width=224, interpolation="linear"),
            VideoColorJitter(
                apply_to=self.video_keys,
                brightness=0.3,
                contrast=0.4,
                saturation=0.5,
                hue=0.08,
            ),
            # state transforms
            VideoToNumpy(apply_to=self.video_keys),
            StateActionToTensor(apply_to=self.state_keys),
            StateActionTransform(
                apply_to=self.state_keys,
                # Derive the mode mapping from state_keys instead of a
                # hand-written literal dict, so the two cannot drift apart
                # (matches the sibling configs in this file).
                normalization_modes={key: "min_max" for key in self.state_keys},
            ),
            # action transforms
            StateActionToTensor(apply_to=self.action_keys),
            StateActionTransform(
                apply_to=self.action_keys,
                normalization_modes={key: "min_max" for key in self.action_keys},
            ),
            # concat transforms
            ConcatTransform(
                video_concat_order=self.video_keys,
                state_concat_order=self.state_keys,
                action_concat_order=self.action_keys,
            ),
            # model-specific transform
            GR00TTransform(
                state_horizon=len(self.observation_indices),
                action_horizon=len(self.action_indices),
                max_state_dim=64,
                max_action_dim=32,
            ),
        ]
        return ComposedModalityTransform(transforms=transforms)
###########################################################################################
class BimanualPandaGripperDataConfig(BaseDataConfig):
    """Data config for a bimanual Panda setup with parallel-jaw grippers.

    States: per-arm EEF position + quaternion and gripper joint positions.
    Actions: per-arm EEF position/rotation targets and gripper-close flags.
    """

    video_keys = [
        "video.right_wrist_view",
        "video.left_wrist_view",
        "video.front_view",
    ]
    state_keys = [
        "state.right_arm_eef_pos",
        "state.right_arm_eef_quat",
        "state.right_gripper_qpos",
        "state.left_arm_eef_pos",
        "state.left_arm_eef_quat",
        "state.left_gripper_qpos",
    ]
    action_keys = [
        "action.right_arm_eef_pos",
        "action.right_arm_eef_rot",
        "action.right_gripper_close",
        "action.left_arm_eef_pos",
        "action.left_arm_eef_rot",
        "action.left_gripper_close",
    ]
    language_keys = ["annotation.human.action.task_description"]
    # Single observation frame; 16-step action horizon.
    observation_indices = [0]
    action_indices = list(range(16))

    def modality_config(self):
        """Return a ModalityConfig per modality, pairing each key list with
        its delta indices (observation indices for video/state/language,
        action indices for actions)."""
        video_modality = ModalityConfig(
            delta_indices=self.observation_indices,
            modality_keys=self.video_keys,
        )
        state_modality = ModalityConfig(
            delta_indices=self.observation_indices,
            modality_keys=self.state_keys,
        )
        action_modality = ModalityConfig(
            delta_indices=self.action_indices,
            modality_keys=self.action_keys,
        )
        language_modality = ModalityConfig(
            delta_indices=self.observation_indices,
            modality_keys=self.language_keys,
        )
        modality_configs = {
            "video": video_modality,
            "state": state_modality,
            "action": action_modality,
            "language": language_modality,
        }
        return modality_configs

    def transform(self):
        """Compose video augmentation, state/action tensorization and
        normalization, concatenation, and the model-specific transform."""
        transforms = [
            # video transforms
            VideoToTensor(apply_to=self.video_keys),
            VideoCrop(apply_to=self.video_keys, scale=0.95),
            VideoResize(apply_to=self.video_keys, height=224, width=224, interpolation="linear"),
            VideoColorJitter(
                apply_to=self.video_keys,
                brightness=0.3,
                contrast=0.4,
                saturation=0.5,
                hue=0.08,
            ),
            VideoToNumpy(apply_to=self.video_keys),
            # state transforms
            StateActionToTensor(apply_to=self.state_keys),
            StateActionTransform(
                apply_to=self.state_keys,
                # Positions and gripper joints are min-max normalized; the
                # quaternion keys instead get a rotation_6d target below and
                # have no normalization mode.
                normalization_modes={
                    "state.right_arm_eef_pos": "min_max",
                    "state.right_gripper_qpos": "min_max",
                    "state.left_arm_eef_pos": "min_max",
                    "state.left_gripper_qpos": "min_max",
                },
                target_rotations={
                    "state.right_arm_eef_quat": "rotation_6d",
                    "state.left_arm_eef_quat": "rotation_6d",
                },
            ),
            # action transforms
            StateActionToTensor(apply_to=self.action_keys),
            StateActionTransform(
                apply_to=self.action_keys,
                # Only the gripper-close channels are binarized.
                # NOTE(review): the EEF pos/rot action keys have no
                # normalization entry — confirm they are intended to pass
                # through unnormalized.
                normalization_modes={
                    "action.right_gripper_close": "binary",
                    "action.left_gripper_close": "binary",
                },
            ),
            # concat transforms
            ConcatTransform(
                video_concat_order=self.video_keys,
                state_concat_order=self.state_keys,
                action_concat_order=self.action_keys,
            ),
            # model-specific transform
            GR00TTransform(
                state_horizon=len(self.observation_indices),
                action_horizon=len(self.action_indices),
                max_state_dim=64,
                max_action_dim=32,
            ),
        ]
        return ComposedModalityTransform(transforms=transforms)
###########################################################################################
class BimanualPandaHandDataConfig(BaseDataConfig):
    """Data config for a bimanual Panda setup with dexterous hands.

    States: per-arm EEF position + quaternion and hand joint values.
    Actions: per-arm EEF position/rotation targets and hand commands.
    """

    video_keys = [
        "video.right_wrist_view",
        "video.left_wrist_view",
        "video.ego_view",
    ]
    state_keys = [
        "state.right_arm_eef_pos",
        "state.right_arm_eef_quat",
        "state.right_hand",
        "state.left_arm_eef_pos",
        "state.left_arm_eef_quat",
        "state.left_hand",
    ]
    action_keys = [
        "action.right_arm_eef_pos",
        "action.right_arm_eef_rot",
        "action.right_hand",
        "action.left_arm_eef_pos",
        "action.left_arm_eef_rot",
        "action.left_hand",
    ]
    language_keys = ["annotation.human.action.task_description"]
    # Single observation frame; 16-step action horizon.
    observation_indices = [0]
    action_indices = list(range(16))

    def modality_config(self):
        """Return a ModalityConfig per modality, pairing each key list with
        its delta indices."""
        video_modality = ModalityConfig(
            delta_indices=self.observation_indices,
            modality_keys=self.video_keys,
        )
        state_modality = ModalityConfig(
            delta_indices=self.observation_indices,
            modality_keys=self.state_keys,
        )
        action_modality = ModalityConfig(
            delta_indices=self.action_indices,
            modality_keys=self.action_keys,
        )
        language_modality = ModalityConfig(
            delta_indices=self.observation_indices,
            modality_keys=self.language_keys,
        )
        modality_configs = {
            "video": video_modality,
            "state": state_modality,
            "action": action_modality,
            "language": language_modality,
        }
        return modality_configs

    def transform(self):
        """Compose video augmentation, state/action tensorization and
        normalization, concatenation, and the model-specific transform."""
        transforms = [
            # video transforms
            VideoToTensor(apply_to=self.video_keys),
            VideoCrop(apply_to=self.video_keys, scale=0.95),
            VideoResize(apply_to=self.video_keys, height=224, width=224, interpolation="linear"),
            VideoColorJitter(
                apply_to=self.video_keys,
                brightness=0.3,
                contrast=0.4,
                saturation=0.5,
                hue=0.08,
            ),
            VideoToNumpy(apply_to=self.video_keys),
            # state transforms
            StateActionToTensor(apply_to=self.state_keys),
            StateActionTransform(
                apply_to=self.state_keys,
                # EEF positions and hand joints are min-max normalized; the
                # quaternion keys instead get a rotation_6d target below and
                # have no normalization mode.
                normalization_modes={
                    "state.right_arm_eef_pos": "min_max",
                    "state.right_hand": "min_max",
                    "state.left_arm_eef_pos": "min_max",
                    "state.left_hand": "min_max",
                },
                target_rotations={
                    "state.right_arm_eef_quat": "rotation_6d",
                    "state.left_arm_eef_quat": "rotation_6d",
                },
            ),
            # action transforms
            StateActionToTensor(apply_to=self.action_keys),
            StateActionTransform(
                apply_to=self.action_keys,
                # Only the hand channels are normalized.
                # NOTE(review): the EEF pos/rot action keys have no
                # normalization entry — confirm they are intended to pass
                # through unnormalized.
                normalization_modes={
                    "action.right_hand": "min_max",
                    "action.left_hand": "min_max",
                },
            ),
            # concat transforms
            ConcatTransform(
                video_concat_order=self.video_keys,
                state_concat_order=self.state_keys,
                action_concat_order=self.action_keys,
            ),
            # model-specific transform
            GR00TTransform(
                state_horizon=len(self.observation_indices),
                action_horizon=len(self.action_indices),
                max_state_dim=64,
                max_action_dim=32,
            ),
        ]
        return ComposedModalityTransform(transforms=transforms)
###########################################################################################
class SinglePandaGripperDataConfig(BaseDataConfig):
    """Data config for a single Panda arm with gripper on a mobile base.

    States: relative EEF pose, gripper joint positions, and base pose.
    Actions: EEF pose targets, gripper-close flag, base motion, and a
    control-mode flag.
    """

    video_keys = [
        "video.left_view",
        "video.right_view",
        "video.wrist_view",
    ]
    state_keys = [
        "state.end_effector_position_relative",
        "state.end_effector_rotation_relative",
        "state.gripper_qpos",
        "state.base_position",
        "state.base_rotation",
    ]
    action_keys = [
        "action.end_effector_position",
        "action.end_effector_rotation",
        "action.gripper_close",
        "action.base_motion",
        "action.control_mode",
    ]
    language_keys = ["annotation.human.action.task_description"]
    # Single observation frame; 16-step action horizon.
    observation_indices = [0]
    action_indices = list(range(16))

    def modality_config(self):
        """Return a ModalityConfig per modality, pairing each key list with
        its delta indices."""
        video_modality = ModalityConfig(
            delta_indices=self.observation_indices,
            modality_keys=self.video_keys,
        )
        state_modality = ModalityConfig(
            delta_indices=self.observation_indices,
            modality_keys=self.state_keys,
        )
        action_modality = ModalityConfig(
            delta_indices=self.action_indices,
            modality_keys=self.action_keys,
        )
        language_modality = ModalityConfig(
            delta_indices=self.observation_indices,
            modality_keys=self.language_keys,
        )
        modality_configs = {
            "video": video_modality,
            "state": state_modality,
            "action": action_modality,
            "language": language_modality,
        }
        return modality_configs

    def transform(self):
        """Compose video augmentation, state/action tensorization and
        normalization, concatenation, and the model-specific transform."""
        transforms = [
            # video transforms
            VideoToTensor(apply_to=self.video_keys),
            VideoCrop(apply_to=self.video_keys, scale=0.95),
            VideoResize(apply_to=self.video_keys, height=224, width=224, interpolation="linear"),
            VideoColorJitter(
                apply_to=self.video_keys,
                brightness=0.3,
                contrast=0.4,
                saturation=0.5,
                hue=0.08,
            ),
            VideoToNumpy(apply_to=self.video_keys),
            # state transforms
            StateActionToTensor(apply_to=self.state_keys),
            StateActionTransform(
                apply_to=self.state_keys,
                # NOTE(review): the two rotation state keys are listed both
                # under min_max normalization and under rotation_6d target
                # rotations — unlike the bimanual configs, which omit rotation
                # keys from normalization_modes. Confirm this interaction is
                # intended.
                normalization_modes={
                    "state.end_effector_position_relative": "min_max",
                    "state.end_effector_rotation_relative": "min_max",
                    "state.gripper_qpos": "min_max",
                    "state.base_position": "min_max",
                    "state.base_rotation": "min_max",
                },
                target_rotations={
                    "state.end_effector_rotation_relative": "rotation_6d",
                    "state.base_rotation": "rotation_6d",
                },
            ),
            # action transforms
            StateActionToTensor(apply_to=self.action_keys),
            StateActionTransform(
                apply_to=self.action_keys,
                # Continuous channels are min-max normalized; the two flag
                # channels (gripper_close, control_mode) are binarized.
                normalization_modes={
                    "action.end_effector_position": "min_max",
                    "action.end_effector_rotation": "min_max",
                    "action.gripper_close": "binary",
                    "action.base_motion": "min_max",
                    "action.control_mode": "binary",
                },
            ),
            # concat transforms
            ConcatTransform(
                video_concat_order=self.video_keys,
                state_concat_order=self.state_keys,
                action_concat_order=self.action_keys,
            ),
            # model-specific transform
            GR00TTransform(
                state_horizon=len(self.observation_indices),
                action_horizon=len(self.action_indices),
                max_state_dim=64,
                max_action_dim=32,
            ),
        ]
        return ComposedModalityTransform(transforms=transforms)
###########################################################################################
class Gr1ArmsWaistDataConfig(Gr1ArmsOnlyDataConfig):
    """GR-1 arms + waist variant of Gr1ArmsOnlyDataConfig.

    Only the state/action key lists (adding the waist) and the language
    annotation key differ from the parent. The `video_keys`,
    `observation_indices`, and `action_indices` attributes and the
    `modality_config()` / `transform()` methods are inherited unchanged —
    the previous redundant re-definitions and pass-through `super()`
    overrides have been removed (behavior is identical).
    """

    state_keys = [
        "state.left_arm",
        "state.right_arm",
        "state.left_hand",
        "state.right_hand",
        "state.waist",
    ]
    action_keys = [
        "action.left_arm",
        "action.right_arm",
        "action.left_hand",
        "action.right_hand",
        "action.waist",
    ]
    language_keys = ["annotation.human.coarse_action"]
###########################################################################################
class LekiwiDataConfig(BaseDataConfig):
    """Data configuration for the LeKiwi robot (arm joints, gripper, and
    three wheel channels, with wrist and front cameras)."""

    video_keys = ["video.wrist", "video.front"]
    # Arm joints, gripper, then the three wheel channels.
    state_keys = [
        "state.shoulder_pan",
        "state.shoulder_lift",
        "state.elbow_flex",
        "state.wrist_flex",
        "state.wrist_roll",
        "state.gripper",
        "state.left_wheel",
        "state.back_wheel",
        "state.right_wheel",
    ]
    action_keys = [
        "action.shoulder_pan",
        "action.shoulder_lift",
        "action.elbow_flex",
        "action.wrist_flex",
        "action.wrist_roll",
        "action.gripper",
        "action.left_wheel",
        "action.back_wheel",
        "action.right_wheel",
    ]
    language_keys = ["annotation.human.task_description"]
    observation_indices = [0]
    action_indices = list(range(16))

    def modality_config(self) -> dict[str, ModalityConfig]:
        """Pair each modality's key list with its delta indices."""
        obs_idx = self.observation_indices
        return {
            "video": ModalityConfig(delta_indices=obs_idx, modality_keys=self.video_keys),
            "state": ModalityConfig(delta_indices=obs_idx, modality_keys=self.state_keys),
            "action": ModalityConfig(
                delta_indices=self.action_indices, modality_keys=self.action_keys
            ),
            "language": ModalityConfig(delta_indices=obs_idx, modality_keys=self.language_keys),
        }

    def transform(self) -> ModalityTransform:
        """Compose video augmentation, state/action min-max normalization,
        concatenation, and the model-specific GR00T transform."""
        pipeline = []
        # Video: tensor -> crop -> resize -> color jitter -> numpy.
        pipeline += [
            VideoToTensor(apply_to=self.video_keys),
            VideoCrop(apply_to=self.video_keys, scale=0.95),
            VideoResize(
                apply_to=self.video_keys, height=224, width=224, interpolation="linear"
            ),
            VideoColorJitter(
                apply_to=self.video_keys,
                brightness=0.3,
                contrast=0.4,
                saturation=0.5,
                hue=0.08,
            ),
            VideoToNumpy(apply_to=self.video_keys),
        ]
        # States then actions: tensorize and min-max normalize every key.
        for keys in (self.state_keys, self.action_keys):
            pipeline += [
                StateActionToTensor(apply_to=keys),
                StateActionTransform(
                    apply_to=keys,
                    normalization_modes={key: "min_max" for key in keys},
                ),
            ]
        # Concatenate per-modality keys, then apply the model transform.
        pipeline += [
            ConcatTransform(
                video_concat_order=self.video_keys,
                state_concat_order=self.state_keys,
                action_concat_order=self.action_keys,
            ),
            GR00TTransform(
                state_horizon=len(self.observation_indices),
                action_horizon=len(self.action_indices),
                max_state_dim=64,
                max_action_dim=32,
            ),
        ]
        return ComposedModalityTransform(transforms=pipeline)
# Registry of named data-config instances. Presumably consumers select a
# config by its embodiment tag (e.g. "so100") and call modality_config() /
# transform() on it — confirm against callers.
DATA_CONFIG_MAP = {
    "gr1_arms_waist": Gr1ArmsWaistDataConfig(),
    "gr1_arms_only": Gr1ArmsOnlyDataConfig(),
    "gr1_full_upper_body": Gr1FullUpperBodyDataConfig(),
    "bimanual_panda_gripper": BimanualPandaGripperDataConfig(),
    "bimanual_panda_hand": BimanualPandaHandDataConfig(),
    "single_panda_gripper": SinglePandaGripperDataConfig(),
    "so100": So100DataConfig(),
    "lekiwi": LekiwiDataConfig()
}