# Isaac-GR00T/examples/SimplerEnv/custom_data_config.py
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gr00t.data.transform.base import ComposedModalityTransform, ModalityTransform
from gr00t.data.transform.concat import ConcatTransform
from gr00t.data.transform.state_action import StateActionToTensor, StateActionTransform
from gr00t.data.transform.video import (
VideoColorJitter,
VideoCrop,
VideoResize,
VideoToNumpy,
VideoToTensor,
)
from gr00t.experiment.data_config import BaseDataConfig, So100DataConfig
from gr00t.model.transforms import GR00TTransform


class FractalDataConfig(BaseDataConfig):
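    """Data config for the Fractal (Google robot / RT-1) dataset used by SimplerEnv.

    One RGB camera stream, an 8-D end-effector state (xyz position, quaternion
    rotation, gripper), and a 7-D action (xyz, roll/pitch/yaw, gripper).
    """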
video_keys = [
"video.image",
]
state_keys = [
"state.x",
"state.y",
"state.z",
"state.rx",
"state.ry",
"state.rz",
"state.rw",
"state.gripper",
]
action_keys = [
"action.x",
"action.y",
"action.z",
"action.roll",
"action.pitch",
"action.yaw",
"action.gripper",
]
language_keys = ["annotation.human.action.task_description"]
observation_indices = [0]
action_indices = list(range(16))

    def transform(self) -> ModalityTransform:
transforms = [
# video transforms
VideoToTensor(apply_to=self.video_keys),
VideoCrop(apply_to=self.video_keys, scale=0.95),
VideoResize(apply_to=self.video_keys, height=224, width=224, interpolation="linear"),
VideoColorJitter(
apply_to=self.video_keys,
brightness=0.3,
contrast=0.4,
saturation=0.5,
hue=0.08,
),
VideoToNumpy(apply_to=self.video_keys),
# state transforms
StateActionToTensor(apply_to=self.state_keys),
StateActionTransform(
apply_to=self.state_keys,
normalization_modes={key: "min_max" for key in self.state_keys},
),
# action transforms
StateActionToTensor(apply_to=self.action_keys),
StateActionTransform(
apply_to=self.action_keys,
normalization_modes={
"action.x": "mean_std",
"action.y": "mean_std",
"action.z": "mean_std",
"action.roll": "mean_std",
"action.pitch": "mean_std",
"action.yaw": "mean_std",
"action.gripper": "min_max",
},
),
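            # concat transform: merge the per-key tensors into flat state / action vectors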
ConcatTransform(
video_concat_order=self.video_keys,
state_concat_order=self.state_keys,
action_concat_order=self.action_keys,
),
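            # model transform: pack inputs and pad state / action up to the model's max dims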
GR00TTransform(
state_horizon=len(self.observation_indices),
action_horizon=len(self.action_indices),
max_state_dim=64,
max_action_dim=32,
),
]
return ComposedModalityTransform(transforms=transforms)
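
    # BaseDataConfig subclasses in gr00t normally also implement modality_config()
    # to declare which frame offsets each modality samples. A minimal sketch,
    # assuming the ModalityConfig container from gr00t.data.dataset; adjust the
    # import path if your gr00t version differs.
    def modality_config(self):
        from gr00t.data.dataset import ModalityConfig  # local import keeps the sketch self-contained

        return {
            "video": ModalityConfig(
                delta_indices=self.observation_indices, modality_keys=self.video_keys
            ),
            "state": ModalityConfig(
                delta_indices=self.observation_indices, modality_keys=self.state_keys
            ),
            "action": ModalityConfig(
                delta_indices=self.action_indices, modality_keys=self.action_keys
            ),
            "language": ModalityConfig(
                delta_indices=self.observation_indices, modality_keys=self.language_keys
            ),
        }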


# NOTE: we reuse the default So100DataConfig, which applies min_max normalization
# to all components; using a different normalization mode per component can
# sometimes lead to better performance.
class BridgeDataConfig(So100DataConfig):
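    """Data config for the Bridge (WidowX) dataset used by SimplerEnv.

    Reuses So100DataConfig's modality_config()/transform(); only the keys below
    are overridden. "state.pad" is presumably a padding slot in the stored 8-D
    state vector.
    """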
video_keys = [
"video.image_0",
]
state_keys = [
"state.x",
"state.y",
"state.z",
"state.roll",
"state.pitch",
"state.yaw",
"state.pad",
"state.gripper",
]
action_keys = [
"action.x",
"action.y",
"action.z",
"action.roll",
"action.pitch",
"action.yaw",
"action.gripper",
]
language_keys = ["annotation.human.action.task_description"]
observation_indices = [0]
action_indices = list(range(16))
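

# A hedged usage sketch: gr00t's training and evaluation entry points look data
# configs up by name in DATA_CONFIG_MAP (gr00t.experiment.data_config). The
# registry keys "fractal" and "bridge" below are illustrative, not mandated by
# this file.
if __name__ == "__main__":
    from gr00t.experiment.data_config import DATA_CONFIG_MAP

    DATA_CONFIG_MAP["fractal"] = FractalDataConfig()
    DATA_CONFIG_MAP["bridge"] = BridgeDataConfig()

    # Each config exposes its composed preprocessing pipeline via transform().
    pipeline = DATA_CONFIG_MAP["fractal"].transform()
    print(type(pipeline).__name__)  # -> ComposedModalityTransform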