{
  "featuresDict": {
    "features": {
      "episode_metadata": {
        "featuresDict": {
          "features": {
            "episode_id": {
| "description": "Episode ID from h5. defined as traj_n", | |
| "pythonClassName": "tensorflow_datasets.core.features.text_feature.Text", | |
| "text": {} | |
| }, | |
| "file_path": { | |
| "description": "Path to the original data file including dataset name represented as timestamp.", | |
| "pythonClassName": "tensorflow_datasets.core.features.text_feature.Text", | |
| "text": {} | |
| } | |
| } | |
| }, | |
| "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict" | |
| }, | |
| "steps": { | |
| "pythonClassName": "tensorflow_datasets.core.features.dataset_feature.Dataset", | |
| "sequence": { | |
| "feature": { | |
| "featuresDict": { | |
| "features": { | |
| "action": { | |
| "description": "Action according to maniskill pd_ee_delta_pose format [3x RPY ee orientation in robot frame, 1x gripper position (-1 is closed and 1 is open)].", | |
| "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor", | |
| "tensor": { | |
| "dtype": "float32", | |
| "encoding": "none", | |
| "shape": { | |
| "dimensions": [ | |
| "7" | |
| ] | |
| } | |
| } | |
| }, | |
| "discount": { | |
| "description": "Discount if provided, default to 1.", | |
| "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar", | |
| "tensor": { | |
| "dtype": "float32", | |
| "encoding": "none", | |
| "shape": {} | |
| } | |
| }, | |
| "env_id": { | |
| "description": "env registration id.", | |
| "pythonClassName": "tensorflow_datasets.core.features.text_feature.Text", | |
| "text": {} | |
| }, | |
| "is_first": { | |
| "description": "True on first step of the episode.", | |
| "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar", | |
| "tensor": { | |
| "dtype": "bool", | |
| "encoding": "none", | |
| "shape": {} | |
| } | |
| }, | |
| "is_last": { | |
| "description": "True on last step of the episode.", | |
| "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar", | |
| "tensor": { | |
| "dtype": "bool", | |
| "encoding": "none", | |
| "shape": {} | |
| } | |
| }, | |
| "is_terminal": { | |
| "description": "True on last step of the episode if it is a terminal step, True for demos.", | |
| "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar", | |
| "tensor": { | |
| "dtype": "bool", | |
| "encoding": "none", | |
| "shape": {} | |
| } | |
| }, | |
| "language_embedding": { | |
| "description": "Kona language embedding. See https://tfhub.dev/google/universal-sentence-encoder-large/5", | |
| "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor", | |
| "tensor": { | |
| "dtype": "float32", | |
| "encoding": "none", | |
| "shape": { | |
| "dimensions": [ | |
| "512" | |
| ] | |
| } | |
| } | |
| }, | |
| "language_instruction": { | |
| "description": "Language Instruction.", | |
| "pythonClassName": "tensorflow_datasets.core.features.text_feature.Text", | |
| "text": {} | |
| }, | |
| "observation": { | |
| "featuresDict": { | |
| "features": { | |
| "gripper": { | |
| "description": "2 finger joints.", | |
| "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor", | |
| "tensor": { | |
| "dtype": "float32", | |
| "encoding": "none", | |
| "shape": { | |
| "dimensions": [ | |
| "2" | |
| ] | |
| } | |
| } | |
| }, | |
| "proprio": { | |
| "description": "7 joints proprioception, consists of [j1, j2, j3, j4, j5, j6, j7].", | |
| "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor", | |
| "tensor": { | |
| "dtype": "float32", | |
| "encoding": "none", | |
| "shape": { | |
| "dimensions": [ | |
| "7" | |
| ] | |
| } | |
| } | |
| }, | |
| "rgb_0": { | |
| "description": "Main RGB camera.", | |
| "image": { | |
| "dtype": "uint8", | |
| "encodingFormat": "png", | |
| "shape": { | |
| "dimensions": [ | |
| "256", | |
| "256", | |
| "3" | |
| ] | |
| } | |
| }, | |
| "pythonClassName": "tensorflow_datasets.core.features.image_feature.Image" | |
| }, | |
| "rgb_1": { | |
| "description": "Human render RGB camera in maniskill, shows clues about the task not present in rgb_0.", | |
| "image": { | |
| "dtype": "uint8", | |
| "encodingFormat": "png", | |
| "shape": { | |
| "dimensions": [ | |
| "256", | |
| "256", | |
| "3" | |
| ] | |
| } | |
| }, | |
| "pythonClassName": "tensorflow_datasets.core.features.image_feature.Image" | |
| }, | |
| "tquat": { | |
| "description": "3 translation 4 quaternion.", | |
| "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor", | |
| "tensor": { | |
| "dtype": "float32", | |
| "encoding": "none", | |
| "shape": { | |
| "dimensions": [ | |
| "7" | |
| ] | |
| } | |
| } | |
| } | |
| } | |
| }, | |
| "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict" | |
| }, | |
| "rcs_action": { | |
| "description": "Action according to rcs format [3x translation, 3x RPY ee orientation in robot frame, 1x gripper position (0 is closed and 1 is open)].", | |
| "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor", | |
| "tensor": { | |
| "dtype": "float32", | |
| "encoding": "none", | |
| "shape": { | |
| "dimensions": [ | |
| "7" | |
| ] | |
| } | |
| } | |
| }, | |
| "reward": { | |
| "description": "Reward if provided, 1 on final step for demos.", | |
| "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar", | |
| "tensor": { | |
| "dtype": "float32", | |
| "encoding": "none", | |
| "shape": {} | |
| } | |
| } | |
| } | |
| }, | |
| "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict" | |
| }, | |
| "length": "-1" | |
| } | |
| } | |
| } | |
| }, | |
| "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict" | |
| } |
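For reference, a minimal sketch of how a dataset described by this feature spec can be read with tensorflow_datasets as RLDS-style episodes of steps. The builder directory path and the "train" split name are placeholders and not part of the spec above; point the path at the directory containing this features.json and its tfrecord shards.

```python
import tensorflow_datasets as tfds

# Hypothetical path to the versioned dataset directory holding this features.json.
builder = tfds.builder_from_directory(builder_dir="/path/to/dataset/1.0.0")
ds = builder.as_dataset(split="train")  # split name is an assumption

for episode in ds.take(1):
    # episode_metadata fields are scalar string tensors.
    ep_id = episode["episode_metadata"]["episode_id"].numpy().decode()

    # "steps" is a nested dataset; iterate it to get per-step features.
    for step in episode["steps"]:
        action = step["action"].numpy()               # float32, shape (7,)
        rgb = step["observation"]["rgb_0"].numpy()    # uint8, shape (256, 256, 3)
        instruction = step["language_instruction"].numpy().decode()
```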