{
"featuresDict": {
"features": {
"episode_metadata": {
"featuresDict": {
"features": {
"file_path": {
"description": "Path to the original data file.",
"pythonClassName": "tensorflow_datasets.core.features.text_feature.Text",
"text": {}
}
}
},
"pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict"
},
"steps": {
"pythonClassName": "tensorflow_datasets.core.features.dataset_feature.Dataset",
"sequence": {
"feature": {
"featuresDict": {
"features": {
"action": {
"description": "Robot action, consists of delta values across [6x ee pos (x, y, z, r, p, y), 1x delta gripper position, 1x delta gripper applied force, 1x terminate episode].",
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
"tensor": {
"dtype": "float64",
"encoding": "none",
"shape": {
"dimensions": [
"9"
]
}
}
},
"action_dict": {
"featuresDict": {
"features": {
"cartesian_position": {
"description": "end effector pose delta, relative to base frame.",
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
"tensor": {
"dtype": "float64",
"encoding": "none",
"shape": {
"dimensions": [
"6"
]
}
}
},
"gripper_force": {
"description": "gripper force delta.",
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
"tensor": {
"dtype": "float64",
"encoding": "none",
"shape": {
"dimensions": [
"1"
]
}
}
},
"gripper_position": {
"description": "gripper position delta.",
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
"tensor": {
"dtype": "float64",
"encoding": "none",
"shape": {
"dimensions": [
"1"
]
}
}
},
"rotation": {
"description": "end effector rotation delta, relative to base frame.",
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
"tensor": {
"dtype": "float64",
"encoding": "none",
"shape": {
"dimensions": [
"3"
]
}
}
},
"translation": {
"description": "end effector translation delta, relative to base frame.",
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
"tensor": {
"dtype": "float64",
"encoding": "none",
"shape": {
"dimensions": [
"3"
]
}
}
}
}
},
"pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict"
},
"discount": {
"description": "Discount if provided, default to 1.",
"pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
"tensor": {
"dtype": "float32",
"encoding": "none",
"shape": {}
}
},
"is_first": {
"description": "True on first step of the episode.",
"pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
"tensor": {
"dtype": "bool",
"encoding": "none",
"shape": {}
}
},
"is_last": {
"description": "True on last step of the episode.",
"pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
"tensor": {
"dtype": "bool",
"encoding": "none",
"shape": {}
}
},
"is_terminal": {
"description": "True on last step of the episode if it is a terminal step, True for demos.",
"pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
"tensor": {
"dtype": "bool",
"encoding": "none",
"shape": {}
}
},
"language_embedding": {
"description": "Kona language embedding. See https://tfhub.dev/google/universal-sentence-encoder-large/5",
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
"tensor": {
"dtype": "float32",
"encoding": "none",
"shape": {
"dimensions": [
"512"
]
}
}
},
"language_instruction": {
"description": "Language Instruction.",
"pythonClassName": "tensorflow_datasets.core.features.text_feature.Text",
"text": {}
},
"observation": {
"featuresDict": {
"features": {
"applied_force": {
"description": "gripper applied force.",
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
"tensor": {
"dtype": "float64",
"encoding": "none",
"shape": {
"dimensions": [
"1"
]
}
}
},
"cartesian_position": {
"description": "6x end-effector pose (x,y,z,rx,ry,rz relative to base frame).",
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
"tensor": {
"dtype": "float64",
"encoding": "none",
"shape": {
"dimensions": [
"6"
]
}
}
},
"contact_force": {
"description": "gripper measured contact force.",
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
"tensor": {
"dtype": "float64",
"encoding": "none",
"shape": {
"dimensions": [
"1"
]
}
}
},
"gripper_position": {
"description": "gripper position.",
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
"tensor": {
"dtype": "float64",
"encoding": "none",
"shape": {
"dimensions": [
"1"
]
}
}
},
"image": {
"description": "Main camera RGB observation.",
"image": {
"dtype": "uint8",
"encodingFormat": "jpeg",
"shape": {
"dimensions": [
"480",
"640",
"3"
]
}
},
"pythonClassName": "tensorflow_datasets.core.features.image_feature.Image"
},
"joint_position": {
"description": "UR5 6DoF joint positions (q0, q1, q2, q3, q4, q5).",
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
"tensor": {
"dtype": "float64",
"encoding": "none",
"shape": {
"dimensions": [
"6"
]
}
}
},
"state": {
"description": "Robot state, consists of [6x robot joint angles, 6x end-effector position (x,y,z,rx,ry,rz relative to base frame), 1x gripper position, 1x gripper applied force, 1x gripper contact force, 1x action_blocked flag].",
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
"tensor": {
"dtype": "float64",
"encoding": "none",
"shape": {
"dimensions": [
"16"
]
}
}
},
"wrist_image": {
"description": "Wrist camera RGB observation.",
"image": {
"dtype": "uint8",
"encodingFormat": "jpeg",
"shape": {
"dimensions": [
"480",
"640",
"3"
]
}
},
"pythonClassName": "tensorflow_datasets.core.features.image_feature.Image"
}
}
},
"pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict"
},
"reward": {
"description": "Reward if provided, 1 on final step for demos.",
"pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
"tensor": {
"dtype": "float32",
"encoding": "none",
"shape": {}
}
},
"subtask": {
"description": "Language Instruction for subtask.",
"pythonClassName": "tensorflow_datasets.core.features.text_feature.Text",
"text": {}
},
"timestep_pad_mask": {
"description": "False on first step of the episode if context window==2 and for padded steps.",
"pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
"tensor": {
"dtype": "bool",
"encoding": "none",
"shape": {}
}
}
}
},
"pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict"
},
"length": "-1"
}
}
}
},
"pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict"
}