| | --- |
| | license: cc-by-4.0 |
| | task_categories: |
| | - robotics |
| | tags: |
| | - LeRobot |
| | - Robotic manipulation |
| | pretty_name: BridgeData V2 Scripted Demos |
| | size_categories: |
| | - 100K<n<1M |
| | --- |
| | |
| | ## BridgeData V2 Scripted Demos |
| |
|
| | Scripted demonstrations from [BridgeData V2](https://rail-berkeley.github.io/bridgedata/). |
| |
|
| | Ported from the raw *scripted_6_18* data at full resolution to LeRobotDataset v3.0 format (~0.01 TiB, ~0.1k inodes). |
| |
|
| | <div align="center" style="margin: 16px 0;"> |
| | <video controls autoplay loop muted playsinline style="max-width: 100%; border-radius: 10px;"> |
| | <source src="https://huggingface.co/datasets/jnogga/bridge_data_v2_scripted/resolve/main/bridge_example_episode.mp4" type="video/mp4"> |
| | Your browser does not support the video tag. |
| | </video> |
| | </div> |
| | |
| | For the teleoperated trajectories with language annotation, see [jnogga/bridge_data_v2_teleop](https://huggingface.co/datasets/jnogga/bridge_data_v2_teleop). |
| |
|
| | ## Dataset Structure |
| |
|
| | Note that the available cameras vary between episodes. Missing camera perspectives are padded, and the corresponding *_available* sample fields serve as a mask. |
| |
|
| | [meta/info.json](meta/info.json): |
| | ```json |
| | { |
| | "codebase_version": "v3.0", |
| | "robot_type": "widow_x", |
| | "fps": 5, |
| | "data_files_size_in_mb": 100.0, |
| | "video_files_size_in_mb": 200.0, |
| | "chunks_size": 1000, |
| | "total_episodes": 9701, |
| | "total_frames": 456260, |
| | "total_tasks": 9701, |
| | "splits": { |
| | "train": "0:9701" |
| | }, |
| | "data_path": "data/chunk-{chunk_index:03d}/file_{file_index:03d}.parquet", |
| | "video_path": "videos/{video_key}/chunk-{chunk_index:03d}/file_{file_index:03d}.mp4", |
| | "features": { |
| | "action.cartesian": { |
| | "dtype": "float32", |
| | "shape": [ |
| | 7 |
| | ], |
| | "names": [ |
| | "position.x", |
| | "position.y", |
| | "position.z", |
| | "quaternion.w", |
| | "quaternion.x", |
| | "quaternion.y", |
| | "quaternion.z" |
| | ], |
| | "fps": 5 |
| | }, |
| | "action.gripper_position": { |
| | "dtype": "float32", |
| | "shape": [ |
| | 1 |
| | ], |
| | "names": null, |
| | "fps": 5 |
| | }, |
| | "observation.cartesian": { |
| | "dtype": "float32", |
| | "shape": [ |
| | 7 |
| | ], |
| | "names": [ |
| | "position.x", |
| | "position.y", |
| | "position.z", |
| | "quaternion.w", |
| | "quaternion.x", |
| | "quaternion.y", |
| | "quaternion.z" |
| | ], |
| | "fps": 5 |
| | }, |
| | "observation.gripper_position": { |
| | "dtype": "float32", |
| | "shape": [ |
| | 1 |
| | ], |
| | "names": null, |
| | "fps": 5 |
| | }, |
| | "observation.eef_transform": { |
| | "dtype": "float32", |
| | "shape": [ |
| | 7 |
| | ], |
| | "names": [ |
| | "position.x", |
| | "position.y", |
| | "position.z", |
| | "quaternion.w", |
| | "quaternion.x", |
| | "quaternion.y", |
| | "quaternion.z" |
| | ], |
| | "fps": 5 |
| | }, |
| | "observation.joint_position": { |
| | "dtype": "float32", |
| | "shape": [ |
| | 6 |
| | ], |
| | "names": [ |
| | "joint_0", |
| | "joint_1", |
| | "joint_2", |
| | "joint_3", |
| | "joint_4", |
| | "joint_5" |
| | ], |
| | "fps": 5 |
| | }, |
| | "observation.joint_velocity": { |
| | "dtype": "float32", |
| | "shape": [ |
| | 6 |
| | ], |
| | "names": [ |
| | "joint_0", |
| | "joint_1", |
| | "joint_2", |
| | "joint_3", |
| | "joint_4", |
| | "joint_5" |
| | ], |
| | "fps": 5 |
| | }, |
| | "frame_index": { |
| | "dtype": "int64", |
| | "shape": [ |
| | 1 |
| | ], |
| | "names": null, |
| | "fps": 5 |
| | }, |
| | "timestamp": { |
| | "dtype": "float32", |
| | "shape": [ |
| | 1 |
| | ], |
| | "names": null, |
| | "fps": 5 |
| | }, |
| | "index": { |
| | "dtype": "int64", |
| | "shape": [ |
| | 1 |
| | ], |
| | "names": null, |
| | "fps": 5 |
| | }, |
| | "task_index": { |
| | "dtype": "int64", |
| | "shape": [ |
| | 1 |
| | ], |
| | "names": null, |
| | "fps": 5 |
| | }, |
| | "episode_index": { |
| | "dtype": "int64", |
| | "shape": [ |
| | 1 |
| | ], |
| | "names": null, |
| | "fps": 5 |
| | }, |
| | "observation.images.camera_0_available": { |
| | "dtype": "bool", |
| | "shape": [ |
| | 1 |
| | ], |
| | "names": null, |
| | "fps": 5 |
| | }, |
| | "observation.images.camera_1_available": { |
| | "dtype": "bool", |
| | "shape": [ |
| | 1 |
| | ], |
| | "names": null, |
| | "fps": 5 |
| | }, |
| | "observation.images.camera_2_available": { |
| | "dtype": "bool", |
| | "shape": [ |
| | 1 |
| | ], |
| | "names": null, |
| | "fps": 5 |
| | }, |
| | "observation.images.camera_3_available": { |
| | "dtype": "bool", |
| | "shape": [ |
| | 1 |
| | ], |
| | "names": null, |
| | "fps": 5 |
| | }, |
| | "observation.images.camera_4_available": { |
| | "dtype": "bool", |
| | "shape": [ |
| | 1 |
| | ], |
| | "names": null, |
| | "fps": 5 |
| | }, |
| | "observation.images.camera_0": { |
| | "dtype": "video", |
| | "shape": [ |
| | 480, |
| | 640, |
| | 3 |
| | ], |
| | "names": [ |
| | "height", |
| | "width", |
| | "channel" |
| | ], |
| | "info": { |
| | "video.height": 480, |
| | "video.width": 640, |
| | "video.codec": "h264", |
| | "video.pix_fmt": "yuv420p", |
| | "video.is_depth_map": false, |
| | "video.fps": 5, |
| | "video.channels": 3, |
| | "has_audio": false |
| | }, |
| | "fps": 5 |
| | }, |
| | "observation.images.camera_1": { |
| | "dtype": "video", |
| | "shape": [ |
| | 480, |
| | 640, |
| | 3 |
| | ], |
| | "names": [ |
| | "height", |
| | "width", |
| | "channel" |
| | ], |
| | "info": { |
| | "video.height": 480, |
| | "video.width": 640, |
| | "video.codec": "h264", |
| | "video.pix_fmt": "yuv420p", |
| | "video.is_depth_map": false, |
| | "video.fps": 5, |
| | "video.channels": 3, |
| | "has_audio": false |
| | }, |
| | "fps": 5 |
| | }, |
| | "observation.images.camera_2": { |
| | "dtype": "video", |
| | "shape": [ |
| | 480, |
| | 640, |
| | 3 |
| | ], |
| | "names": [ |
| | "height", |
| | "width", |
| | "channel" |
| | ], |
| | "info": { |
| | "video.height": 480, |
| | "video.width": 640, |
| | "video.codec": "h264", |
| | "video.pix_fmt": "yuv420p", |
| | "video.is_depth_map": false, |
| | "video.fps": 5, |
| | "video.channels": 3, |
| | "has_audio": false |
| | }, |
| | "fps": 5 |
| | }, |
| | "observation.images.camera_3": { |
| | "dtype": "video", |
| | "shape": [ |
| | 480, |
| | 640, |
| | 3 |
| | ], |
| | "names": [ |
| | "height", |
| | "width", |
| | "channel" |
| | ], |
| | "info": { |
| | "video.height": 480, |
| | "video.width": 640, |
| | "video.codec": "h264", |
| | "video.pix_fmt": "yuv420p", |
| | "video.is_depth_map": false, |
| | "video.fps": 5, |
| | "video.channels": 3, |
| | "has_audio": false |
| | }, |
| | "fps": 5 |
| | }, |
| | "observation.images.camera_4": { |
| | "dtype": "video", |
| | "shape": [ |
| | 480, |
| | 640, |
| | 3 |
| | ], |
| | "names": [ |
| | "height", |
| | "width", |
| | "channel" |
| | ], |
| | "info": { |
| | "video.height": 480, |
| | "video.width": 640, |
| | "video.codec": "h264", |
| | "video.pix_fmt": "yuv420p", |
| | "video.is_depth_map": false, |
| | "video.fps": 5, |
| | "video.channels": 3, |
| | "has_audio": false |
| | }, |
| | "fps": 5 |
| | } |
| | } |
| | } |
| | ``` |
| |
|
| | ## Getting started |
| |
|
| | ```py |
| | # pip install lerobot |
| | from lerobot.datasets.lerobot_dataset import LeRobotDataset |
| | |
| | dataset = LeRobotDataset("jnogga/bridge_data_v2_scripted") |
| | ``` |
| |
|
| | See [bridge_example.ipynb](bridge_example.ipynb) for a more detailed example. |
| |
|
| | ## Citation |
| |
|
| | All credit goes to the original authors of BridgeData V2. If you find their work helpful, please cite: |
| |
|
| | **BibTeX:** |
| |
|
| | ```bibtex |
| | @inproceedings{walke2023bridgedata, |
| | title={BridgeData V2: A Dataset for Robot Learning at Scale}, |
| | author={Walke, Homer and Black, Kevin and Lee, Abraham and Kim, Moo Jin and Du, Max and Zheng, Chongyi and Zhao, Tony and Hansen-Estruch, Philippe and Vuong, Quan and He, Andre and Myers, Vivek and Fang, Kuan and Finn, Chelsea and Levine, Sergey}, |
| | booktitle={Conference on Robot Learning (CoRL)}, |
| | year={2023} |
| | } |
| | ``` |