Daniel Ordonez committed on
Commit
bb8ab62
·
1 Parent(s): 364dc60
.gitignore ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.pyc
2
+ *.__pycache__
3
+ /.idea
4
+ /.empl_cache/
5
+ /_tests/
6
+ /launch/
7
+ /__pycache__/
8
+ /.pytest_cache/
9
+ output/
10
+ *numpy_*
11
+ *.DS_Store
12
+ .ipynb_checkpoints/
13
+ .vscode/
14
+
data/test/aliengo/proprioceptive_data_ep=20_steps=1999.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8fa6e42148b81109b35317980f89bcb983478b6c70742c6d152198223ca1eaf9
3
+ size 440515032
data/test/aliengo/proprioceptive_data_ep=20_steps=1999_test.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8fa6e42148b81109b35317980f89bcb983478b6c70742c6d152198223ca1eaf9
3
+ size 440515032
data/test/aliengo/proprioceptive_data_ep=20_steps=1999_val.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8fa6e42148b81109b35317980f89bcb983478b6c70742c6d152198223ca1eaf9
3
+ size 440515032
proprioceptive_dataset.py ADDED
@@ -0,0 +1,257 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Created by Daniel Ordoñez (daniels.ordonez@gmail.com) at 17/02/25
2
+ from os import PathLike
3
+ from pathlib import Path
4
+
5
+ import numpy as np
6
+ import torch
7
+ from gym_quadruped.utils.data.h5py import H5Reader
8
+ from torch.utils.data import Dataset
9
+
10
+
11
class ProprioceptiveDataset(Dataset):
    """Dataset for classification/regression tasks using proprioceptive data.

    Each sample is a window of consecutive time-frames taken from a single trajectory of an HDF5
    recording read by ``gym_quadruped.utils.data.h5py.H5Reader``.

    Args:
        data_file: (PathLike) Path to the HDF5 file containing the data to be read by a
            gym_quadruped.utils.data.h5_dataset.H5Reader. Dataset is assumed to be composed of
            observations queried by name and of shape (n_time_frames, n_features).
        x_obs_names: (list[str]) Names of the observations to be used as input features.
        y_obs_names: (list[str]) Names of the observations to be used as output features.
        x_frames: (int) Number of time frames to be used as input features.
        y_frames: (int) Number of time frames to be used as output features.
        mode: (str) If "dynamic", y strictly follows x in time: the context window holds
            `x_frames + y_frames` frames, x being the first `x_frames` and y the last `y_frames`.
            If "static", x and y both end at the same frame: the context window holds
            `max(x_frames, y_frames)` frames, x being the last `x_frames` and y the last `y_frames`.
        load_to_memory: (bool) If True, the dataset is loaded to memory (as torch tensors) for faster access.
        dtype: (torch.dtype) Data type used when loading the dataset to memory.
        device: (torch.device) Device to load the dataset to.

    Raises:
        ValueError: If `x_frames`/`y_frames` are not positive or `mode` is unknown.
        KeyError: If a requested observation name is not present in the recordings.
    """

    def __init__(
        self,
        data_file: PathLike,
        x_obs_names,
        y_obs_names,
        x_frames: int = 1,
        y_frames: int = 1,
        mode="static",  # "static" | "dynamic"
        load_to_memory=False,
        dtype=torch.float32,
        device=None,
    ):
        # Explicit raises instead of `assert`, so validation survives `python -O`.
        if x_frames <= 0 or y_frames <= 0:
            raise ValueError("X and Y need to be composed of at least one frame.")
        if mode not in ("static", "dynamic"):
            raise ValueError(f"Mode {mode} not supported. Choose 'static' or 'dynamic'.")
        self.x_frames, self.y_frames = x_frames, y_frames
        # Load the Gym Quadruped dataset.
        self.h5file = H5Reader(data_file)
        for obs_name in x_obs_names + y_obs_names:
            if obs_name not in self.h5file.recordings.keys():
                raise KeyError(f"Observation {obs_name} not in {self.h5file.recordings.keys()}")

        self.x_obs_names, self.y_obs_names = x_obs_names, y_obs_names
        self.device = device  # Device to load the dataset to
        self.dtype = dtype
        self.mean_vars = {}  # Mean and variance of each observation in the dataset

        self._mode = mode
        self._load_to_memory = load_to_memory  # Load dataset to RAM / Device
        self._n_samples = None
        self._traj_lengths = {}  # Dataset can be composed of trajectories/episodes of different lengths
        self._indices = []  # Indices of samples in the raw_data

        self.compute_sample_indices()
        self._memory_data = None
        if self._load_to_memory:
            self._load_dataset_to_memory()

    def compute_sample_indices(self):
        """Compute the indices of the samples in the dataset.

        Dataset is composed of trajectories of shape (n_time_frames, n_features).
        The indices are tuples (traj_id, slice_idx) where slice_idx is a slice object indicating the
        start and end of the sample indices in time for the trajectory with id traj_id.

        Safe to call more than once: previously computed indices are discarded first.
        """
        tmp_obs_name = self.x_obs_names[0]
        if self._mode == "static":
            # Both windows end at the same frame, so the context must fit the longer of the two.
            context_length = max(self.x_frames, self.y_frames)
        elif self._mode == "dynamic":
            # y strictly follows x in time, so the context holds both windows back-to-back.
            context_length = self.x_frames + self.y_frames
        else:
            raise ValueError(f"Mode {self._mode} not supported. Choose 'static' or 'dynamic'.")

        # Reset state so repeated calls do not accumulate duplicate samples (previously `extend`
        # kept appending to the indices computed by an earlier call).
        self._indices = []
        self._traj_lengths = {}

        for traj_id in range(self.h5file.n_trajectories):
            traj_len = self.h5file.recordings[tmp_obs_name][traj_id].shape[0]
            traj_slices = self._slices_from_traj_len(traj_len, context_length, time_lag=1)
            self._indices.extend((traj_id, s) for s in traj_slices)
            self._traj_lengths[traj_id] = traj_len

            # All requested observations must share the time dimension within a trajectory.
            for obs_name in self.x_obs_names + self.y_obs_names:
                if self.h5file.recordings[obs_name][traj_id].shape[0] != traj_len:
                    raise ValueError(
                        f"Obs {tmp_obs_name} and {obs_name} have different time dimensions for trajectory {traj_id}."
                    )

    @property
    def n_trajectories(self):
        """Returns the number of trajectories in the dataset."""
        return len(self._traj_lengths)

    @property
    def raw_data(self):
        """Returns the raw data contained in the dataset (in-memory tensors if preloaded, else the H5 recordings)."""
        if self._load_to_memory:
            return self._memory_data
        else:
            return self.h5file.recordings

    def _load_dataset_to_memory(self):
        """Loads the dataset to memory for faster access."""
        self._memory_data = {}
        for obs_name in self.x_obs_names + self.y_obs_names:
            obs_data = []  # Trajectories might have different lengths
            for traj_id in range(self.h5file.n_trajectories):
                traj_data = self.h5file.recordings[obs_name][traj_id]
                obs_data.append(torch.tensor(traj_data).to(device=self.device, dtype=self.dtype))
            self._memory_data[obs_name] = obs_data

    def shuffle(self, seed=None):
        """Shuffles the sample order in place.

        Uses a local random Generator so the *global* NumPy RNG state is left untouched
        (the previous implementation called `np.random.seed`, silently reseeding every other
        consumer of the global RNG in the process).
        """
        rng = np.random.default_rng(seed)
        permutation = rng.permutation(len(self._indices))
        self._indices = [self._indices[i] for i in permutation]

    def __getitem__(self, idx):
        """Return x in the past and y in the future for the idx-th sample in the dataset.

        Args:
            idx: (int) Index of the sample in the dataset.

        Returns:
            x_obs: (dict[str, ArrayLike]) input observations with shape (x_frames, obs_dim) per observation name in `x_obs_names`.
            y_obs: (dict[str, ArrayLike]) output observations with shape (y_frames, obs_dim) per observation name in `y_obs_names`.
        """
        traj_idx, window_slice = self._indices[idx]
        if self._mode == "static":
            x_slice = slice(-self.x_frames, None)
            y_slice = slice(-self.y_frames, None)
        elif self._mode == "dynamic":
            x_slice = slice(0, self.x_frames)
            y_slice = slice(-self.y_frames, None)
        else:  # Defensive: previously an unknown mode caused an UnboundLocalError below.
            raise ValueError(f"Mode {self._mode} not supported. Choose 'static' or 'dynamic'.")

        x_obs, y_obs = {}, {}
        for obs_name in self.x_obs_names:  # X is composed of the first x_frames observations
            x_obs[obs_name] = self.raw_data[obs_name][traj_idx][window_slice][x_slice]
        for obs_name in self.y_obs_names:  # Y is composed of the last y_frames observations
            y_obs[obs_name] = self.raw_data[obs_name][traj_idx][window_slice][y_slice]

        return x_obs, y_obs

    def compute_obs_moments(self, obs_reps: dict = None):
        """Computes the mean and variance for each observation in x_obs_names and y_obs_names.

        Args:
            obs_reps: (dict | None) Optional mapping obs_name -> group representation. When given,
                symmetry-aware moments are computed via `iekf_ms.utils.symmetric_stats.symmetric_moments`.

        Results are stored in `self.mean_vars` as obs_name -> (mean, var).
        """
        for obs_name in self.x_obs_names + self.y_obs_names:
            trajs = [self.h5file.recordings[obs_name][traj_id] for traj_id in self._traj_lengths.keys()]
            obs_data = np.concatenate(trajs, axis=0)
            if obs_reps is not None:
                # Local import: optional dependency only needed for symmetry-aware statistics.
                from iekf_ms.utils.symmetric_stats import symmetric_moments

                obs_mean, obs_var = symmetric_moments(torch.tensor(obs_data), obs_reps[obs_name])
            else:
                obs_mean = np.mean(obs_data, axis=0)
                obs_var = np.var(obs_data, axis=0)
            self.mean_vars[obs_name] = (obs_mean, obs_var)

    def subset_dataset(self, trajectory_ids) -> "ProprioceptiveDataset":
        """Creates a subset of the dataset containing only the specified trajectories.

        NOTE(review): the subset re-opens the H5 file and, if `load_to_memory` is set, reloads *all*
        trajectories to memory before filtering — confirm this is acceptable for large files.
        """
        if len(trajectory_ids) == 0:
            raise ValueError("Trajectory ids must be a non-empty list.")

        subset = ProprioceptiveDataset(
            self.h5file.file_path,
            self.x_obs_names,
            self.y_obs_names,
            self.x_frames,
            self.y_frames,
            mode=self._mode,
            load_to_memory=self._load_to_memory,
            dtype=self.dtype,
            device=self.device,
        )

        # Filter indices and trajectory lengths
        subset._indices = [idx for idx in self._indices if idx[0] in trajectory_ids]
        for i in range(self.h5file.n_trajectories):
            if i not in trajectory_ids:
                subset._traj_lengths.pop(i)

        return subset

    def __len__(self):
        """Number of (traj_id, window) samples available."""
        return len(self._indices)

    @staticmethod
    def _slices_from_traj_len(time_horizon: int, context_length: int, time_lag: int) -> list[slice]:
        """Returns the list of slices (start_time_idx, end_time_idx) for each context window in the trajectory.

        Args:
            time_horizon: (int) Number time-frames of the trajectory.
            context_length: (int) Number of time-frames per context window
            time_lag: (int) Time lag between successive context windows.

        Returns:
            list[slice]: List of slices for each context window.

        Examples:
        --------
        >>> time_horizon, context_length, time_lag = 10, 4, 2
        >>> slices = ProprioceptiveDataset._slices_from_traj_len(time_horizon, context_length, time_lag)
        >>> for s in slices:
        ...     print(f"start: {s.start}, end: {s.stop}")
        start: 0, end: 4
        start: 2, end: 6
        start: 4, end: 8
        start: 6, end: 10

        """
        slices = []
        for start in range(0, time_horizon - context_length + 1, time_lag):
            end = start + context_length
            slices.append(slice(start, end))

        return slices

    def __repr__(self):
        return f"{len(self._traj_lengths)} trajectories and {len(self)} total samples."
221
+
222
if __name__ == "__main__":
    data_path = Path("aliengo/proprioceptive_data_ep=20_steps=1999.h5").absolute()

    # Smoke-test both windowing modes with the same observation selection.
    demo_settings = [
        {"x_frames": 10, "y_frames": 1, "mode": "static"},
        {"x_frames": 10, "y_frames": 5, "mode": "dynamic"},
    ]
    for settings in demo_settings:
        dataset = ProprioceptiveDataset(
            data_path,
            x_obs_names=["qpos_js", "qvel_js"],
            y_obs_names=["imu_acc", "imu_gyro"],
            **settings,
        )
        print(len(dataset))
        for sample_idx in range(10):
            x, y = dataset[sample_idx]
            for obs_name, obs_val in x.items():
                print(f"X: {obs_name}: {np.asarray(obs_val).shape}")
            for obs_name, obs_val in y.items():
                print(f"Y: {obs_name}: {np.asarray(obs_val).shape}")
quadruped_locomotion.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # TODO: Address all TODOs and remove all explanatory comments
15
+ """TODO: Add a description here."""
16
+
17
+ import csv
18
+ import json
19
+ import os
20
+ from typing import Optional, Union
21
+
22
+ import datasets
23
+ from datasets import (Array2D, Dataset, DatasetDict, DownloadConfig, DownloadManager, DownloadMode, Split,
24
+ VerificationMode)
25
+
26
+ from proprioceptive_dataset import ProprioceptiveDataset
27
+
28
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
# Mapping: robot name -> split name ("train"/"val"/"test") -> repo-relative path of the H5 (git-lfs) file.
_URLS = {
    "aliengo": {
        "train": "data/test/aliengo/proprioceptive_data_ep=20_steps=1999.h5",
        "val": "data/test/aliengo/proprioceptive_data_ep=20_steps=1999_val.h5",
        "test": "data/test/aliengo/proprioceptive_data_ep=20_steps=1999_test.h5",
    },
}
61
+
62
class QuadrupedConfig(datasets.BuilderConfig):
    """BuilderConfig selecting one robot's locomotion recordings.

    Args:
        robot_name: (str) Identifier of the robot whose recordings to load (a key of the
            module-level `_URLS` mapping).
        obs_names: (list[str] | None) Names of the observations to expose as dataset features.
            If None, the builder decides which observations to use.
        **kwargs: Forwarded to `datasets.BuilderConfig` (e.g. `name`, `version`, `description`).
    """

    def __init__(self, robot_name: str, obs_names: list[str] = None, **kwargs):
        # `list[str]` fixed: `list[str, ...]` is not a valid generic form (only `tuple` takes `...`).
        super().__init__(**kwargs)
        self.robot_name = robot_name
        self.obs_names = obs_names
71
+
72
+
73
class QuadrupedLocomotion(datasets.DatasetBuilder):
    """Dataset of proprioceptive and exteroceptive sensor data during legged locomotion of diverse quadrupeds."""

    VERSION = datasets.Version("0.0.1")

    # You will be able to load one or the other configurations in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    BUILDER_CONFIGS = [
        QuadrupedConfig(robot_name="aliengo",
                        description="Aliengo trotting dataset"),
        # QuadrupedConfig(robot_name="aliengo_all-terrain_trotting", description="Aliengo trotting dataset"),
    ]

    # NOTE(review): no config above is named "first_domain"; this default appears to be leftover
    # template text — confirm which config name should be the default.
    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it make sense.

    def _info(self):
        """Build the DatasetInfo: feature schema restricted to the config's requested observations."""
        # Full catalogue of known observations; each is a (time, feature) float array.
        full_obs = {
            "time": Array2D(dtype="float32", shape=(None, 1)),
            "qpos": Array2D(dtype="float32", shape=(None, None)),
            "qvel": Array2D(dtype="float32", shape=(None, None)),
            "qpos_js": Array2D(dtype="float32", shape=(None, None)),
            "qvel_js": Array2D(dtype="float32", shape=(None, None)),
            "base_pos": Array2D(dtype="float32", shape=(None, 3)),
            "base_lin_vel": Array2D(dtype="float32", shape=(None, 3)),
            "base_lin_vel_err": Array2D(dtype="float32", shape=(None, 3)),
            "base_lin_acc": Array2D(dtype="float32", shape=(None, 3)),
            "base_ang_vel": Array2D(dtype="float32", shape=(None, 3)),
            "base_ang_vel_err": Array2D(dtype="float32", shape=(None, 3)),
            "base_ori_euler_xyz": Array2D(dtype="float32", shape=(None, 3)),
            "base_ori_quat_wxyz": Array2D(dtype="float32", shape=(None, 4)),
            "base_ori_SO3": Array2D(dtype="float32", shape=(None, 9)),
            "base_lin_vel:base": Array2D(dtype="float32", shape=(None, 3)),
            "base_lin_vel_err:base": Array2D(dtype="float32", shape=(None, 3)),
            "base_lin_acc:base": Array2D(dtype="float32", shape=(None, 3)),
            "base_ang_vel:base": Array2D(dtype="float32", shape=(None, 3)),
            "feet_pos": Array2D(dtype="float32", shape=(None, 12)),
            "feet_pos:base": Array2D(dtype="float32", shape=(None, 12)),
            "feet_vel": Array2D(dtype="float32", shape=(None, 12)),
            "feet_vel:base": Array2D(dtype="float32", shape=(None, 12)),
            "contact_state": Array2D(dtype="float32", shape=(None, 4)),
            "contact_forces": Array2D(dtype="float32", shape=(None, 12)),
            "contact_forces:base": Array2D(dtype="float32", shape=(None, 12)),
            "tau_ctrl_setpoint": Array2D(dtype="float32", shape=(None, 12)),
            "gravity_vector": Array2D(dtype="float32", shape=(None, 3)),
            # IMU
            # TODO: fill all.
        }

        # `obs_names` defaults to None in QuadrupedConfig; fall back to exposing every known
        # observation instead of crashing with a TypeError on iteration.
        obs_names = self.config.obs_names if self.config.obs_names is not None else list(full_obs)
        requested_obs = {obs_name: full_obs[obs_name] for obs_name in obs_names}
        features = datasets.Features(requested_obs)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # supervised_keys=("sentence", "label"), They'll be used if as_supervised=True in builder.as_dataset.
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> list[datasets.SplitGenerator]:
        """Download this robot's split files and wrap each one in a ProprioceptiveDataset loader."""
        # `_URLS` is the module-level constant, keyed first by robot name then by split
        # (`self._URLS` did not exist, and the un-indexed dict would have mapped splits wrongly).
        urls_to_download = _URLS[self.config.robot_name]
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        loaders = {}
        # `.items()` is required to unpack (split, path); iterating the dict directly yields keys only.
        for split_name, split_path in downloaded_files.items():
            loaders[split_name] = ProprioceptiveDataset(
                data_file=split_path,  # each split loads its own file (was hard-coded to "train")
                x_obs_names=["qpos", "qvel"],
                y_obs_names=["qpos_js", "qvel_js"],
                load_to_memory=True,
            )
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data": loaders['train']}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"data": loaders['val']}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"data": loaders['test']}),
        ]
155
+