vovantuan committed
Commit ffa722f · verified · 1 parent: 097f68c

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
action_tokenizer.py ADDED
@@ -0,0 +1,431 @@
+"""
+action_tokenizer.py
+
+Extension class; wraps a base LLM/VLM tokenizer with logic to discretize and tokenize continuous robot actions.
+"""
+from typing import List, Union, Dict, Optional
+import numpy as np
+from transformers import PreTrainedTokenizerBase
+from scipy.stats import norm
+import torch
+
+ACTION_TOKEN = '<ACTION{:05d}>'
+
+class ActionTokenizer:
+    def __init__(
+        self,
+        tokenizer: PreTrainedTokenizerBase,
+        num_bins: int = 256,
+        min_action: int = -1,
+        max_action: int = 1,
+    ):
+        self._vocab_size = num_bins
+        self.tokenizer = tokenizer
+        self.min_action, self.max_action = min_action, max_action
+        self.bin_centers = np.linspace(min_action, max_action, num_bins)
+
+        # add special action tokens to language tokenizer
+        token_list = [ACTION_TOKEN.format(i) for i in range(self._vocab_size)]
+        self.token_array = np.array(token_list)
+
+        num_new_tokens = self.tokenizer.add_tokens(token_list, special_tokens=True)
+        print(f"Add {num_new_tokens} ACTION TOKENS, tokenizer vocab size {self.tokenizer.vocab_size} / {len(tokenizer)}")
+
+        self.action_token_begin_idx = self.token_start_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[0])
+        self.token_end_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[-1])
+
+    def __call__(self, action: np.ndarray) -> List[str]:
+        """Discretize continuous actions to tokens.
+        action: np.ndarray, (n, 7), continuous actions in Cartesian or Spherical coordinates.
+        return: np.ndarray, (n, 7), tokens.
+        """
+        action = np.clip(action, a_min=float(self.min_action), a_max=float(self.max_action))
+        ids = np.digitize(action, self.bin_centers, right=True)  # [0, 255]
+        return self.token_array[ids]
+
+    def decode_token_ids_to_actions(self, action_token_id: np.ndarray) -> np.ndarray:
+        """Decode token ids to continuous actions.
+        action_token_id: np.ndarray, (n, 7), token ids.
+        return: np.ndarray, (n, 7), continuous actions.
+        """
+        ids = action_token_id - self.action_token_begin_idx
+        ids = np.clip(ids, a_min=0, a_max=self._vocab_size - 1)
+        return self.bin_centers[ids]
+
+    @property
+    def vocab_size(self) -> int:
+        return self._vocab_size
+
+class TranslationTokenizer:
+    def __init__(
+        self,
+        tokenizer: PreTrainedTokenizerBase,
+        num_bins: Dict,
+        bin_policy: Optional[Dict] = None,
+        use_spherical: bool = True,
+    ):
+        self.tokenizer = tokenizer
+        self.num_theta_bins = num_bins["theta_bins"]
+        self.num_phi_bins = num_bins["phi_bins"]
+        self.num_r_bins = num_bins["r_bins"]
+        self.use_spherical = use_spherical
+
+        # for indexing
+        self.NP = self.num_phi_bins * self.num_r_bins
+
+        # add special action tokens to language tokenizer
+        self._vocab_size = self.num_theta_bins * self.num_phi_bins * self.num_r_bins
+        token_list = [ACTION_TOKEN.format(i) for i in range(self._vocab_size)]
+        self.token_array = np.array(token_list)
+
+        num_new_tokens = self.tokenizer.add_tokens(token_list, special_tokens=True)
+        print(f"Add {num_new_tokens} TRANSLATION TOKENS, tokenizer vocab size {self.tokenizer.vocab_size} / {len(tokenizer)}")
+
+        self.token_start_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[0])
+        self.token_end_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[-1])
+        self.set_bins(bin_policy)
+
+    def set_bins(self, bin_policy):
+        self.theta_bins = np.array(bin_policy["theta_bins"])
+        self.phi_bins = np.array(bin_policy["phi_bins"])
+        self.r_bins = np.array(bin_policy["r_bins"])
+
+    def cartesian_to_spherical(self, x, y, z):
+        theta = np.arctan2(np.sqrt(x**2 + y**2), z)  # polar angle
+        phi = np.arctan2(y, x)  # azimuthal angle
+        r = np.sqrt(x**2 + y**2 + z**2)
+        return theta, phi, r
+
+    def spherical_to_cartesian(self, theta, phi, r):
+        x = r * np.sin(theta) * np.cos(phi)
+        y = r * np.sin(theta) * np.sin(phi)
+        z = r * np.cos(theta)
+        return x, y, z
+
+    def __call__(self, action: np.ndarray) -> List[str]:
+        """Discretize continuous actions to tokens.
+        action: np.ndarray, (n, 3), continuous actions in Cartesian or Spherical coordinates.
+        return: np.ndarray, (n,), tokens.
+        """
+        if self.use_spherical:
+            theta, phi, r = self.cartesian_to_spherical(action[:, 0], action[:, 1], action[:, 2])
+        else:
+            theta, phi, r = action[:, 0], action[:, 1], action[:, 2]
+
+        disc_theta = np.digitize(theta, self.theta_bins[1:-1])  # bucket index in [0, num_theta_bins - 1]
+        disc_phi = np.digitize(phi, self.phi_bins[1:-1])
+        disc_r = np.digitize(r, self.r_bins[1:-1])
+        ids = disc_theta * self.NP + disc_phi * self.num_r_bins + disc_r
+        return self.token_array[ids]
+
+    def decode_token_ids_to_actions(self, action_token_id: np.ndarray) -> np.ndarray:
+        """Decode token ids to continuous actions.
+        action_token_id: np.ndarray, (n,), token ids.
+        return: np.ndarray, (n, 3), continuous actions.
+        """
+        action_token_id = np.clip(action_token_id, self.token_start_idx, self.token_end_idx)
+        ids = action_token_id - self.token_start_idx
+        disc_theta, disc_phi, disc_r = ids // self.NP, (ids % self.NP) // self.num_r_bins, ids % self.num_r_bins
+
+        theta = 0.5 * (self.theta_bins[disc_theta] + self.theta_bins[disc_theta + 1])
+        phi = 0.5 * (self.phi_bins[disc_phi] + self.phi_bins[disc_phi + 1])
+        r = 0.5 * (self.r_bins[disc_r] + self.r_bins[disc_r + 1])
+
+        # clip actions to [-1, 1], because the spherical coordinate action space is the circumscribed sphere of the Cartesian action space.
+        x, y, z = self.spherical_to_cartesian(theta, phi, r) if self.use_spherical else (theta, phi, r)
+        x, y, z = np.clip([x, y, z], -1, 1)
+        return np.stack((x, y, z), axis=1)
+
+    @property
+    def vocab_size(self) -> int:
+        return self._vocab_size
+
+class RotationTokenizer:
+    def __init__(
+        self,
+        tokenizer: PreTrainedTokenizerBase,
+        num_bins: Dict,
+        bin_policy: Optional[Dict] = None,
+        array_begin_idx=None,
+    ):
+        self.tokenizer = tokenizer
+        self.num_roll_bins = num_bins["roll_bins"]  # M
+        self.num_pitch_bins = num_bins["pitch_bins"]  # N
+        self.num_yaw_bins = num_bins["yaw_bins"]  # P
+        self.array_begin_idx = array_begin_idx
+
+        # for indexing
+        self.NP = self.num_pitch_bins * self.num_yaw_bins
+
+        # add special action tokens to language tokenizer
+        self._vocab_size = self.num_roll_bins * self.num_pitch_bins * self.num_yaw_bins
+        token_list = [ACTION_TOKEN.format(i + self.array_begin_idx) for i in range(self._vocab_size)]
+        self.token_array = np.array(token_list)
+
+        num_new_tokens = self.tokenizer.add_tokens(token_list, special_tokens=True)
+        print(f"Add {num_new_tokens} ROTATION TOKENS to tokenizer, tokenizer vocab size {self.tokenizer.vocab_size} / {len(tokenizer)}")
+
+        self.token_start_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[0])
+        self.token_end_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[-1])
+        self.set_bins(bin_policy)
+
+    def set_bins(self, bin_policy):
+        self.roll_bins = np.array(bin_policy["roll_bins"])
+        self.pitch_bins = np.array(bin_policy["pitch_bins"])
+        self.yaw_bins = np.array(bin_policy["yaw_bins"])
+
+    def __call__(self, action: np.ndarray) -> List[str]:
+        """Discretize continuous actions to tokens.
+        action: np.ndarray, (n, 3), continuous roll/pitch/yaw actions.
+        return: np.ndarray, (n,), tokens.
+        """
+        roll, pitch, yaw = action[:, 0], action[:, 1], action[:, 2]
+        disc_roll = np.clip(np.digitize(roll, self.roll_bins) - 1, 0, self.num_roll_bins - 1)
+        disc_pitch = np.clip(np.digitize(pitch, self.pitch_bins) - 1, 0, self.num_pitch_bins - 1)
+        disc_yaw = np.clip(np.digitize(yaw, self.yaw_bins) - 1, 0, self.num_yaw_bins - 1)
+
+        ids = disc_roll * self.NP + disc_pitch * self.num_yaw_bins + disc_yaw
+        return self.token_array[ids]
+
+    def decode_token_ids_to_actions(self, action_token_id: Union[np.int64, np.ndarray]) -> np.ndarray:
+        """Decode token ids to continuous actions.
+        action_token_id: np.ndarray, (n,), token ids.
+        return: np.ndarray, (n, 3), continuous actions.
+        """
+        action_token_id = np.clip(action_token_id, a_min=self.token_start_idx, a_max=self.token_end_idx)
+        ids = action_token_id - self.token_start_idx
+        disc_roll, disc_pitch, disc_yaw = ids // self.NP, (ids % self.NP) // self.num_yaw_bins, ids % self.num_yaw_bins
+
+        roll = 0.5 * (self.roll_bins[disc_roll] + self.roll_bins[disc_roll + 1])
+        pitch = 0.5 * (self.pitch_bins[disc_pitch] + self.pitch_bins[disc_pitch + 1])
+        yaw = 0.5 * (self.yaw_bins[disc_yaw] + self.yaw_bins[disc_yaw + 1])
+        return np.stack((roll, pitch, yaw), axis=1)
+
+    @property
+    def vocab_size(self) -> int:
+        return self._vocab_size
+
+class GripperTokenizer:
+    def __init__(
+        self,
+        tokenizer: PreTrainedTokenizerBase,
+        num_bins: int = 2,
+        array_begin_idx=None,
+    ) -> None:
+        self.tokenizer = tokenizer
+        self.num_bins = num_bins
+        self.array_begin_idx = array_begin_idx
+        token_list = [ACTION_TOKEN.format(i + self.array_begin_idx) for i in range(self.num_bins)]
+        self.token_array = np.array(token_list)
+
+        num_new_tokens = self.tokenizer.add_tokens(token_list, special_tokens=True)
+        print(f"Add {num_new_tokens} GRIPPER TOKENS to tokenizer, tokenizer vocab size {self.tokenizer.vocab_size} / {len(tokenizer)}")
+
+        self.token_start_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[0])
+        self.token_end_idx = self.tokenizer.convert_tokens_to_ids(self.token_array[-1])
+
+    def __call__(self, action: np.ndarray) -> List[str]:
+        """Discretize continuous actions to tokens.
+        action: np.ndarray, (n,), continuous gripper actions, binarized at 0.5.
+        return: np.ndarray, (n,), tokens.
+        """
+        ids = np.where(action >= 0.5, 1, 0)
+        return self.token_array[ids]
+
+    def decode_token_ids_to_actions(self, action_token_id: np.ndarray) -> np.ndarray:
+        """Decode token ids to continuous actions.
+        action_token_id: np.ndarray, (n,), token ids.
+        return: np.ndarray, (n, 1), continuous actions.
+        """
+        action_token_id = np.clip(action_token_id, self.token_start_idx, self.token_end_idx)
+        ids = action_token_id - self.token_start_idx
+        actions = np.where(ids == 0, 0., 1.)
+        return actions[:, None]
+
+    @property
+    def vocab_size(self) -> int:
+        return self.num_bins
+
+class SpatialActionTokenizer:
+    range_bins = {
+        "translation": {
+            "theta_bins": (0.0, np.pi),
+            "phi_bins": (-np.pi, np.pi),
+            "r_bins": (0.0, np.sqrt(3)),
+        },
+        "rotation": {
+            "roll_bins": (-1.0, 1.0),
+            "pitch_bins": (-1.0, 1.0),
+            "yaw_bins": (-1.0, 1.0),
+        },
+    }
+
+    def __init__(
+        self,
+        tokenizer: PreTrainedTokenizerBase,
+        num_bins: Dict,
+        gs_params: Dict = None,
+        bin_policy: Dict = None,
+        use_spherical: bool = True,
+        min_sigma: float = 0.0,
+        min_action: float = -1.0,
+        max_action: float = 1.0,
+    ):
+        """Use bin_policy if given; otherwise calculate the bin policy from gs_params, or fall back to uniform bin grids.
+        gs_params: Optional[Dict],
+        bin_policy: Optional[Dict],
+        """
+        self.tokenizer = tokenizer
+        self.min_action, self.max_action = min_action, max_action
+        self.num_bins = num_bins
+        self.min_sigma = min_sigma
+
+        # set bin policy
+        self.bin_policy = bin_policy if bin_policy else self.get_bin_policy(gs_params, self.min_sigma)
+        self.translation_tokenizer = TranslationTokenizer(
+            self.tokenizer,
+            self.num_bins["translation"],
+            self.bin_policy["translation"],
+            use_spherical=use_spherical,
+        )
+
+        self.rotation_tokenizer = RotationTokenizer(
+            self.tokenizer,
+            self.num_bins["rotation"],
+            self.bin_policy["rotation"],
+            array_begin_idx=self.translation_tokenizer.vocab_size,
+        )
+
+        self.gripper_tokenizer = GripperTokenizer(
+            self.tokenizer,
+            self.num_bins["gripper"],
+            array_begin_idx=self.translation_tokenizer.vocab_size + self.rotation_tokenizer.vocab_size,
+        )
+        self._vocab_size = self.translation_tokenizer.vocab_size + self.rotation_tokenizer.vocab_size + self.gripper_tokenizer.vocab_size
+
+    def __call__(self, action: np.ndarray) -> List[str]:
+        """Discretize continuous actions to tokens.
+        action: np.ndarray, (n, 7), continuous actions in Cartesian coordinates.
+        return: np.ndarray, (n, 3), tokens.
+        """
+        if len(action.shape) == 1:
+            assert action.shape[0] == 7, f"action dim mismatch, got action shape: {action.shape}"
+            action = action.reshape(1, 7)
+        assert action.shape[1] == 7, f"action dim mismatch, got action shape: {action.shape}"
+
+        action = np.clip(action, a_min=self.min_action, a_max=self.max_action)
+        trans_tokens = self.translation_tokenizer(action[:, :3])  # (n,)
+        rot_tokens = self.rotation_tokenizer(action[:, 3:6])  # (n,)
+        grip_tokens = self.gripper_tokenizer(action[:, 6])  # (n,)
+        return np.stack((trans_tokens, rot_tokens, grip_tokens), axis=1)  # (n, 3)
+
+    def decode_token_ids_to_actions(self, action_token_ids: np.ndarray) -> np.ndarray:
+        """Decode token ids to continuous actions.
+        action_token_ids: np.ndarray, (n, 3), token ids.
+        """
+        if len(action_token_ids.shape) == 1:
+            assert action_token_ids.shape[0] == 3, f"action token id number mismatch, need 3, got {action_token_ids.shape[0]}"
+            action_token_ids = action_token_ids.reshape(1, 3)
+        assert action_token_ids.shape[1] == 3, f"token id number mismatch, need 3, got {action_token_ids.shape[1]}"
+
+        trans_action = self.translation_tokenizer.decode_token_ids_to_actions(action_token_ids[:, 0])  # (n, 3)
+        rot_action = self.rotation_tokenizer.decode_token_ids_to_actions(action_token_ids[:, 1])  # (n, 3)
+        grip_action = self.gripper_tokenizer.decode_token_ids_to_actions(action_token_ids[:, 2])  # (n, 1)
+        return np.concatenate((trans_action, rot_action, grip_action), axis=1)  # (n, 7)
+
+    @property
+    def vocab_size(self) -> int:
+        return self._vocab_size
+
+    @property
+    def action_token_begin_idx(self) -> int:
+        return self.translation_tokenizer.token_start_idx
+
+    def get_bin_policy(self, gs_params=None, min_sigma=0.0):
+        bin_policy = {
+            "translation": {"theta_bins": None, "phi_bins": None, "r_bins": None},
+            "rotation": {"roll_bins": None, "pitch_bins": None, "yaw_bins": None},
+        }
+        if gs_params is None:
+            for bin_type in self.range_bins.keys():
+                for bin_key in self.range_bins[bin_type].keys():
+                    bin_policy[bin_type][bin_key] = np.linspace(*self.range_bins[bin_type][bin_key], self.num_bins[bin_type][bin_key] + 1)
+            print(f"use uniform bin grids ... \n{bin_policy}")
+        else:
+            for bin_type in self.range_bins.keys():
+                for bin_key in self.range_bins[bin_type].keys():
+                    mu = gs_params[bin_key.split("_")[0].lower()]["mu"]
+                    sigma = max(gs_params[bin_key.split("_")[0].lower()]["sigma"], min_sigma)
+                    bin_bound_prob = np.linspace(
+                        norm.cdf(self.range_bins[bin_type][bin_key][0], loc=mu, scale=sigma),
+                        norm.cdf(self.range_bins[bin_type][bin_key][1], loc=mu, scale=sigma),
+                        self.num_bins[bin_type][bin_key] + 1,
+                    )
+                    bin_boundary = norm.ppf(bin_bound_prob, loc=mu, scale=sigma)
+                    bin_policy[bin_type][bin_key] = np.clip(
+                        bin_boundary,
+                        self.range_bins[bin_type][bin_key][0],
+                        self.range_bins[bin_type][bin_key][1],
+                    ).tolist()  # for serialization
+            print(f"calculate bin grids from gaussians \n{bin_policy}")
+        return bin_policy
+
+    def get_norm_meshgrid(self, bin_policy):
+        grids = []
+        policy = {k1: {k2: np.array(v2) for k2, v2 in v1.items()} for k1, v1 in bin_policy.items()}
+        # NOTE: use the unified k,v order of range_bins (theta/phi/r, roll/pitch/yaw)
+        for bin_type in self.range_bins.keys():
+            bounds = []
+            for bin_key in self.range_bins[bin_type].keys():
+                minb, maxb = self.range_bins[bin_type][bin_key][0], self.range_bins[bin_type][bin_key][1]
+                bin_boundary = policy[bin_type][bin_key]
+                bin_center = (bin_boundary[:-1] + bin_boundary[1:]) / 2
+                bin_center = np.concatenate([np.array([minb]), bin_center, np.array([maxb])])  # padding
+                bin_center = (bin_center - minb) / (maxb - minb)  # normalize to [0, 1]
+                bounds.append(bin_center)
+            # generate grids
+            grid_x, grid_y, grid_z = np.meshgrid(*bounds)
+            grids += [np.stack([grid_x, grid_y, grid_z], -1).reshape(-1, 3)]
+        return grids[0], grids[1]  # (N, 3)
+
+    def spatial_embedding_adaption(self, gs_params, embeddings: torch.nn.Embedding, min_sigma=0.0, adpt_feature=False):
+        """Adapt the spatial action embeddings to a new gaussian bin policy.
+        gs_params: Dict, gaussian statistics defining the new bin policy.
+        embeddings: torch.nn.Embedding, with weight tensor of shape (S, E).
+        """
+        from scipy.interpolate import griddata
+        new_policy = self.get_bin_policy(gs_params, min_sigma=min_sigma)
+        trans_grids0, rot_grids0 = self.get_norm_meshgrid(self.bin_policy)
+        trans_grids1, rot_grids1 = self.get_norm_meshgrid(new_policy)
+
+        print("overwrite bin policy and tokenizer bins ...")
+        self.bin_policy = new_policy
+        self.min_sigma = min_sigma
+        self.translation_tokenizer.set_bins(new_policy["translation"])
+        self.rotation_tokenizer.set_bins(new_policy["rotation"])
+
+        if adpt_feature:
+            emb_data = embeddings.weight.data  # (S, E)
+            _, E = emb_data.shape
+
+            # translation
+            m, n, k = (self.num_bins["translation"][key] for key in ["theta_bins", "phi_bins", "r_bins"])
+            N = m * n * k
+            trans_emb_data = emb_data[:N].reshape(m, n, k, -1).permute(3, 0, 1, 2)  # (E, m, n, k)
+            pad_emb = torch.nn.functional.pad(trans_emb_data, (1, 1, 1, 1, 1, 1), "replicate").permute(1, 2, 3, 0).reshape(-1, E)
+            adpt_trans_emb = griddata(trans_grids0, pad_emb.float().cpu().numpy(), trans_grids1, method="linear")
+            adpt_trans_emb = adpt_trans_emb.reshape(m + 2, n + 2, k + 2, E)[1:-1, 1:-1, 1:-1]
+
+            # rotation
+            m1, n1, k1 = (self.num_bins["rotation"][key] for key in ["roll_bins", "pitch_bins", "yaw_bins"])
+            M = m1 * n1 * k1
+            rot_emb_data = emb_data[N : N + M].reshape(m1, n1, k1, -1).permute(3, 0, 1, 2)  # (E, m, n, k)
+            pad_emb = torch.nn.functional.pad(rot_emb_data, (1, 1, 1, 1, 1, 1), "replicate").permute(1, 2, 3, 0).reshape(-1, E)
+            adpt_rot_emb = griddata(rot_grids0, pad_emb.float().cpu().numpy(), rot_grids1, method="linear")
+            adpt_rot_emb = adpt_rot_emb.reshape(m1 + 2, n1 + 2, k1 + 2, E)[1:-1, 1:-1, 1:-1]
+
+            # set data
+            device, dtype = embeddings.weight.data.device, embeddings.weight.data.dtype
+            embeddings.weight.data[:N] = torch.tensor(adpt_trans_emb.reshape(-1, E), device=device).to(dtype)
+            embeddings.weight.data[N : N + M] = torch.tensor(adpt_rot_emb.reshape(-1, E), device=device).to(dtype)
+        print("DONE! adapted spatial embeddings to the new gaussian distribution.")
+        print(embeddings.weight.data)
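
Taken together, SpatialActionTokenizer maps one 7-DoF action (x/y/z translation, roll/pitch/yaw rotation, gripper) to exactly three tokens, one from each disjoint sub-vocabulary, and decodes them back to bin centers. A minimal round-trip sketch, assuming this repo is checked out locally so action_tokenizer is importable, a generic Gemma-2 tokenizer, and illustrative bin counts (the shipped checkpoint defines its own counts and gaussian bin policy):

import numpy as np
from transformers import AutoTokenizer
from action_tokenizer import SpatialActionTokenizer

# illustrative bin counts only, not the checkpoint's real configuration
num_bins = {
    "translation": {"theta_bins": 8, "phi_bins": 32, "r_bins": 32},
    "rotation": {"roll_bins": 16, "pitch_bins": 16, "yaw_bins": 16},
    "gripper": 2,
}
tok = AutoTokenizer.from_pretrained("google/gemma-2-2b")
action_tokenizer = SpatialActionTokenizer(tok, num_bins=num_bins)  # gs_params=None -> uniform bins

action = np.array([0.1, -0.2, 0.3, 0.0, 0.1, -0.1, 1.0])           # (7,) continuous action in [-1, 1]
tokens = action_tokenizer(action)                                  # (1, 3) array of '<ACTIONxxxxx>' strings
ids = np.array([tok.convert_tokens_to_ids(t) for t in tokens[0]])  # (3,) token ids
recovered = action_tokenizer.decode_token_ids_to_actions(ids)      # (1, 7) quantized approximation of action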
config.json ADDED
@@ -0,0 +1,320 @@
+{
+  "_name_or_path": "/gpfs/data/fs72723/vtuan/fine-tuning-vla/SpatialVLA/pretrained/spatialvla-4b-224-pt",
+  "_vocab_size": 265347,
+  "action_token_begin_idx": 257153,
+  "architectures": [
+    "SpatialVLAForConditionalGeneration"
+  ],
+  "auto_map": {
+    "AutoConfig": "configuration_spatialvla.SpatialVLAConfig",
+    "AutoModel": "modeling_spatialvla.SpatialVLAForConditionalGeneration"
+  },
+  "bos_token_id": 2,
+  "ego3d_patch_reso": 2,
+  "eos_token_id": 1,
+  "hidden_size": 2048,
+  "image_token_index": 257152,
+  "model_type": "spatialvla",
+  "n_freqs": 8,
+  "num_hidden_layers": 26,
+  "pad_token_id": 0,
+  "projection_dim": 2304,
+  "spatial_token_num": 8194,
+  "text_config": {
+    "_attn_implementation_autoset": true,
+    "architectures": [
+      "Gemma2ForCausalLM"
+    ],
+    "eos_token_id": [
+      1,
+      107
+    ],
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 2304,
+    "intermediate_size": 9216,
+    "model_type": "gemma2",
+    "num_hidden_layers": 26,
+    "num_image_tokens": 256,
+    "num_key_value_heads": 4,
+    "tie_word_embeddings": false,
+    "torch_dtype": "bfloat16",
+    "vocab_size": 265347
+  },
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.47.0",
+  "use_spatial_token": true,
+  "use_vision_zoe": true,
+  "vision_config": {
+    "hidden_size": 1152,
+    "intermediate_size": 4304,
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "num_image_tokens": 256,
+    "num_positions": 256,
+    "patch_size": 14,
+    "projection_dim": 2304,
+    "torch_dtype": "bfloat16",
+    "vision_use_head": false
+  },
+  "vision_zoe_config": {
+    "_attn_implementation_autoset": true,
+    "_name_or_path": "Intel/zoedepth-nyu-kitti",
+    "add_cross_attention": false,
+    "add_projection": false,
+    "architectures": [
+      "ZoeDepthForDepthEstimation"
+    ],
+    "attractor_alpha": 1000,
+    "attractor_gamma": 2,
+    "attractor_kind": "mean",
+    "backbone": null,
+    "backbone_config": {
+      "_attn_implementation_autoset": false,
+      "_name_or_path": "",
+      "add_cross_attention": false,
+      "add_fpn": false,
+      "architectures": null,
+      "attention_probs_dropout_prob": 0.0,
+      "auxiliary_channels": 256,
+      "auxiliary_concat_input": false,
+      "auxiliary_loss_weight": 0.4,
+      "auxiliary_num_convs": 1,
+      "bad_words_ids": null,
+      "begin_suppress_tokens": null,
+      "bos_token_id": null,
+      "chunk_size_feed_forward": 0,
+      "cross_attention_hidden_size": null,
+      "decoder_start_token_id": null,
+      "diversity_penalty": 0.0,
+      "do_sample": false,
+      "drop_path_rate": 0.1,
+      "early_stopping": false,
+      "encoder_no_repeat_ngram_size": 0,
+      "eos_token_id": null,
+      "exponential_decay_length_penalty": null,
+      "finetuning_task": null,
+      "forced_bos_token_id": null,
+      "forced_eos_token_id": null,
+      "hidden_act": "gelu",
+      "hidden_dropout_prob": 0.0,
+      "hidden_size": 1024,
+      "id2label": {
+        "0": "LABEL_0",
+        "1": "LABEL_1"
+      },
+      "image_size": 384,
+      "initializer_range": 0.02,
+      "intermediate_size": 4096,
+      "is_decoder": false,
+      "is_encoder_decoder": false,
+      "label2id": {
+        "LABEL_0": 0,
+        "LABEL_1": 1
+      },
+      "layer_norm_eps": 1e-12,
+      "layer_scale_init_value": 0.1,
+      "length_penalty": 1.0,
+      "max_length": 20,
+      "min_length": 0,
+      "model_type": "beit",
+      "no_repeat_ngram_size": 0,
+      "num_attention_heads": 16,
+      "num_beam_groups": 1,
+      "num_beams": 1,
+      "num_channels": 3,
+      "num_hidden_layers": 24,
+      "num_return_sequences": 1,
+      "out_features": [
+        "stage6",
+        "stage12",
+        "stage18",
+        "stage24"
+      ],
+      "out_indices": [
+        6,
+        12,
+        18,
+        24
+      ],
+      "output_attentions": false,
+      "output_hidden_states": false,
+      "output_scores": false,
+      "pad_token_id": null,
+      "patch_size": 16,
+      "pool_scales": [
+        1,
+        2,
+        3,
+        6
+      ],
+      "prefix": null,
+      "problem_type": null,
+      "pruned_heads": {},
+      "remove_invalid_values": false,
+      "repetition_penalty": 1.0,
+      "reshape_hidden_states": false,
+      "return_dict": true,
+      "return_dict_in_generate": false,
+      "semantic_loss_ignore_index": 255,
+      "sep_token_id": null,
+      "stage_names": [
+        "stem",
+        "stage1",
+        "stage2",
+        "stage3",
+        "stage4",
+        "stage5",
+        "stage6",
+        "stage7",
+        "stage8",
+        "stage9",
+        "stage10",
+        "stage11",
+        "stage12",
+        "stage13",
+        "stage14",
+        "stage15",
+        "stage16",
+        "stage17",
+        "stage18",
+        "stage19",
+        "stage20",
+        "stage21",
+        "stage22",
+        "stage23",
+        "stage24"
+      ],
+      "suppress_tokens": null,
+      "task_specific_params": null,
+      "temperature": 1.0,
+      "tf_legacy_loss": false,
+      "tie_encoder_decoder": false,
+      "tie_word_embeddings": true,
+      "tokenizer_class": null,
+      "top_k": 50,
+      "top_p": 1.0,
+      "torch_dtype": null,
+      "torchscript": false,
+      "typical_p": 1.0,
+      "use_absolute_position_embeddings": false,
+      "use_auxiliary_head": true,
+      "use_bfloat16": false,
+      "use_mask_token": false,
+      "use_mean_pooling": true,
+      "use_relative_position_bias": true,
+      "use_shared_relative_position_bias": false,
+      "vocab_size": 8192
+    },
+    "backbone_hidden_size": 1024,
+    "bad_words_ids": null,
+    "batch_norm_eps": 1e-05,
+    "begin_suppress_tokens": null,
+    "bin_centers_type": "softplus",
+    "bin_configurations": [
+      {
+        "max_depth": 10.0,
+        "min_depth": 0.001,
+        "n_bins": 64,
+        "name": "nyu"
+      },
+      {
+        "max_depth": 80.0,
+        "min_depth": 0.001,
+        "n_bins": 64,
+        "name": "kitti"
+      }
+    ],
+    "bin_embedding_dim": 128,
+    "bos_token_id": null,
+    "bottleneck_features": 256,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": null,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "fusion_hidden_size": 256,
+    "head_in_index": -1,
+    "hidden_act": "gelu",
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_range": 0.02,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "max_temp": 50.0,
+    "min_length": 0,
+    "min_temp": 0.0212,
+    "model_type": "zoedepth",
+    "neck_hidden_sizes": [
+      256,
+      512,
+      1024,
+      1024
+    ],
+    "no_repeat_ngram_size": 0,
+    "num_attractors": [
+      16,
+      8,
+      4,
+      1
+    ],
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_patch_transformer_layers": 4,
+    "num_relative_features": 32,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": null,
+    "patch_transformer_hidden_size": 128,
+    "patch_transformer_intermediate_size": 1024,
+    "patch_transformer_num_attention_heads": 4,
+    "prefix": null,
+    "problem_type": null,
+    "pruned_heads": {},
+    "readout_type": "project",
+    "reassemble_factors": [
+      4,
+      2,
+      1,
+      0.5
+    ],
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": "bfloat16",
+    "torchscript": false,
+    "typical_p": 1.0,
+    "use_batch_norm_in_fusion_residual": false,
+    "use_bfloat16": false,
+    "use_bias_in_fusion_residual": null,
+    "use_pretrained_backbone": false
+  }
+}
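
The auto_map block above is what lets transformers resolve the custom classes shipped in this repo at load time. A minimal loading sketch, assuming trust in the remote code; the repo id is a hypothetical placeholder for this checkpoint's hub path:

import torch
from transformers import AutoConfig, AutoModel

repo = "user/spatialvla-checkpoint"  # hypothetical placeholder
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)  # resolves to SpatialVLAConfig
model = AutoModel.from_pretrained(repo, torch_dtype=torch.bfloat16, trust_remote_code=True)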
configuration_spatialvla.py ADDED
@@ -0,0 +1,119 @@
+# coding=utf-8
+# Copyright 2024 Microsoft Research & University of Wisconsin-Madison and the HuggingFace Inc. team. All rights reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import warnings
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+from transformers import CONFIG_MAPPING, AutoConfig
+
+logger = logging.get_logger(__name__)
+
+class SpatialVLAConfig(PretrainedConfig):
+    model_type = "spatialvla"
+    sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig, "vision_zoe_config": AutoConfig}
+
+    def __init__(
+        self,
+        vision_config=None,
+        text_config=None,
+        ignore_index=-100,
+        image_token_index=256000,
+        vocab_size=257152,
+        projection_dim=2048,
+        hidden_size=2048,
+        vision_zoe_config=None,
+        action_token_begin_idx=None,
+        spatial_token_num=259,
+        use_spatial_token=False,
+        ego3d_patch_reso=4,
+        n_freqs=8,
+        use_vision_zoe=True,
+        **kwargs,
+    ):
+        self._ignore_index = ignore_index
+        self.image_token_index = image_token_index
+        self._vocab_size = vocab_size
+        self.projection_dim = projection_dim
+        self.hidden_size = hidden_size
+        self.vision_config = vision_config
+        self.is_encoder_decoder = False
+
+        if isinstance(self.vision_config, dict):
+            vision_config["model_type"] = (
+                vision_config["model_type"] if "model_type" in vision_config else "siglip_vision_model"
+            )
+            self.vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
+        elif vision_config is None:
+            self.vision_config = CONFIG_MAPPING["siglip_vision_model"](
+                intermediate_size=4096,
+                hidden_size=1152,
+                patch_size=14,
+                image_size=224,
+                num_hidden_layers=27,
+                num_attention_heads=16,
+                vocab_size=257152,
+                vision_use_head=False,
+            )
+
+        self.text_config = text_config
+        if isinstance(self.text_config, dict):
+            text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "gemma2"
+            self.text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
+        elif text_config is None:
+            self.text_config = CONFIG_MAPPING["gemma2"](
+                hidden_size=2048,
+                num_hidden_layers=18,
+                intermediate_size=16384,
+                num_attention_heads=8,
+                num_key_value_heads=1,
+                is_encoder_decoder=False,
+                vocab_size=vocab_size,
+            )
+        self.text_config.num_image_tokens = (self.vision_config.image_size // self.vision_config.patch_size) ** 2
+        self.vision_config.projection_dim = projection_dim
+
+        # vision zoe config
+        self.vision_zoe_config = vision_zoe_config
+        if isinstance(self.vision_zoe_config, dict):
+            vision_zoe_config["model_type"] = vision_zoe_config["model_type"] if "model_type" in vision_zoe_config else "zoedepth"
+            self.vision_zoe_config = CONFIG_MAPPING[vision_zoe_config["model_type"]](**vision_zoe_config)
+
+        # additional attributes
+        self.action_token_begin_idx = action_token_begin_idx
+        self.spatial_token_num = spatial_token_num
+        self.use_spatial_token = use_spatial_token
+        self.ego3d_patch_reso = ego3d_patch_reso
+        self.n_freqs = n_freqs
+        self.use_vision_zoe = use_vision_zoe
+
+        super().__init__(**kwargs)
+
+    @property
+    def ignore_index(self):
+        warnings.warn(
+            "The `ignore_index` attribute is deprecated and will be removed in v4.47.",
+            FutureWarning,
+        )
+        return self._ignore_index
+
+    @ignore_index.setter
+    def ignore_index(self, value):
+        self._ignore_index = value
+
+    def to_dict(self):
+        output = super().to_dict()
+        output.pop("_ignore_index", None)
+        return output
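
Because the sub-configs arrive as plain dicts and are materialized through CONFIG_MAPPING keyed on model_type, the config can also be built programmatically. A small sketch with mostly default values (illustrative, not the shipped checkpoint's settings):

from configuration_spatialvla import SpatialVLAConfig

cfg = SpatialVLAConfig(
    vision_config={"model_type": "siglip_vision_model", "image_size": 224, "patch_size": 14},
    text_config={"model_type": "gemma2"},
    vision_zoe_config={"model_type": "zoedepth"},
    use_spatial_token=True,
    spatial_token_num=8194,
)
print(cfg.text_config.num_image_tokens)  # (224 // 14) ** 2 == 256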
generation_config.json ADDED
@@ -0,0 +1,8 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 2,
+  "cache_implementation": "hybrid",
+  "eos_token_id": 1,
+  "pad_token_id": 0,
+  "transformers_version": "4.47.0"
+}
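
These defaults are read into a GenerationConfig when the model is loaded; the "hybrid" cache implementation matches Gemma2's alternating sliding-window and global attention layers. A quick check, again with a hypothetical placeholder repo id:

from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("user/spatialvla-checkpoint")  # hypothetical placeholder
print(gen_cfg.cache_implementation)  # "hybrid"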
global_step1500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce878dda01256583cd15295929482f38c1148619e46df7c26b0f0e34c6dfa803
+size 6748683836
global_step1500/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:269682afaeef9e9663ceb1d9a5e3e8dc689d89e975645082855d4cd13db9b74a
+size 6748638460
global_step1500/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9cd8e2a36d66af131144db03177c7b4a1c4d94a16a9a1904253aac9edfe79332
+size 8056300410
latest ADDED
@@ -0,0 +1 @@
+global_step1500
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5cb00fbcd893cbc7c03cda903e815bce85191c5a99254fa5c8338c02b345be27
+size 4969426016
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd924ff02616c2ebe6bf74967ea7e6403e509df5ff7bde1ba5cda87dc851a6c8
+size 3086476734
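
The global_step1500 shards and the latest tag above form a DeepSpeed ZeRO training checkpoint (two bf16 optimizer-state shards plus one model-states file), while the *.safetensors shards carry the consolidated weights used for inference. If a merged fp32 state dict is ever needed from the DeepSpeed shards, DeepSpeed's bundled zero_to_fp32 utility can usually produce one; a sketch, assuming deepspeed is installed and this repo is checked out locally:

from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# reads global_step1500/* under the repo root; the `latest` file names that tag
state_dict = get_fp32_state_dict_from_zero_checkpoint(".", tag="global_step1500")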
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
modeling_gemma2.py ADDED
@@ -0,0 +1,1283 @@
+# custom gemma2 to support flash_attention_2,
+# source from https://github.com/huggingface/transformers/blob/v4.47.0/src/transformers/models/gemma2/modeling_gemma2.py
+# coding=utf-8
+# Copyright 2024 Google Inc. HuggingFace Inc. team. All rights reserved.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+
+from transformers.activations import ACT2FN
+from transformers.cache_utils import Cache, HybridCache
+from transformers.generation import GenerationMixin
+from transformers.modeling_outputs import (
+    BaseModelOutputWithPast,
+    CausalLMOutputWithPast,
+    SequenceClassifierOutputWithPast,
+    TokenClassifierOutput,
+)
+from transformers.modeling_utils import PreTrainedModel
+from transformers.utils import (
+    add_code_sample_docstrings,
+    add_start_docstrings,
+    add_start_docstrings_to_model_forward,
+    is_flash_attn_2_available,
+    is_flash_attn_greater_or_equal,
+    is_torch_greater_or_equal,
+    logging,
+    replace_return_docstrings,
+    is_flash_attn_greater_or_equal_2_10,
+)
+from transformers import Gemma2Config
+
+
+if is_flash_attn_2_available():
+    from transformers.modeling_flash_attention_utils import _flash_attention_forward
+
+if is_torch_greater_or_equal("2.5"):
+    from torch.nn.attention.flex_attention import flex_attention
+
+logger = logging.get_logger(__name__)
+
+
+_CHECKPOINT_FOR_DOC = "google/gemma2-7b"
+_CONFIG_FOR_DOC = "Gemma2Config"
+
+
+class Gemma2RMSNorm(nn.Module):
+    def __init__(self, dim: int, eps: float = 1e-6):
+        super().__init__()
+        self.eps = eps
+        self.weight = nn.Parameter(torch.zeros(dim))
+
+    def _norm(self, x):
+        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
+
+    def forward(self, x):
+        output = self._norm(x.float())
+        # Llama does x.to(float16) * w whilst Gemma2 is (x * w).to(float16)
+        # See https://github.com/huggingface/transformers/pull/29402
+        output = output * (1.0 + self.weight.float())
+        return output.type_as(x)
+
+    def extra_repr(self):
+        return f"{tuple(self.weight.shape)}, eps={self.eps}"
+
+
+class Gemma2MLP(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.config = config
+        self.hidden_size = config.hidden_size
+        self.intermediate_size = config.intermediate_size
+        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+        self.act_fn = ACT2FN[config.hidden_activation]
+
+    def forward(self, x):
+        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+
+
+class Gemma2RotaryEmbedding(nn.Module):
+    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+        super().__init__()
+
+        self.dim = dim
+        self.max_position_embeddings = max_position_embeddings
+        self.base = base
+        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float() / self.dim))
+        self.register_buffer("inv_freq", tensor=inv_freq, persistent=False)
+
+    @torch.no_grad()
+    def forward(self, x, position_ids, seq_len=None):
+        # x: [bs, num_attention_heads, seq_len, head_size]
+        self.inv_freq.to(x.device)
+        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
+        position_ids_expanded = position_ids[:, None, :].float()
+        # Force float32 since bfloat16 loses precision on long contexts
+        # See https://github.com/huggingface/transformers/pull/29285
+        device_type = x.device.type
+        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
+        with torch.autocast(device_type=device_type, enabled=False):
+            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+            emb = torch.cat((freqs, freqs), dim=-1)
+            cos = emb.cos()
+            sin = emb.sin()
+        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+def rotate_half(x):
+    """Rotates half the hidden dims of the input."""
+    x1 = x[..., : x.shape[-1] // 2]
+    x2 = x[..., x.shape[-1] // 2 :]
+    return torch.cat((-x2, x1), dim=-1)
+
+
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+    """Applies Rotary Position Embedding to the query and key tensors.
+
+    Args:
+        q (`torch.Tensor`): The query tensor.
+        k (`torch.Tensor`): The key tensor.
+        cos (`torch.Tensor`): The cosine part of the rotary embedding.
+        sin (`torch.Tensor`): The sine part of the rotary embedding.
+        position_ids (`torch.Tensor`, *optional*):
+            Deprecated and unused.
+        unsqueeze_dim (`int`, *optional*, defaults to 1):
+            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+    Returns:
+        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+    """
+    cos = cos.unsqueeze(unsqueeze_dim)
+    sin = sin.unsqueeze(unsqueeze_dim)
+    q_embed = (q * cos) + (rotate_half(q) * sin)
+    k_embed = (k * cos) + (rotate_half(k) * sin)
+    return q_embed, k_embed
+
+
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+    """
+    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+    """
+    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+    if n_rep == 1:
+        return hidden_states
+    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
+def eager_attention_forward(
+    config: Gemma2Config,
+    query: torch.Tensor,
+    key: torch.Tensor,
+    value: torch.Tensor,
+    mask: Optional[torch.Tensor],
+    **_kwargs,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+    key_states = repeat_kv(key, config.num_key_value_groups)
+    value_states = repeat_kv(value, config.num_key_value_groups)
+
+    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * config.scaling
+
+    if config.attn_logit_softcapping is not None:
+        attn_weights = attn_weights / config.attn_logit_softcapping
+        attn_weights = torch.tanh(attn_weights)
+        attn_weights = attn_weights * config.attn_logit_softcapping
+    if mask is not None:  # no matter the length, we just slice it
+        causal_mask = mask[:, :, :, : key_states.shape[-2]]
+        attn_weights = attn_weights + causal_mask
+
+    # upcast attention to fp32
+    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
+    attn_weights = nn.functional.dropout(attn_weights, p=config.attention_dropout, training=config.training)
+    attn_output = torch.matmul(attn_weights, value_states)
+    attn_output = attn_output.transpose(1, 2).contiguous()
+    return attn_output, attn_weights
+
+
+def flash_attention_forward(
+    config: Gemma2Config,
+    query: torch.Tensor,
+    key: torch.Tensor,
+    value: torch.Tensor,
+    mask: Optional[torch.Tensor],
+    target_dtype: torch.dtype = torch.float16,
+    **_kwargs,
+) -> Tuple[torch.Tensor, None]:
+    # NOTE: a None mask causes undefined behavior, see https://github.com/huggingface/transformers/blob/c8c8dffbe45ebef0a8dba4a51024e5e5e498596b/src/transformers/models/gemma2/modeling_gemma2.py#L211
+    seq_len = query.shape[2]
+    if mask is not None:
+        query = query[:, :, :seq_len]
+        value = value[:, :, :seq_len]
+
+    # TODO: These transposes are quite inefficient but Flash Attention requires the layout
+    # [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor rotary embedding
+    query_states = query.transpose(1, 2)
+    key_states = key.transpose(1, 2)
+    value_states = value.transpose(1, 2)
+
+    dropout_rate = config.attention_dropout if config.training else 0.0
+
+    input_dtype = query_states.dtype
+    if input_dtype == torch.float32:
+        query_states = query_states.to(target_dtype)
+        key_states = key_states.to(target_dtype)
+        value_states = value_states.to(target_dtype)
+
+    attn_output = _flash_attention_forward(
+        query_states,
+        key_states,
+        value_states,
+        mask,
+        seq_len,
+        dropout=dropout_rate,
+        softmax_scale=config.scaling,
+        is_causal=config.is_causal,
+        sliding_window=config.sliding_window,
+        use_top_left_mask=config._flash_attn_uses_top_left_mask,
+        softcap=config.attn_logit_softcapping if is_flash_attn_greater_or_equal("2.6.0") else None,
+    )
+
+    return attn_output, None
+
+
+def flex_attention_forward(
+    config: Gemma2Config,
+    query: torch.Tensor,
+    key: torch.Tensor,
+    value: torch.Tensor,
+    mask: Optional[torch.Tensor],
+    output_attentions: bool = False,
+    **_kwargs,
+) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+    def tanh_softcap(score, b, h, q_idx, kv_idx):
+        soft_cap = config.attn_logit_softcapping
+        score = soft_cap * torch.tanh(score / soft_cap)
+        if mask is not None:
+            return score + mask[b][0][q_idx][kv_idx]
+        return score
+
+    attn_output = flex_attention(
+        query,
+        key,
+        value,
+        score_mod=tanh_softcap,
+        enable_gqa=True,
+        scale=config.scaling,
+        return_lse=output_attentions,
+    )
+    if not output_attentions:
+        attn_weights = None
+    else:
+        attn_output, attn_weights = attn_output
+
+    attn_output = attn_output.transpose(1, 2).contiguous()
+    return attn_output, attn_weights
+
+
+def sdpa_attention_forward(
+    config: Gemma2Config,
+    query: torch.Tensor,
+    key: torch.Tensor,
+    value: torch.Tensor,
+    mask: Optional[torch.Tensor],
+    **_kwargs,
+) -> Tuple[torch.Tensor, None]:
+    key = repeat_kv(key, config.num_key_value_groups)
+    value = repeat_kv(value, config.num_key_value_groups)
+
+    causal_mask = mask
+    if mask is not None:
+        causal_mask = causal_mask[:, :, :, : key.shape[-2]]
+
+    # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
+    # Reference: https://github.com/pytorch/pytorch/issues/112577.
+    if query.device.type == "cuda" and causal_mask is not None:
+        query = query.contiguous()
+        key = key.contiguous()
+        value = value.contiguous()
+
+    # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
+    # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
+    is_causal = True if causal_mask is None and query.shape[1] > 1 else False
+
+    attn_output = torch.nn.functional.scaled_dot_product_attention(
+        query,
+        key,
+        value,
+        attn_mask=causal_mask,
+        dropout_p=config.attention_dropout if config.training else 0.0,
+        is_causal=is_causal,
+        scale=config.scaling,
+    )
+    attn_output = attn_output.transpose(1, 2).contiguous()
+    return attn_output, None
+
+
+GEMMA2_ATTENTION_FUNCTION = {
+    "flash_attention_2": flash_attention_forward,
+    "flex_attention": flex_attention_forward,
+    "eager": eager_attention_forward,
+    "sdpa": sdpa_attention_forward,
+}
+
+
325
+ class Gemma2Attention(nn.Module):
326
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
327
+
328
+ def __init__(self, config: Gemma2Config, layer_idx: Optional[int] = None):
329
+ super().__init__()
330
+ self.config = config
331
+ self.layer_idx = layer_idx
332
+
333
+ self.attention_dropout = config.attention_dropout
334
+ self.hidden_size = config.hidden_size
335
+ self.num_heads = config.num_attention_heads
336
+ self.head_dim = config.head_dim
337
+ self.num_key_value_heads = config.num_key_value_heads
338
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
339
+ self.max_position_embeddings = config.max_position_embeddings
340
+ self.rope_theta = config.rope_theta
341
+ self.is_causal = True
342
+ self.scaling = config.query_pre_attn_scalar**-0.5
343
+ self.sliding_window = config.sliding_window if not bool(layer_idx % 2) else None
344
+ self.attn_logit_softcapping = config.attn_logit_softcapping
345
+ if self.hidden_size % self.num_heads != 0:
346
+ raise ValueError(
347
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
348
+ f" and `num_heads`: {self.num_heads})."
349
+ )
350
+
351
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
352
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
353
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
354
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
355
+ self.rotary_emb = Gemma2RotaryEmbedding(
356
+ self.head_dim,
357
+ max_position_embeddings=self.max_position_embeddings,
358
+ base=self.rope_theta,
359
+ )
360
+
361
+ # NOTE: gemma2 do not include _flash_attn_uses_top_left_mask for flash attention
362
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
363
+
364
+ def forward(
365
+ self,
366
+ hidden_states: torch.Tensor,
367
+ attention_mask: Optional[torch.Tensor] = None,
368
+ position_ids: Optional[torch.LongTensor] = None,
369
+ past_key_value: Optional[Cache] = None,
370
+ output_attentions: bool = False,
371
+ use_cache: bool = False,
372
+ cache_position: Optional[torch.LongTensor] = None,
373
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
374
+ bsz, q_len, _ = hidden_states.size()
375
+
376
+ query_states = self.q_proj(hidden_states)
377
+ key_states = self.k_proj(hidden_states)
378
+ value_states = self.v_proj(hidden_states)
379
+
380
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
381
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
382
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
383
+
384
+ cos, sin = self.rotary_emb(value_states, position_ids)
385
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
386
+
387
+ if past_key_value is not None:
388
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
389
+ cache_kwargs = {
390
+ "sin": sin,
391
+ "cos": cos,
392
+ "sliding_window": self.sliding_window,
393
+ "cache_position": cache_position,
394
+ }
395
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
396
+
397
+ if output_attentions and self.config._attn_implementation in ["sdpa", "flash_attention_2"]:
398
+ logger.warning_once("Setting `attention_type` to `flex_attention` because `output_attentions=True`")
399
+ attention_type = "flex_attention"
400
+ else:
401
+ attention_type = self.config._attn_implementation
402
+
403
+ attn_output, attn_weights = GEMMA2_ATTENTION_FUNCTION[attention_type](
404
+ self, query_states, key_states, value_states, attention_mask, output_attentions=output_attentions
405
+ )
406
+
407
+ attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
408
+ attn_output = self.o_proj(attn_output)
409
+
410
+ if not output_attentions:
411
+ attn_weights = None
412
+
413
+ return attn_output, attn_weights, past_key_value
414
+
415
+
416
+ class Gemma2FlashAttention2(Gemma2Attention):
417
+ def __init__(self, config: Gemma2Config, layer_idx: Optional[int] = None):
418
+ super().__init__(config, layer_idx)
419
+ self.config._attn_implementation = "flash_attention_2"
420
+ logger.warning_once(
421
+ "The `Gemma2FlashAttention2` class is deprecated in favor of simply modifying the `config._attn_implementation`"
422
+ "attribute of the `GemmaAttention` class! It will be removed in v4.48"
423
+ )
424
+
425
+
426
+ class Gemma2SdpaAttention(Gemma2Attention):
427
+ def __init__(self, config: Gemma2Config, layer_idx: Optional[int] = None):
428
+ super().__init__(config, layer_idx)
429
+ self.config._attn_implementation = "sdpa"
430
+ logger.warning_once(
431
+ "The `Gemma2FlashAttention2` class is deprecated in favor of simply modifying the `config._attn_implementation`"
432
+ "attribute of the `GemmaAttention` class! It will be removed in v4.48"
433
+ )
434
+
435
+
436
+ class Gemma2DecoderLayer(nn.Module):
437
+ def __init__(self, config: Gemma2Config, layer_idx: int):
438
+ super().__init__()
439
+ self.hidden_size = config.hidden_size
440
+ self.config = config
441
+ self.is_sliding = not bool(layer_idx % 2)
442
+ self.self_attn = Gemma2Attention(config=config, layer_idx=layer_idx)
443
+ self.mlp = Gemma2MLP(config)
444
+ self.input_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
445
+ self.post_attention_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
446
+
447
+ self.pre_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
448
+ self.post_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
449
+ self.sliding_window = config.sliding_window
450
+
451
+ def forward(
452
+ self,
453
+ hidden_states: torch.Tensor,
454
+ attention_mask: Optional[torch.Tensor] = None,
455
+ position_ids: Optional[torch.LongTensor] = None,
456
+ past_key_value: Optional[Cache] = None,
457
+ output_attentions: Optional[bool] = False,
458
+ use_cache: Optional[bool] = False,
459
+ cache_position: Optional[torch.LongTensor] = None,
460
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
461
+ if self.is_sliding and attention_mask is not None: # efficient SDPA and no padding
462
+ # With flash attention, the mask comes in as a 2D tensor
463
+ if self.config._attn_implementation == "flash_attention_2":
464
+ if past_key_value is not None: # when decoding
465
+ attention_mask = attention_mask[:, -self.sliding_window :]
466
+ else:
467
+ min_dtype = torch.finfo(hidden_states.dtype).min
468
+ sliding_window_mask = torch.tril(
469
+ torch.ones_like(attention_mask, dtype=torch.bool), diagonal=-self.sliding_window
470
+ )
471
+ attention_mask = torch.where(sliding_window_mask, min_dtype, attention_mask)
472
+ if attention_mask.shape[-1] <= 1: # when decoding
473
+ attention_mask = attention_mask[:, :, :, -self.sliding_window :]
474
+
475
+ residual = hidden_states
476
+
477
+ hidden_states = self.input_layernorm(hidden_states)
478
+
479
+ # Self Attention
480
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
481
+ hidden_states=hidden_states,
482
+ attention_mask=attention_mask,
483
+ position_ids=position_ids,
484
+ past_key_value=past_key_value,
485
+ output_attentions=output_attentions,
486
+ use_cache=use_cache,
487
+ cache_position=cache_position,
488
+ )
489
+ hidden_states = self.post_attention_layernorm(hidden_states)
490
+ hidden_states = residual + hidden_states
491
+
492
+ residual = hidden_states
493
+ hidden_states = self.pre_feedforward_layernorm(hidden_states)
494
+ hidden_states = self.mlp(hidden_states)
495
+ hidden_states = self.post_feedforward_layernorm(hidden_states)
496
+ hidden_states = residual + hidden_states
497
+
498
+ outputs = (hidden_states,)
499
+
500
+ if output_attentions:
501
+ outputs += (self_attn_weights,)
502
+
503
+ if use_cache:
504
+ outputs += (present_key_value,)
505
+
506
+ return outputs
507
+
508
+
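+ def _sliding_window_mask_example(seq_len: int = 8, window: int = 4) -> torch.Tensor:
+     """Illustrative sketch (assumed sizes, not config values) of the boolean mask built in
+     `Gemma2DecoderLayer.forward` for sliding layers: True marks key positions more than
+     `window` steps behind the query, which are then filled with the dtype minimum."""
+     attention_mask = torch.zeros(seq_len, seq_len)
+     return torch.tril(torch.ones_like(attention_mask, dtype=torch.bool), diagonal=-window)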
509
+ GEMMA2_START_DOCSTRING = r"""
510
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
511
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
512
+ etc.)
513
+
514
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
515
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
516
+ and behavior.
517
+
518
+ Parameters:
519
+ config ([`Gemma2Config`]):
520
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
521
+ load the weights associated with the model, only the configuration. Check out the
522
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
523
+ """
524
+
525
+
526
+ @add_start_docstrings(
527
+ "The bare Gemma2 Model outputting raw hidden-states without any specific head on top.",
528
+ GEMMA2_START_DOCSTRING,
529
+ )
530
+ class Gemma2PreTrainedModel(PreTrainedModel):
531
+ config_class = Gemma2Config
532
+ base_model_prefix = "model"
533
+ supports_gradient_checkpointing = True
534
+ _no_split_modules = ["Gemma2DecoderLayer"]
535
+ _skip_keys_device_placement = ["past_key_values"]
536
+ _supports_flash_attn_2 = True
537
+ _supports_sdpa = True
538
+ _supports_cache_class = True
539
+ _supports_quantized_cache = False
540
+ _supports_static_cache = True
541
+
542
+ def _init_weights(self, module):
543
+ std = self.config.initializer_range
544
+ if isinstance(module, nn.Linear):
545
+ module.weight.data.normal_(mean=0.0, std=std)
546
+ if module.bias is not None:
547
+ module.bias.data.zero_()
548
+ elif isinstance(module, nn.Embedding):
549
+ module.weight.data.normal_(mean=0.0, std=std)
550
+ if module.padding_idx is not None:
551
+ module.weight.data[module.padding_idx].zero_()
552
+
553
+ @classmethod
554
+ def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False):
555
+ """
556
+ Overloads `PreTrainedModel._check_and_enable_sdpa` so as to DISABLE torch SDPA by default on Gemma2 models.
557
+ SDPA reduces the model performance on Gemma2 because of the logits softcapping.
558
+ """
559
+ config = super()._check_and_enable_sdpa(config, hard_check_only=hard_check_only)
560
+
561
+ # if using the default path -> swap sdpa by eager
562
+ if not hard_check_only and config._attn_implementation == "sdpa":
563
+ config._attn_implementation = "eager"
564
+
565
+ return config
566
+
567
+
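+ def _softcap_example(logits: torch.Tensor, cap: float = 30.0) -> torch.Tensor:
+     """Illustrative sketch of the logit softcapping that motivates disabling SDPA above:
+     logits are squashed into (-cap, cap) via cap * tanh(logits / cap), mirroring the
+     `final_logit_softcapping` step in `Gemma2ForCausalLM.forward`. `cap=30.0` is only an
+     example value, not read from any config."""
+     return cap * torch.tanh(logits / cap)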
568
+ GEMMA2_INPUTS_DOCSTRING = r"""
569
+ Args:
570
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
571
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
572
+ it.
573
+
574
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
575
+ [`PreTrainedTokenizer.__call__`] for details.
576
+
577
+ [What are input IDs?](../glossary#input-ids)
578
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
579
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
580
+
581
+ - 1 for tokens that are **not masked**,
582
+ - 0 for tokens that are **masked**.
583
+
584
+ [What are attention masks?](../glossary#attention-mask)
585
+
586
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
587
+ [`PreTrainedTokenizer.__call__`] for details.
588
+
589
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
590
+ `past_key_values`).
591
+
592
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
593
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
594
+ information on the default strategy.
595
+
596
+ - 1 indicates the head is **not masked**,
597
+ - 0 indicates the head is **masked**.
598
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
599
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
600
+ config.n_positions - 1]`.
601
+
602
+ [What are position IDs?](../glossary#position-ids)
603
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
604
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
605
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
606
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
607
+
608
+ Two formats are allowed:
609
+ - a [`~cache_utils.Cache`] instance, see our
610
+ [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
611
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
612
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
613
+ cache format.
614
+
615
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
616
+ legacy cache format will be returned.
617
+
618
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
619
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
620
+ of shape `(batch_size, sequence_length)`.
621
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
622
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
623
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
624
+ model's internal embedding lookup matrix.
625
+ use_cache (`bool`, *optional*):
626
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
627
+ `past_key_values`).
628
+ output_attentions (`bool`, *optional*):
629
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
630
+ tensors for more detail.
631
+ output_hidden_states (`bool`, *optional*):
632
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
633
+ more detail.
634
+ return_dict (`bool`, *optional*):
635
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
636
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
637
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
638
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
639
+ the complete sequence length.
640
+ """
641
+
642
+
643
+ @add_start_docstrings(
644
+ "The bare Gemma2 Model outputting raw hidden-states without any specific head on top.",
645
+ GEMMA2_START_DOCSTRING,
646
+ )
647
+ class Gemma2Model(Gemma2PreTrainedModel):
648
+ """
649
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Gemma2DecoderLayer`]
650
+
651
+ Args:
652
+ config: Gemma2Config
653
+ """
654
+
655
+ def __init__(self, config: Gemma2Config):
656
+ super().__init__(config)
657
+ self.padding_idx = config.pad_token_id
658
+ self.vocab_size = config.vocab_size
659
+
660
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
661
+ self.layers = nn.ModuleList(
662
+ [Gemma2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
663
+ )
664
+ self.norm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
665
+
666
+ self.gradient_checkpointing = False
667
+ if getattr(config, "pretraining_tp", 1) != 1:
668
+ logger.warning_once("`pretraining_tp` is deprecated, please use `model.tensor_parallel` instead.")
669
+
670
+ # Initialize weights and apply final processing
671
+ self.post_init()
672
+
673
+ def get_input_embeddings(self):
674
+ return self.embed_tokens
675
+
676
+ def set_input_embeddings(self, value):
677
+ self.embed_tokens = value
678
+
679
+ @add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING)
680
+ def forward(
681
+ self,
682
+ input_ids: torch.LongTensor = None,
683
+ attention_mask: Optional[torch.Tensor] = None,
684
+ position_ids: Optional[torch.LongTensor] = None,
685
+ past_key_values: Optional[HybridCache] = None,
686
+ inputs_embeds: Optional[torch.FloatTensor] = None,
687
+ use_cache: Optional[bool] = None,
688
+ output_attentions: Optional[bool] = None,
689
+ output_hidden_states: Optional[bool] = None,
690
+ return_dict: Optional[bool] = None,
691
+ cache_position: Optional[torch.LongTensor] = None,
692
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
693
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
694
+ output_hidden_states = (
695
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
696
+ )
697
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
698
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
699
+
700
+ if (input_ids is None) ^ (inputs_embeds is not None):
701
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
702
+
703
+ if self.gradient_checkpointing and self.training and use_cache:
704
+ logger.warning_once(
705
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
706
+ )
707
+ use_cache = False
708
+
709
+ if inputs_embeds is None:
710
+ inputs_embeds = self.embed_tokens(input_ids)
711
+
712
+ if use_cache and past_key_values is None and not self.training:
713
+ batch_size, seq_len, _ = inputs_embeds.shape
714
+ past_key_values = HybridCache(
715
+ self.config,
716
+ batch_size=batch_size,
717
+ max_cache_len=seq_len,
718
+ device=self.device,
719
+ dtype=inputs_embeds.dtype,
720
+ )
721
+
722
+ if cache_position is None:
723
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
724
+ cache_position = torch.arange(
725
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
726
+ )
727
+
728
+ if position_ids is None:
729
+ position_ids = cache_position.unsqueeze(0)
730
+
731
+ causal_mask = self._update_causal_mask(
732
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
733
+ )
734
+
735
+ # embed positions
736
+ hidden_states = inputs_embeds
737
+
738
+ # normalized
739
+ # Gemma2 downcasts the below to float16, causing sqrt(3072)=55.4256 to become 55.5
740
+ # See https://github.com/huggingface/transformers/pull/29402
741
+ normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype)
742
+ hidden_states = hidden_states * normalizer
743
+
744
+ # decoder layers
745
+ all_hidden_states = () if output_hidden_states else None
746
+ all_self_attns = () if output_attentions else None
747
+
748
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
749
+ if output_hidden_states:
750
+ all_hidden_states += (hidden_states,)
751
+
752
+ if self.gradient_checkpointing and self.training:
753
+ layer_outputs = self._gradient_checkpointing_func(
754
+ decoder_layer.__call__,
755
+ hidden_states,
756
+ causal_mask,
757
+ position_ids,
758
+ past_key_values,
759
+ output_attentions,
760
+ use_cache,
761
+ cache_position,
762
+ )
763
+ else:
764
+ layer_outputs = decoder_layer(
765
+ hidden_states,
766
+ attention_mask=causal_mask,
767
+ position_ids=position_ids,
768
+ past_key_value=past_key_values,
769
+ output_attentions=output_attentions,
770
+ use_cache=use_cache,
771
+ cache_position=cache_position,
772
+ )
773
+
774
+ hidden_states = layer_outputs[0]
775
+
776
+ if output_attentions:
777
+ all_self_attns += (layer_outputs[1],)
778
+
779
+ hidden_states = self.norm(hidden_states)
780
+
781
+ if output_hidden_states:
782
+ all_hidden_states += (hidden_states,)
783
+
784
+ next_cache = past_key_values if use_cache else None
785
+
786
+ if not return_dict:
787
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
788
+ return BaseModelOutputWithPast(
789
+ last_hidden_state=hidden_states,
790
+ past_key_values=next_cache,
791
+ hidden_states=all_hidden_states,
792
+ attentions=all_self_attns,
793
+ )
794
+
795
+ @torch.no_grad()
796
+ def _update_causal_mask(
797
+ self,
798
+ attention_mask: torch.Tensor,
799
+ input_tensor: torch.Tensor,
800
+ cache_position: torch.Tensor,
801
+ past_key_values: HybridCache,
802
+ output_attentions: bool,
803
+ ):
804
+ # Flash Attention currently doesn't support static cache, but Gemma2 works only with static cache.
805
+ # So we will pass the attention mask as-is in any case, not only when there's padding. Then we'll use its shape
806
+ # to cut out keys/values trailing 0 used in static cache. This workaround should be compile compatible
807
+ # as it doesn't cause dynamic control issues.
808
+ if self.config._attn_implementation == "flash_attention_2":
809
+ return attention_mask
810
+
811
+ dtype, device = input_tensor.dtype, input_tensor.device
812
+ sequence_length = input_tensor.shape[1]
813
+ if isinstance(past_key_values, HybridCache):
814
+ target_length = past_key_values.get_max_cache_shape()
815
+ else:
816
+ target_length = attention_mask.shape[-1] if attention_mask is not None else input_tensor.shape[1]
817
+
818
+ # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
819
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
820
+ attention_mask,
821
+ sequence_length=sequence_length,
822
+ target_length=target_length,
823
+ dtype=dtype,
824
+ device=device,
825
+ cache_position=cache_position,
826
+ batch_size=input_tensor.shape[0],
827
+ )
828
+ return causal_mask
829
+
830
+ @staticmethod
831
+ def _prepare_4d_causal_attention_mask_with_cache_position(
832
+ attention_mask: torch.Tensor,
833
+ sequence_length: int,
834
+ target_length: int,
835
+ dtype: torch.dtype,
836
+ device: torch.device,
837
+ cache_position: torch.Tensor,
838
+ batch_size: int,
839
+ **kwargs,
840
+ ):
841
+ """
842
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
843
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
844
+
845
+ Args:
846
+ attention_mask (`torch.Tensor`):
847
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
848
+ `(batch_size, 1, query_length, key_value_length)`.
849
+ sequence_length (`int`):
850
+ The sequence length being processed.
851
+ target_length (`int`):
852
+ The target length: when generating with static cache, the mask should be as long as the static cache,
853
+ to account for the 0 padding, the part of the cache that is not filled yet.
854
+ dtype (`torch.dtype`):
855
+ The dtype to use for the 4D attention mask.
856
+ device (`torch.device`):
857
+ The device to place the 4D attention mask on.
858
+ cache_position (`torch.Tensor`):
859
+ Indices depicting the position of the input sequence tokens in the sequence.
860
+ batch_size (`torch.Tensor`):
861
+ Batch size.
862
+ """
863
+ if attention_mask is not None and attention_mask.dim() == 4:
864
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
865
+ causal_mask = attention_mask
866
+ else:
867
+ min_dtype = torch.finfo(dtype).min
868
+ causal_mask = torch.full(
869
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
870
+ )
871
+ if sequence_length != 1:
872
+ causal_mask = torch.triu(causal_mask, diagonal=1)
873
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
874
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
875
+ if attention_mask is not None:
876
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
877
+ mask_length = attention_mask.shape[-1]
878
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
879
+ padding_mask = padding_mask == 0
880
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
881
+ padding_mask, min_dtype
882
+ )
883
+
884
+ return causal_mask
885
+
886
+
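+ def _causal_mask_4d_example() -> torch.Tensor:
+     """Tiny sketch (illustrative only) of the static helper above: a 2D mask of shape
+     (batch, key_len) becomes a 4D mask of shape (batch, 1, query_len, key_len), with 0.0
+     at allowed positions and the dtype minimum everywhere else."""
+     return Gemma2Model._prepare_4d_causal_attention_mask_with_cache_position(
+         attention_mask=torch.ones(1, 4),
+         sequence_length=4,
+         target_length=4,
+         dtype=torch.float32,
+         device=torch.device("cpu"),
+         cache_position=torch.arange(4),
+         batch_size=1,
+     )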
887
+ class Gemma2ForCausalLM(Gemma2PreTrainedModel, GenerationMixin):
888
+ _tied_weights_keys = ["lm_head.weight"]
889
+ _tp_plan = {"lm_head": "colwise_rep"}
890
+
891
+ def __init__(self, config):
892
+ super().__init__(config)
893
+ self.model = Gemma2Model(config)
894
+ self.vocab_size = config.vocab_size
895
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
896
+
897
+ # Initialize weights and apply final processing
898
+ self.post_init()
899
+
900
+ def get_input_embeddings(self):
901
+ return self.model.embed_tokens
902
+
903
+ def set_input_embeddings(self, value):
904
+ self.model.embed_tokens = value
905
+
906
+ def get_output_embeddings(self):
907
+ return self.lm_head
908
+
909
+ def set_output_embeddings(self, new_embeddings):
910
+ self.lm_head = new_embeddings
911
+
912
+ def set_decoder(self, decoder):
913
+ self.model = decoder
914
+
915
+ def get_decoder(self):
916
+ return self.model
917
+
918
+ @add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING)
919
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
920
+ def forward(
921
+ self,
922
+ input_ids: torch.LongTensor = None,
923
+ attention_mask: Optional[torch.Tensor] = None,
924
+ position_ids: Optional[torch.LongTensor] = None,
925
+ past_key_values: Optional[HybridCache] = None,
926
+ inputs_embeds: Optional[torch.FloatTensor] = None,
927
+ labels: Optional[torch.LongTensor] = None,
928
+ use_cache: Optional[bool] = None,
929
+ output_attentions: Optional[bool] = None,
930
+ output_hidden_states: Optional[bool] = None,
931
+ return_dict: Optional[bool] = None,
932
+ cache_position: Optional[torch.LongTensor] = None,
933
+ num_logits_to_keep: int = 0,
934
+ **loss_kwargs,
935
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
936
+ r"""
937
+ Args:
938
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
939
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
940
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
941
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
942
+
943
+ num_logits_to_keep (`int`, *optional*):
944
+ Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
945
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
946
+ token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
947
+
948
+ Returns:
949
+
950
+ Example:
951
+
952
+ ```python
953
+ >>> from transformers import AutoTokenizer, GemmaForCausalLM
954
+
955
+ >>> model = GemmaForCausalLM.from_pretrained("google/gemma-2-9b")
956
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b")
957
+
958
+ >>> prompt = "What is your favorite condiment?"
959
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
960
+
961
+ >>> # Generate
962
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
963
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
964
+ "What is your favorite condiment?"
965
+ ```"""
966
+
967
+ if self.training and self.config._attn_implementation != "eager":
968
+ logger.warning_once(
969
+ "It is strongly recommended to train Gemma2 models with the `eager` attention implementation "
970
+ f"instead of `{self.config._attn_implementation}`. Use `eager` with `AutoModelForCausalLM.from_pretrained('<path-to-checkpoint>', attn_implementation='eager')`."
971
+ )
972
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
973
+ output_hidden_states = (
974
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
975
+ )
976
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
977
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
978
+ outputs = self.model(
979
+ input_ids=input_ids,
980
+ attention_mask=attention_mask,
981
+ position_ids=position_ids,
982
+ past_key_values=past_key_values,
983
+ inputs_embeds=inputs_embeds,
984
+ use_cache=use_cache,
985
+ output_attentions=output_attentions,
986
+ output_hidden_states=output_hidden_states,
987
+ return_dict=return_dict,
988
+ cache_position=cache_position,
989
+ )
990
+
991
+ hidden_states = outputs[0]
992
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
993
+ logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
994
+ if self.config.final_logit_softcapping is not None:
995
+ logits = logits / self.config.final_logit_softcapping
996
+ logits = torch.tanh(logits)
997
+ logits = logits * self.config.final_logit_softcapping
998
+
999
+ loss = None
1000
+ if labels is not None:
1001
+ loss = self.loss_function(logits, labels, self.vocab_size, **loss_kwargs)
1002
+
1003
+ if not return_dict:
1004
+ output = (logits,) + outputs[1:]
1005
+ return (loss,) + output if loss is not None else output
1006
+
1007
+ return CausalLMOutputWithPast(
1008
+ loss=loss,
1009
+ logits=logits,
1010
+ past_key_values=outputs.past_key_values,
1011
+ hidden_states=outputs.hidden_states,
1012
+ attentions=outputs.attentions,
1013
+ )
1014
+
1015
+ def prepare_inputs_for_generation(
1016
+ self,
1017
+ input_ids,
1018
+ past_key_values=None,
1019
+ attention_mask=None,
1020
+ inputs_embeds=None,
1021
+ cache_position=None,
1022
+ position_ids=None,
1023
+ use_cache=True,
1024
+ num_logits_to_keep=None,
1025
+ **kwargs,
1026
+ ):
1027
+ # Overwritten: has a special cache type, `HybridCache`
1028
+
1029
+ # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
1030
+ # Exception 1: when passing input_embeds, input_ids may be missing entries
1031
+ # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
1032
+ if past_key_values is not None:
1033
+ if inputs_embeds is not None: # Exception 1
1034
+ input_ids = input_ids[:, -cache_position.shape[0] :]
1035
+ elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
1036
+ input_ids = input_ids[:, cache_position]
1037
+ if attention_mask is not None and position_ids is None:
1038
+ # create position_ids on the fly for batch generation
1039
+ position_ids = attention_mask.long().cumsum(-1) - 1
1040
+ position_ids.masked_fill_(attention_mask == 0, 1)
1041
+ if past_key_values:
1042
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1043
+ # This `clone` call is needed to avoid recapturing cuda graphs with `torch.compile`'s
1044
+ # `mode="reduce-overhead`, as otherwise the input `position_ids` would have various stride
1045
+ # during the decoding. Here, simply using `.contiguous()` is not sufficient as in the
1046
+ # batch size = 1 case, `position_ids` is already contiguous but with varying stride
1047
+ # which retriggers a capture.
1048
+ position_ids = position_ids.clone(memory_format=torch.contiguous_format)
1049
+
1050
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1051
+ if inputs_embeds is not None and cache_position[0] == 0:
1052
+ model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": None}
1053
+ else:
1054
+ # The clone here is for the same reason as for `position_ids`.
1055
+ model_inputs = {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None}
1056
+
1057
+ if (
1058
+ isinstance(past_key_values, HybridCache)
1059
+ and attention_mask.ndim == 2
1060
+ and not self.config._attn_implementation == "flash_attention_2"
1061
+ ):
1062
+ if model_inputs["inputs_embeds"] is not None:
1063
+ batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape
1064
+ device = model_inputs["inputs_embeds"].device
1065
+ else:
1066
+ batch_size, sequence_length = model_inputs["input_ids"].shape
1067
+ device = model_inputs["input_ids"].device
1068
+
1069
+ attention_mask = self.model._prepare_4d_causal_attention_mask_with_cache_position(
1070
+ attention_mask,
1071
+ sequence_length=sequence_length,
1072
+ target_length=past_key_values.get_max_cache_shape(),
1073
+ dtype=self.lm_head.weight.dtype,
1074
+ device=device,
1075
+ cache_position=cache_position,
1076
+ batch_size=batch_size,
1077
+ )
1078
+
1079
+ if num_logits_to_keep is not None:
1080
+ model_inputs["num_logits_to_keep"] = num_logits_to_keep
1081
+
1082
+ model_inputs.update(
1083
+ {
1084
+ "position_ids": position_ids,
1085
+ "cache_position": cache_position,
1086
+ "past_key_values": past_key_values,
1087
+ "use_cache": use_cache,
1088
+ "attention_mask": attention_mask,
1089
+ }
1090
+ )
1091
+ return model_inputs
1092
+
1093
+
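+ # Note on `num_logits_to_keep` (illustrative): with `num_logits_to_keep=1` during
+ # generation, only `hidden_states[:, -1:, :]` is projected by `lm_head`, shrinking the
+ # logits tensor from (batch, seq_len, vocab_size) to (batch, 1, vocab_size) before softcapping.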
1094
+ @add_start_docstrings(
1095
+ """
1096
+ The Gemma2 Model transformer with a sequence classification head on top (linear layer).
1097
+
1098
+ [`Gemma2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1099
+ (e.g. GPT-2) do.
1100
+
1101
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1102
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1103
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1104
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1105
+ each row of the batch).
1106
+ """,
1107
+ GEMMA2_START_DOCSTRING,
1108
+ )
1109
+ class Gemma2ForSequenceClassification(Gemma2PreTrainedModel):
1110
+ def __init__(self, config):
1111
+ super().__init__(config)
1112
+ self.num_labels = config.num_labels
1113
+ self.model = Gemma2Model(config)
1114
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1115
+
1116
+ # Initialize weights and apply final processing
1117
+ self.post_init()
1118
+
1119
+ def get_input_embeddings(self):
1120
+ return self.model.embed_tokens
1121
+
1122
+ def set_input_embeddings(self, value):
1123
+ self.model.embed_tokens = value
1124
+
1125
+ @add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING)
1126
+ def forward(
1127
+ self,
1128
+ input_ids: Optional[torch.LongTensor] = None,
1129
+ attention_mask: Optional[torch.Tensor] = None,
1130
+ position_ids: Optional[torch.LongTensor] = None,
1131
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1132
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1133
+ labels: Optional[torch.LongTensor] = None,
1134
+ use_cache: Optional[bool] = None,
1135
+ output_attentions: Optional[bool] = None,
1136
+ output_hidden_states: Optional[bool] = None,
1137
+ return_dict: Optional[bool] = None,
1138
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1139
+ r"""
1140
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1141
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1142
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1143
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1144
+ """
1145
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1146
+
1147
+ transformer_outputs = self.model(
1148
+ input_ids,
1149
+ attention_mask=attention_mask,
1150
+ position_ids=position_ids,
1151
+ past_key_values=past_key_values,
1152
+ inputs_embeds=inputs_embeds,
1153
+ use_cache=use_cache,
1154
+ output_attentions=output_attentions,
1155
+ output_hidden_states=output_hidden_states,
1156
+ return_dict=return_dict,
1157
+ )
1158
+ hidden_states = transformer_outputs[0]
1159
+ logits = self.score(hidden_states)
1160
+
1161
+ if input_ids is not None:
1162
+ batch_size = input_ids.shape[0]
1163
+ else:
1164
+ batch_size = inputs_embeds.shape[0]
1165
+
1166
+ if self.config.pad_token_id is None and batch_size != 1:
1167
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1168
+ if self.config.pad_token_id is None:
1169
+ sequence_lengths = -1
1170
+ else:
1171
+ if input_ids is not None:
1172
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1173
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1174
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1175
+ sequence_lengths = sequence_lengths.to(logits.device)
1176
+ else:
1177
+ sequence_lengths = -1
1178
+
1179
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1180
+
1181
+ loss = None
1182
+ if labels is not None:
1183
+ loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
1184
+
1185
+ if not return_dict:
1186
+ output = (pooled_logits,) + transformer_outputs[1:]
1187
+ return ((loss,) + output) if loss is not None else output
1188
+
1189
+ return SequenceClassifierOutputWithPast(
1190
+ loss=loss,
1191
+ logits=pooled_logits,
1192
+ past_key_values=transformer_outputs.past_key_values,
1193
+ hidden_states=transformer_outputs.hidden_states,
1194
+ attentions=transformer_outputs.attentions,
1195
+ )
1196
+
1197
+
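+ def _last_token_pooling_example(logits: torch.Tensor, input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
+     """Illustrative sketch of the pooling used by `Gemma2ForSequenceClassification`: pick
+     the logits of the last non-padding token per row; the modulo trick keeps reverse
+     indexing ONNX-friendly when a row contains no padding at all."""
+     seq_lens = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
+     seq_lens = seq_lens % input_ids.shape[-1]
+     return logits[torch.arange(input_ids.shape[0]), seq_lens]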
1198
+ @add_start_docstrings(
1199
+ """
1200
+ The Gemma2 Model transformer with a token classification head on top (a linear layer on top of the hidden-states
1201
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1202
+ """,
1203
+ GEMMA2_START_DOCSTRING,
1204
+ )
1205
+ class Gemma2ForTokenClassification(Gemma2PreTrainedModel):
1206
+ def __init__(self, config):
1207
+ super().__init__(config)
1208
+ self.num_labels = config.num_labels
1209
+ self.model = Gemma2Model(config)
1210
+ if getattr(config, "classifier_dropout", None) is not None:
1211
+ classifier_dropout = config.classifier_dropout
1212
+ elif getattr(config, "hidden_dropout", None) is not None:
1213
+ classifier_dropout = config.hidden_dropout
1214
+ else:
1215
+ classifier_dropout = 0.1
1216
+ self.dropout = nn.Dropout(classifier_dropout)
1217
+ self.score = nn.Linear(config.hidden_size, config.num_labels)
1218
+
1219
+ # Initialize weights and apply final processing
1220
+ self.post_init()
1221
+
1222
+ def get_input_embeddings(self):
1223
+ return self.model.embed_tokens
1224
+
1225
+ def set_input_embeddings(self, value):
1226
+ self.model.embed_tokens = value
1227
+
1228
+ @add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING)
1229
+ @add_code_sample_docstrings(
1230
+ checkpoint=_CHECKPOINT_FOR_DOC,
1231
+ output_type=TokenClassifierOutput,
1232
+ config_class=_CONFIG_FOR_DOC,
1233
+ )
1234
+ def forward(
1235
+ self,
1236
+ input_ids: Optional[torch.LongTensor] = None,
1237
+ attention_mask: Optional[torch.Tensor] = None,
1238
+ position_ids: Optional[torch.LongTensor] = None,
1239
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1240
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1241
+ labels: Optional[torch.LongTensor] = None,
1242
+ use_cache: Optional[bool] = None,
1243
+ output_attentions: Optional[bool] = None,
1244
+ output_hidden_states: Optional[bool] = None,
1245
+ return_dict: Optional[bool] = None,
1246
+ ) -> Union[Tuple, TokenClassifierOutput]:
1247
+ r"""
1248
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1249
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1250
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1251
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1252
+ """
1253
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1254
+
1255
+ outputs = self.model(
1256
+ input_ids,
1257
+ attention_mask=attention_mask,
1258
+ position_ids=position_ids,
1259
+ past_key_values=past_key_values,
1260
+ inputs_embeds=inputs_embeds,
1261
+ use_cache=use_cache,
1262
+ output_attentions=output_attentions,
1263
+ output_hidden_states=output_hidden_states,
1264
+ return_dict=return_dict,
1265
+ )
1266
+ sequence_output = outputs[0]
1267
+ sequence_output = self.dropout(sequence_output)
1268
+ logits = self.score(sequence_output)
1269
+
1270
+ loss = None
1271
+ if labels is not None:
1272
+ loss = self.loss_function(logits, labels, self.config)
1273
+
1274
+ if not return_dict:
1275
+ output = (logits,) + outputs[2:]
1276
+ return ((loss,) + output) if loss is not None else output
1277
+
1278
+ return TokenClassifierOutput(
1279
+ loss=loss,
1280
+ logits=logits,
1281
+ hidden_states=outputs.hidden_states,
1282
+ attentions=outputs.attentions,
1283
+ )
modeling_spatialvla.py ADDED
@@ -0,0 +1,578 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from dataclasses import dataclass
16
+ from typing import List, Optional, Tuple, Union
17
+
18
+ import os
19
+ import torch
20
+ import torch.utils.checkpoint
21
+ from torch import nn
22
+ from torch.linalg import inv
23
+ import torchvision.transforms.functional as TF
24
+ import torch.nn.functional as F
25
+ from transformers.cache_utils import Cache, HybridCache, StaticCache
26
+ from transformers.generation import GenerationMixin
27
+ from transformers.modeling_utils import PreTrainedModel, PretrainedConfig
28
+ from transformers.utils import (
29
+ ModelOutput,
30
+ logging,
31
+ )
32
+ from .configuration_spatialvla import SpatialVLAConfig
33
+ from .modeling_gemma2 import Gemma2ForCausalLM
34
+ from transformers import AutoModel, ZoeDepthForDepthEstimation
35
+
36
+ SIGLIP_MEAN, SIGLIP_STD = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
37
+ ZOE_MEAN, ZOE_STD = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+ from transformers import StoppingCriteria, StoppingCriteriaList
42
+
43
+ class StopOnReasoningTag(StoppingCriteria):
44
+ def __init__(self, tokenizer, tag="<Reasoning>"):
45
+ self.tag_token_ids = tokenizer.tokenizer.encode(tag, add_special_tokens=False)[:-1]
47
+ self.tag_length = len(self.tag_token_ids)
48
+
49
+ def __call__(self, input_ids, scores, **kwargs):
50
+ # Compare the tail of the generated sequence against the stop-tag token ids.
51
+ generated_tokens = input_ids[0].tolist()
52
+ return generated_tokens[-self.tag_length:] == self.tag_token_ids
61
+
62
+
63
+
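+ def _example_stop_on_reasoning(model, processor, inputs):
+     """Illustrative sketch (not part of the released checkpoint): halt generation as soon
+     as the `<Reasoning>` tag is emitted. `processor` is assumed to expose a `.tokenizer`
+     attribute, matching how `StopOnReasoningTag` indexes into it above."""
+     criteria = StoppingCriteriaList([StopOnReasoningTag(processor, "<Reasoning>")])
+     return model.generate(**inputs, stopping_criteria=criteria)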
64
+ class Ego3DPositionEmbeddingMLP(nn.Module):
65
+ """Absolute pos embedding, learned.
66
+ https://github.com/kwea123/nerf_pl/blob/52aeb387da64a9ad9a0f914ea9b049ffc598b20c/models/nerf.py#L4
67
+ """
68
+
69
+ def __init__(self, in_channels=3, num_pos_feats=768, n_freqs=8, logscale=True):
70
+ super(Ego3DPositionEmbeddingMLP, self).__init__()
71
+ self.n_freqs = n_freqs
72
+ self.freq_out_channels = in_channels * (2 * n_freqs + 1)
73
+ if logscale:
74
+ freq_bands = 2 ** torch.linspace(0, n_freqs - 1, n_freqs)
75
+ else:
76
+ freq_bands = torch.linspace(1, 2 ** (n_freqs - 1), n_freqs)
77
+
78
+ center = torch.tensor([0., 0., 2.]).repeat(in_channels // 3)
79
+ self.register_buffer("freq_bands", freq_bands, persistent=False)
80
+ self.register_buffer("center", center, persistent=False)
81
+
82
+ self.position_embedding_head = nn.Sequential(
83
+ nn.Linear(self.freq_out_channels, num_pos_feats),
84
+ nn.LayerNorm(num_pos_feats),
85
+ nn.ReLU(),
86
+ nn.Linear(num_pos_feats, num_pos_feats),
87
+ )
88
+ self._reset_parameters()
89
+
90
+ def _reset_parameters(self):
91
+ """init with small weights to maintain stable training."""
92
+ for p in self.parameters():
93
+ if p.dim() > 1:
94
+ nn.init.xavier_uniform_(p, gain=0.01)
95
+
96
+ @torch.no_grad()
97
+ def frequency_encoding(self, xyz):
98
+ """
99
+ Embeds x to (x, sin(2^k x), cos(2^k x), ...)
100
+ Different from the paper, "x" is also in the output
101
+ See https://github.com/bmild/nerf/issues/12
102
+ x \in [-2, 2]
103
+ y \in [-2, 2]
104
+ z \in [0., 4]
105
+ Inputs:
106
+ x: (b n m)
107
+ Outputs:
108
+ out: (b n o)
109
+ """
110
+ xyz_n = ((xyz - self.center) / 2.0).to(self.freq_bands.dtype)
111
+ xyz_feq = xyz_n.unsqueeze(-1) * self.freq_bands # (b n m 1)
112
+ sin_xyz, cos_xyz = torch.sin(xyz_feq), torch.cos(xyz_feq) # (b n m nf)
113
+ encoding = torch.cat([xyz_n.unsqueeze(-1), sin_xyz, cos_xyz], -1).reshape(*xyz.shape[:2], -1)
114
+ return encoding
115
+
116
+ def forward(self, xyz):
117
+ """Forward pass, xyz is (B, N, 3or6), output (B, N, F)."""
118
+ freq_encoding = self.frequency_encoding(xyz)
119
+ position_embedding = self.position_embedding_head(freq_encoding)
120
+ return position_embedding
121
+
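+ def _example_ego3d_shapes():
+     """Shape sketch (assumed sizes, not config values): with in_channels=12
+     (reso=2 -> 4 sub-points x 3 coords) and n_freqs=8, the frequency encoding maps
+     (B, N, 12) -> (B, N, 12 * (2 * 8 + 1)) = (B, N, 204) before the MLP projects it to
+     `num_pos_feats`."""
+     pe = Ego3DPositionEmbeddingMLP(in_channels=12, num_pos_feats=768, n_freqs=8)
+     xyz = torch.zeros(2, 256, 12)  # (batch, num_patches, reso^2 * 3)
+     return pe(xyz).shape  # torch.Size([2, 256, 768])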
122
+ def process_zoe(pixel_values, pad_mode="reflect", output_size=(384, 512)):
123
+ """https://github.com/huggingface/transformers/blob/v4.45.2/src/transformers/models/zoedepth/image_processing_zoedepth.py"""
124
+ # h, w = images.shape[-2:]
125
+ # pad
126
+ ph, pw = 31, 31 # int((h / 2)**0.5 * 3), int((w / 2)**0.5 * 3) # 32, 31
127
+ images = F.pad(pixel_values, (pw, pw, ph, ph), mode=pad_mode)
128
+ # resize
129
+ size = (384, 384) # get_resize_output_image_size
130
+ images = F.interpolate(images, size=size, mode="bicubic", align_corners=True)
131
+ # zoe: padding -> resize -> normalize. we follow `normalize -> padding -> resize` from siglip
132
+ images = TF.normalize(images, mean=ZOE_MEAN, std=ZOE_STD)
133
+ return images, ph, pw
134
+
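+ def _example_process_zoe_shapes():
+     """Illustrative sketch (assumed 224x224 input): reflection-pad 31 px per side, then
+     bicubic-resize to 384x384 and normalize for ZoeDepth; ph/pw are returned so the
+     predicted depth can be un-padded after upsampling in `get_image_features`."""
+     pixel_values = torch.rand(1, 3, 224, 224)  # already scaled to [0, 1]
+     images, ph, pw = process_zoe(pixel_values)
+     return images.shape, ph, pw  # torch.Size([1, 3, 384, 384]), 31, 31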
135
+ @dataclass
136
+ class SpatialVLACausalLMOutputWithPast(ModelOutput):
137
+ loss: Optional[torch.FloatTensor] = None
138
+ logits: torch.FloatTensor = None
139
+ past_key_values: Optional[Union[List[torch.FloatTensor], Cache]] = None
140
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
141
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
142
+ image_hidden_states: Optional[torch.FloatTensor] = None
143
+
144
+ class SpatialVLAMultiModalProjector(nn.Module):
145
+ def __init__(self, config: SpatialVLAConfig):
146
+ super().__init__()
147
+ self.linear = nn.Linear(config.vision_config.hidden_size, config.vision_config.projection_dim, bias=True)
148
+
149
+ def forward(self, image_features):
150
+ hidden_states = self.linear(image_features)
151
+ return hidden_states
152
+
153
+ class SpatialVLAPreTrainedModel(PreTrainedModel):
154
+ config_class = SpatialVLAConfig
155
+ base_model_prefix = "model"
156
+ supports_gradient_checkpointing = True
157
+ _no_split_modules = ["SpatialVLAMultiModalProjector", "ZoeDepthForDepthEstimation", "Ego3DPositionEmbeddingMLP"]
158
+ _skip_keys_device_placement = "past_key_values"
159
+ _supports_cache_class = True
160
+ _supports_quantized_cache = True
161
+ _supports_static_cache = True
163
+ _supports_flash_attn_2 = True
164
+ _supports_sdpa = True
165
+
166
+ def _init_weights(self, module):
167
+ std = (
168
+ self.config.initializer_range
169
+ if hasattr(self.config, "initializer_range")
170
+ else self.config.text_config.initializer_range
171
+ )
172
+
173
+ if hasattr(module, "class_embedding"):
174
+ module.class_embedding.data.normal_(mean=0.0, std=std)
175
+
176
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
177
+ module.weight.data.normal_(mean=0.0, std=std)
178
+ if module.bias is not None:
179
+ module.bias.data.zero_()
180
+ elif isinstance(module, nn.Embedding):
181
+ module.weight.data.normal_(mean=0.0, std=std)
182
+ if module.padding_idx is not None:
183
+ module.weight.data[module.padding_idx].zero_()
184
+
185
+ class SpatialVLAForConditionalGeneration(SpatialVLAPreTrainedModel, GenerationMixin):
186
+ def __init__(self, config: SpatialVLAConfig, vision_model=None, vision_zoe_model=None, projector_model=None, language_model=None):
187
+ super().__init__(config)
188
+
189
+ self.vision_tower = vision_model or AutoModel.from_config(config=config.vision_config)
190
+ self.multi_modal_projector = projector_model or SpatialVLAMultiModalProjector(config)
191
+ self.vocab_size = config.text_config.vocab_size
192
+ if language_model is None:
193
+ language_model = Gemma2ForCausalLM(config=config.text_config)
194
+ if language_model._tied_weights_keys is not None:
195
+ self._tied_weights_keys = [f"language_model.{k}" for k in language_model._tied_weights_keys]
196
+ self.language_model = language_model
197
+
198
+ if config.use_vision_zoe:
199
+ self.vision_zoe_model = vision_zoe_model or ZoeDepthForDepthEstimation(config.vision_zoe_config)
200
+ self.position_embedding_3d = Ego3DPositionEmbeddingMLP(
201
+ config.ego3d_patch_reso**2 * 3, num_pos_feats=config.vision_config.hidden_size, n_freqs=config.n_freqs
202
+ )
203
+ # register buffer
204
+ patch_size, reso, image_size = config.vision_config.patch_size, config.ego3d_patch_reso, config.vision_config.image_size
205
+ y, x = torch.meshgrid(torch.arange(0, image_size, patch_size // reso), torch.arange(0, image_size, patch_size // reso), indexing="ij") # (h//sp w//sp)
206
+ y, x = y + patch_size / reso / 2, x + patch_size / reso / 2
207
+ uv_h = torch.stack([x, y, torch.ones_like(x)], dim=0).reshape(3, -1) # (3 hw)
208
+ self.register_buffer("uv_h", uv_h, persistent=False)
209
+
210
+ # shared spatial embeddings for <ACTION> <IMG>
211
+ if config.use_spatial_token:
212
+ self.spatial_embed_tokens = nn.Embedding(self.config.spatial_token_num, config.text_config.hidden_size)
213
+ else:
214
+ self.spatial_embed_tokens = None
215
+ self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
216
+ self.processor = None
217
+
218
+ def criteria_value(self):
219
+ self.criteria = StoppingCriteriaList([StopOnReasoningTag(self.processor, "<Reasoning>")])
220
+
221
+
222
+ def backproject_patch(self, K: torch.Tensor, depth: torch.Tensor, patch_size=14, reso=2) -> torch.Tensor:
223
+ """
224
+ Backproject depth map to 3D points in camera coordinate.
225
+ Args:
226
+ K: camera intrinsic matrix (b 3 3)
227
+ depth: depth map (b 1 h w)
228
+ patch_size: patch size for siglip
229
+ reso: reso^2 -> sample points in each patch
230
+ patch sz = 14 ......
231
+ ┌────────┬────────┐
232
+ │ ─ ─ │ ─ ─ │
233
+ │ points │ ├─ ─ ─
234
+ │ ─ ─ │ ─ ─ │
235
+ ├────────┼────────┤
236
+ │ ─ ─ │ ─ ─ │
237
+ │ │ │
238
+ │ ─ ─ │ ─ ─ │
239
+ └────────┴────────┘
240
+ reso=2───►points=4
241
+
242
+
243
+ """
244
+ b, c, h, w = depth.shape
245
+ hp, wp = h // patch_size, w // patch_size
246
+ sub_hp = sub_wp = reso
247
+ patch_depth = F.interpolate(depth, size=(hp * reso, wp * reso), mode="area").reshape(b, c, -1)
248
+ p_cam = (inv(K.float()) @ self.uv_h.float()) * patch_depth # (b 3 3) @ (3 hw) -> (b 3 hw) * (b 1 hw) -> (b 3 hw)
249
+ patch_p_cam = p_cam.reshape(b, 3, hp, sub_hp, wp, sub_wp).permute(0, 2, 4, 3, 5, 1).reshape(b, hp * wp, -1)
250
+ return patch_p_cam
251
+
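+     # Back-projection sketch: for pixel (u, v) with depth d and intrinsics K, the
+     # camera-frame point is p = d * K^{-1} [u, v, 1]^T. With reso=2, each 14x14 patch
+     # contributes reso^2 = 4 depth samples, so every patch token packs reso^2 * 3 = 12
+     # coordinates, matching Ego3DPositionEmbeddingMLP(in_channels=ego3d_patch_reso**2 * 3).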
252
+ def get_input_embeddings(self):
253
+ return self.language_model.get_input_embeddings()
254
+
255
+ def set_input_embeddings(self, value):
256
+ self.language_model.set_input_embeddings(value)
257
+
258
+ def get_output_embeddings(self):
259
+ return self.language_model.get_output_embeddings()
260
+
261
+ def set_output_embeddings(self, new_embeddings):
262
+ self.language_model.set_output_embeddings(new_embeddings)
263
+
264
+ def set_decoder(self, decoder):
265
+ self.language_model.set_decoder(decoder)
266
+
267
+ def get_decoder(self):
268
+ return self.language_model.get_decoder()
269
+
270
+ def tie_weights(self):
271
+ return self.language_model.tie_weights()
272
+
273
+ def resize_token_embeddings(
274
+ self,
275
+ new_num_tokens: Optional[int] = None,
276
+ pad_to_multiple_of: Optional[int] = None,
277
+ mean_resizing: bool = True,
278
+ ) -> nn.Embedding:
279
+ model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
280
+ vocab_size = model_embeds.weight.shape[0]
281
+ self.config.text_config.vocab_size = self.vocab_size = self.config._vocab_size = vocab_size
282
+ self.tie_weights()
283
+ return model_embeds
284
+
285
+ def _update_causal_mask(
286
+ self,
287
+ attention_mask,
288
+ token_type_ids,
289
+ past_key_values,
290
+ cache_position,
291
+ input_ids=None,
292
+ inputs_embeds=None,
293
+ is_training: bool = False,
294
+ ):
295
+ if self.config.text_config._attn_implementation == "flash_attention_2":
296
+ if attention_mask is not None and 0.0 in attention_mask:
297
+ return attention_mask
298
+ return None
299
+
300
+ using_static_cache = isinstance(past_key_values, StaticCache)
301
+ min_dtype = torch.finfo(self.dtype).min
302
+ inputs_lead_dim = input_ids.shape[0] if input_ids is not None else inputs_embeds.shape[0]
303
+ sequence_length = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
304
+ if using_static_cache:
305
+ target_length = past_key_values.get_max_cache_shape()
306
+ elif isinstance(past_key_values, HybridCache):
307
+ target_length = past_key_values.get_max_cache_shape()
308
+ else:
309
+ target_length = (
310
+ attention_mask.shape[-1]
311
+ if isinstance(attention_mask, torch.Tensor)
312
+ else cache_position[0] + sequence_length + 1
313
+ )
314
+
315
+ if attention_mask is not None and attention_mask.dim() == 4:
316
+ return attention_mask
317
+
318
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=self.dtype, device=cache_position.device)
319
+ if sequence_length != 1:
320
+ if is_training: causal_mask = torch.triu(causal_mask, diagonal=1)
321
+ else: causal_mask[:, :sequence_length] = 0.0
322
+
323
+ causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
324
+ causal_mask = causal_mask[None, None, :, :].expand(inputs_lead_dim, 1, -1, -1)
325
+ if attention_mask is not None:
326
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
327
+ mask_length = attention_mask.shape[-1]
328
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
329
+ padding_mask = padding_mask == 0
330
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
331
+ if is_training:
332
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(token_type_ids[:, None, None, :].to(causal_mask.device) == 0, 0)
333
+ return causal_mask
334
+
335
+ def get_image_features(self, pixel_values: torch.FloatTensor, intrinsic: torch.FloatTensor):
336
+ siglip_pixel_values = TF.normalize(pixel_values, mean=SIGLIP_MEAN, std=SIGLIP_STD)
337
+ image_outputs = self.vision_tower(siglip_pixel_values)
338
+
339
+ # ego3d position encoding
340
+ if self.config.use_vision_zoe:
341
+ zoe_pixel_values, ph, pw = process_zoe(pixel_values, pad_mode="reflect")
342
+ with torch.no_grad():
343
+ pvh, pvw = pixel_values.shape[-2:]
344
+ depth = self.vision_zoe_model(pixel_values=zoe_pixel_values).predicted_depth
345
+ depth = F.interpolate(
346
+ depth.unsqueeze(1),
347
+ size=(pvh+2*ph, pvw+2*pw),
348
+ mode="bicubic",
349
+ align_corners=True,
350
+ )[..., ph:-ph, pw:-pw]
351
+ xyz = self.backproject_patch(
352
+ intrinsic, depth, patch_size=self.config.vision_config.patch_size, reso=self.config.ego3d_patch_reso
353
+ ) # (b, n, 3*4)
354
+ pos_embed_3d = self.position_embedding_3d(xyz)
355
+ selected_image_feature = image_outputs.last_hidden_state + pos_embed_3d
356
+ else:
357
+ selected_image_feature = image_outputs.last_hidden_state
358
+ image_features = self.multi_modal_projector(selected_image_feature)
359
+ image_features = image_features / (self.config.text_config.hidden_size**0.5)
360
+ return image_features
361
+
362
+ def forward(
363
+ self,
364
+ input_ids: torch.LongTensor = None,
365
+ pixel_values: torch.FloatTensor = None,
366
+ actions: Optional[torch.FloatTensor] = None,
367
+ intrinsic: Optional[torch.Tensor] = None,
368
+ attention_mask: Optional[torch.Tensor] = None,
369
+ position_ids: Optional[torch.LongTensor] = None,
370
+ past_key_values: Optional[Union[List[torch.FloatTensor], Cache]] = None,
371
+ token_type_ids: Optional[torch.LongTensor] = None,
372
+ cache_position: Optional[torch.LongTensor] = None,
373
+ inputs_embeds: Optional[torch.FloatTensor] = None,
374
+ labels: Optional[torch.LongTensor] = None,
375
+ use_cache: Optional[bool] = None,
376
+ output_attentions: Optional[bool] = None,
377
+ output_hidden_states: Optional[bool] = None,
378
+ return_dict: Optional[bool] = None,
379
+ num_logits_to_keep: int = 0,
380
+ ) -> Union[Tuple, SpatialVLACausalLMOutputWithPast]:
381
+
382
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
383
+ output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
384
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
385
+
386
+ is_training = token_type_ids is not None and labels is not None
387
+
388
+ if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(input_ids).clone() # clone so the in-place spatial-token edit below does not break gradient checkpointing
389
+
390
+ if self.config.use_spatial_token:
391
+ spatial_selected = (input_ids >= self.config.action_token_begin_idx) & (input_ids < self.config.action_token_begin_idx + self.config.spatial_token_num)
392
+ inputs_embeds[spatial_selected] = inputs_embeds[spatial_selected] * 0.0 + self.spatial_embed_tokens(input_ids[spatial_selected] - self.config.action_token_begin_idx) # the "* 0.0 +" keeps dtype/device and the autograd graph while swapping in the shared spatial embeddings
393
+
394
+ if cache_position is None:
395
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
396
+ cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
397
+
398
+ if position_ids is None:
399
+ position_ids = cache_position.unsqueeze(0) + 1 # Paligemma positions are 1-indexed
400
+
401
+ # merge
402
+ if pixel_values is not None:
403
+ image_features = self.get_image_features(pixel_values, intrinsic)
404
+ special_image_mask = (input_ids == self.config.image_token_index).unsqueeze(-1)
405
+ special_image_mask = special_image_mask.expand_as(inputs_embeds).to(inputs_embeds.device)
406
+ if inputs_embeds[special_image_mask].numel() != image_features.numel():
407
+ image_tokens_in_text = torch.sum(input_ids == self.config.image_token_index)
408
+ raise ValueError(
409
+ f"Number of images does not match number of special image tokens in the input text. "
410
+ f"Got {image_tokens_in_text} image tokens in the text but {image_features.shape[0] * image_features.shape[1]} "
411
+ "tokens from image embeddings."
412
+ )
413
+ image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
414
+ inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
415
+
416
+ # mask out pad-token-ids in labels for BC
417
+ if labels is not None and self.pad_token_id in labels:
418
+ logger.warning_once(
419
+ "`labels` contains `pad_token_id` which will be masked with `config.ignore_index`. ",
420
+ "You have to mask out `pad_token_id` when preparing `labels`, this behavior will be removed in v.4.46.",
421
+ )
422
+ labels = torch.where(input_ids == self.pad_token_id, self.config.ignore_index, labels)
423
+
424
+ causal_mask = self._update_causal_mask(
425
+ attention_mask, token_type_ids, past_key_values, cache_position, input_ids, inputs_embeds, is_training
426
+ )
427
+ outputs = self.language_model(
428
+ attention_mask=causal_mask,
429
+ position_ids=position_ids,
430
+ past_key_values=past_key_values,
431
+ inputs_embeds=inputs_embeds,
432
+ use_cache=use_cache,
433
+ output_attentions=output_attentions,
434
+ output_hidden_states=output_hidden_states,
435
+ return_dict=return_dict,
436
+ cache_position=cache_position,
437
+ num_logits_to_keep=num_logits_to_keep,
438
+ )
439
+
440
+ logits = outputs.logits
442
+ loss = None
443
+ if labels is not None:
444
+ logits = logits.float()
445
+ shift_logits = logits[..., :-1, :]
446
+ shift_labels = labels[..., 1:]
447
+
448
+ mask = (shift_labels >= self.action_tokenizer.translation_tokenizer.token_start_idx) & (
449
+ shift_labels <= self.action_tokenizer.gripper_tokenizer.token_end_idx
450
+ )
451
+ if attention_mask is not None:
452
+ shift_attention_mask = attention_mask[:, -shift_logits.shape[1] :].to(logits.device)
453
+ shift_logits_action = shift_logits[(shift_attention_mask & mask).to(logits.device) != 0].contiguous()
454
+ shift_labels_action = shift_labels[(shift_attention_mask & mask).to(shift_labels.device) != 0].contiguous()
455
+ shift_logits_reason = shift_logits[(shift_attention_mask & ~mask).to(logits.device) != 0].contiguous()
456
+ shift_labels_reason = shift_labels[(shift_attention_mask & ~mask).to(shift_labels.device) != 0].contiguous()
457
+ else:
458
+ shift_logits_action = shift_logits[mask].contiguous()
459
+ shift_logits_reason = shift_logits[~mask].contiguous()
460
+
461
+ shift_labels_action = shift_labels[mask].contiguous()
462
+ shift_labels_reason = shift_labels[~mask].contiguous()
463
+
464
+
465
+ loss_fct = nn.CrossEntropyLoss()
466
+
467
+ flat_logits_action = shift_logits_action.view(-1, self.config.text_config.vocab_size)
468
+ flat_labels_action = shift_labels_action.view(-1).to(shift_logits.device)
469
+ loss_action = loss_fct(flat_logits_action, flat_labels_action)
470
+
471
+ flat_logits_reason = shift_logits_reason.view(-1, self.config.text_config.vocab_size)
472
+ flat_labels_reason = shift_labels_reason.view(-1).to(shift_logits.device)
473
+ loss_reasoning = loss_fct(flat_logits_reason, flat_labels_reason) # cross-entropy (not L1) over the reasoning/text tokens
474
+
475
+ loss = 0.7 * loss_action + 0.3 * loss_reasoning # fixed weighting: action tokens 0.7, reasoning tokens 0.3
476
+ if not return_dict:
477
+ output = (logits,) + outputs[1:]
478
+ return (loss,) + output if loss is not None else output
479
+
480
+ return SpatialVLACausalLMOutputWithPast(
481
+ loss=loss,
482
+ logits=logits,
483
+ past_key_values=outputs.past_key_values,
484
+ hidden_states=outputs.hidden_states,
485
+ attentions=outputs.attentions,
486
+ image_hidden_states=image_features if pixel_values is not None else None,
487
+ )
488
+
489
+ # AR inference
490
+ def prepare_inputs_for_generation(
491
+ self,
492
+ input_ids,
493
+ past_key_values=None,
494
+ inputs_embeds=None,
495
+ cache_position=None,
496
+ position_ids=None,
497
+ pixel_values=None,
498
+ intrinsic=None,
499
+ attention_mask=None,
500
+ token_type_ids=None,
501
+ use_cache=True,
502
+ num_logits_to_keep=None,
503
+ labels=None,
504
+ **kwargs,
505
+ ):
506
+ model_inputs = self.language_model.prepare_inputs_for_generation(
507
+ input_ids,
508
+ past_key_values=past_key_values,
509
+ inputs_embeds=inputs_embeds,
510
+ attention_mask=attention_mask,
511
+ position_ids=position_ids,
512
+ cache_position=cache_position,
513
+ use_cache=use_cache,
514
+ num_logits_to_keep=num_logits_to_keep,
515
+ token_type_ids=token_type_ids,
516
+ **kwargs,
517
+ )
518
+ if model_inputs.get("position_ids") is not None:
519
+ model_inputs["position_ids"] += 1
520
+ if cache_position[0] == 0:
521
+ model_inputs["pixel_values"] = pixel_values
522
+ is_training = token_type_ids is not None and labels is not None
523
+ if cache_position[0] == 0 and isinstance(past_key_values, HybridCache):
524
+ causal_mask = self._update_causal_mask(attention_mask, token_type_ids, past_key_values, cache_position, input_ids, inputs_embeds, is_training)
525
+ model_inputs["attention_mask"] = causal_mask
526
+ model_inputs["intrinsic"] = intrinsic
527
+ return model_inputs
528
+
530
+ @torch.inference_mode()
531
+ def predict_action(
532
+ self,
533
+ model_inputs,
534
+ ) -> torch.Tensor:
535
+
536
+
537
+ model_inputs = model_inputs.to(torch.bfloat16).to(self.device)
538
+ input_len = model_inputs["input_ids"].shape[-1]
539
+ generation_outputs = self.generate(**model_inputs, max_new_tokens=256, stopping_criteria=self.criteria, do_sample=False)
540
+ return generation_outputs[:, input_len:]
541
+
542
+
546
+ @classmethod
547
+ def from_pretrained(
548
+ cls,
549
+ pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
550
+ *model_args,
551
+ config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None,
552
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
553
+ ignore_mismatched_sizes: bool = False,
554
+ force_download: bool = False,
555
+ local_files_only: bool = False,
556
+ token: Optional[Union[str, bool]] = None,
557
+ revision: str = "main",
558
+ use_safetensors: Optional[bool] = None,
559
+ weights_only: bool = True,
560
+ **kwargs,
561
+ ):
562
+ model = super().from_pretrained(
563
+ pretrained_model_name_or_path,
564
+ *model_args,
565
+ config=config,
566
+ cache_dir=cache_dir,
567
+ ignore_mismatched_sizes=ignore_mismatched_sizes,
568
+ force_download=force_download,
569
+ local_files_only=local_files_only,
570
+ token=token,
571
+ revision=revision,
572
+ use_safetensors=use_safetensors,
573
+ weights_only=weights_only,
574
+ **kwargs,
575
+ )
576
+ if model.config.use_spatial_token:
577
+ model.language_model.model.embed_tokens.weight.data[-model.config.spatial_token_num:] = model.spatial_embed_tokens.weight.data
578
+ return model
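
Taken together, the pieces above compose into a short autoregressive inference loop: build model inputs with the processor, let `predict_action` generate the action/reasoning tokens, and decode them back to continuous actions. Below is a minimal sketch; the checkpoint path, image file, and prompt are placeholders, and it assumes the checkpoint's auto_map registers the model class for `AutoModel` (otherwise import the class from the modeling file directly):

import torch
from PIL import Image
from transformers import AutoModel, AutoProcessor

repo = "path/to/this/checkpoint"  # hypothetical local path or hub id
processor = AutoProcessor.from_pretrained(repo, trust_remote_code=True)
model = AutoModel.from_pretrained(repo, torch_dtype=torch.bfloat16, trust_remote_code=True).eval()

image = Image.open("observation.png")  # hypothetical camera frame
inputs = processor(images=image, text="pick up the red block", unnorm_key="bridge_orig/1.0.0")

# predict_action casts floating-point inputs to bfloat16, runs greedy decoding with the
# registered stopping criteria, and returns only the newly generated token ids
generated_ids = model.predict_action(inputs)
result = processor.decode_actions(generated_ids, unnorm_key="bridge_orig/1.0.0")
print(result["actions"])  # de-normalized continuous action chunk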
preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoProcessor": "processing_spatialvla.SpatialVLAProcessor"
4
+ },
5
+ "do_convert_rgb": null,
6
+ "do_normalize": false,
7
+ "do_rescale": true,
8
+ "do_resize": true,
9
+ "image_mean": [
10
+ 0.5,
11
+ 0.5,
12
+ 0.5
13
+ ],
14
+ "image_processor_type": "SiglipImageProcessor",
15
+ "image_seq_length": 256,
16
+ "image_std": [
17
+ 0.5,
18
+ 0.5,
19
+ 0.5
20
+ ],
21
+ "processor_class": "SpatialVLAProcessor",
22
+ "resample": 3,
23
+ "rescale_factor": 0.00392156862745098,
24
+ "size": {
25
+ "height": 224,
26
+ "width": 224
27
+ }
28
+ }
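
In plain terms, this image processor resizes to 224×224 with bicubic resampling (resample=3 in PIL), rescales pixel values by 1/255, and skips mean/std normalization (do_normalize is false, so the 0.5 mean/std entries are unused). A standalone sketch of the equivalent transform, useful for verifying preprocessing outside transformers:

import numpy as np
from PIL import Image

def preprocess(image: Image.Image) -> np.ndarray:
    """Mirror of this config: bicubic resize to 224x224, rescale to [0, 1], no normalization."""
    image = image.convert("RGB").resize((224, 224), Image.BICUBIC)
    pixels = np.asarray(image).astype(np.float32) * 0.00392156862745098  # 1/255
    return pixels.transpose(2, 0, 1)  # HWC -> CHW, as the model expects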
processing_spatialvla.py ADDED
@@ -0,0 +1,259 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import logging
16
+ from typing import List, Optional, Union, Dict
17
+ import numpy as np
18
+ import torch
19
+ from transformers.feature_extraction_utils import BatchFeature
20
+ from transformers.image_utils import ImageInput, is_valid_image
21
+ from transformers.processing_utils import Unpack, _validate_images_text_input_order, ProcessorMixin
22
+ from transformers.tokenization_utils_base import AddedToken, PreTokenizedInput, TextInput
23
+ from transformers.utils import logging
24
+ from transformers.models.paligemma.processing_paligemma import (
25
+ make_batched_images,
26
+ build_string_from_input,
27
+ _is_str_or_image,
28
+ PaliGemmaProcessorKwargs,
29
+ IMAGE_TOKEN,
30
+ EXTRA_TOKENS
31
+ )
32
+ from .action_tokenizer import SpatialActionTokenizer
33
+ logger = logging.get_logger(__name__)
34
+
35
+ class SpatialVLAProcessor(ProcessorMixin):
36
+ attributes = ["image_processor", "tokenizer"]
37
+ valid_kwargs = ["chat_template"]
38
+ image_processor_class = "SiglipImageProcessor"
39
+ tokenizer_class = ("GemmaTokenizer", "GemmaTokenizerFast")
40
+
41
+ def __init__(
42
+ self,
43
+ image_processor=None,
44
+ tokenizer=None,
45
+ chat_template=None,
46
+ statistics: Optional[dict] = None,
47
+ bin_policy=None,
48
+ intrinsic_config=None,
49
+ action_config=None,
50
+ num_obs_steps=1,
51
+ obs_delta=1,
52
+ action_chunk_size=1,
53
+ min_sigma=0.0,
54
+ **kwargs,
55
+ ):
56
+ if image_processor is None:
57
+ raise ValueError("You need to specify an `image_processor`.")
58
+ if tokenizer is None:
59
+ raise ValueError("You need to specify a `tokenizer`.")
60
+ if not hasattr(image_processor, "image_seq_length"):
61
+ raise ValueError("Image processor is missing an `image_seq_length` attribute.")
62
+
63
+ self.image_seq_length = image_processor.image_seq_length
64
+
65
+ if not hasattr(tokenizer, "image_token"):
66
+ image_token = AddedToken(IMAGE_TOKEN, normalized=False, special=True)
67
+ tokens_to_add = {"additional_special_tokens": [image_token]}
68
+ tokenizer.add_special_tokens(tokens_to_add)
69
+ self.image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
70
+ else:
71
+ self.image_token_id = tokenizer.image_token_id
72
+
73
+ tokenizer.add_tokens(EXTRA_TOKENS)
74
+ tokenizer.add_bos_token = False
75
+ tokenizer.add_eos_token = False
76
+
77
+ super().__init__(image_processor, tokenizer, chat_template=chat_template)
78
+
79
+ # action tokenizer
80
+ self.statistics = statistics if statistics else {}
81
+ self.bin_policy = bin_policy
82
+ self.min_sigma = min_sigma
83
+ self.intrinsic_config = intrinsic_config
84
+ self.action_config = action_config
85
+ self.num_obs_steps = num_obs_steps
86
+ self.obs_delta = obs_delta
87
+ self.action_chunk_size = action_chunk_size
88
+ self.dataset_intrinsics = {}
89
+ height, width = image_processor.size["height"], image_processor.size["width"]
90
+
91
+ # scale intrinsic matrix
92
+ for k, v in intrinsic_config.items():
93
+ K = torch.tensor(v["intrinsic"]).float()
94
+ K[:2] *= torch.tensor([width / v["width"], height / v["height"]])[:, None]
95
+ self.dataset_intrinsics[k] = K
96
+
97
+ self.action_tokenizer = SpatialActionTokenizer(
98
+ tokenizer=tokenizer, num_bins=action_config["num_bins"],
99
+ bin_policy=bin_policy, use_spherical=action_config["use_spherical"],
100
+ min_sigma=min_sigma,
101
+ )
102
+
103
+ def __call__(
104
+ self,
105
+ reasoning: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
106
+ images: ImageInput = None,
107
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
108
+ unnorm_key: Optional[str] = None,
109
+ suffix_actions: Optional[np.array] = None, # (t e)
110
+ **kwargs: Unpack[PaliGemmaProcessorKwargs],
111
+ ) -> BatchFeature:
112
+ images, text = _validate_images_text_input_order(images, text)
113
+
114
+ output_kwargs = self._merge_kwargs(
115
+ PaliGemmaProcessorKwargs,
116
+ tokenizer_init_kwargs=self.tokenizer.init_kwargs,
117
+ **kwargs,
118
+ )
119
+ if suffix_actions is not None:
120
+ action_tokens = self.action_tokenizer(suffix_actions) # (n,3)
121
+ suffix="".join(action_tokens.flatten())
122
+
123
+ suffix = f"{suffix}<Reasoning>{reasoning}"
124
+
125
+ else:
126
+ suffix = output_kwargs["text_kwargs"].pop("suffix", None)
127
+
128
+ return_token_type_ids = True if suffix is not None else False
129
+
130
+ if images is None:
131
+ raise ValueError("`images` are expected as arguments to a `PaliGemmaProcessor` instance.")
132
+ if text is None:
133
+ logger.warning_once("You are using PaliGemma without a text prefix. It will perform as a picture-captioning model.")
134
+ text = ""
135
+
136
+ if _is_str_or_image(text):
137
+ text = [text]
138
+ elif isinstance(text, list) and _is_str_or_image(text[0]):
139
+ pass
140
+
141
+ if text is not None and images is not None:
142
+ if not any(IMAGE_TOKEN in sample for sample in text):
143
+ if isinstance(text, List) and isinstance(images, List):
144
+ if len(images) != len(text):
145
+ raise ValueError(
146
+ f"Received {len(images)} images for {len(text)} prompts. Each prompt should be associated with an image or list of images."
147
+ )
148
+ if is_valid_image(images):
149
+ images = [[images]]
150
+ elif isinstance(images, list) and is_valid_image(images[0]):
151
+ images = [[image] for image in images]
152
+ elif not (isinstance(images, list) and isinstance(images[0], list) and is_valid_image(images[0][0])):
153
+ raise ValueError("images must be an image, list of images or list of list of images")
154
+ if suffix is not None and _is_str_or_image(suffix): suffix = [suffix]
155
+ if suffix is not None: suffix = [sfx + self.tokenizer.eos_token for sfx in suffix]
156
+ logger.debug(f"suffix: {suffix}")
157
+ input_strings = [
158
+ build_string_from_input(
159
+ prompt=prompt,
160
+ bos_token=self.tokenizer.bos_token,
161
+ image_seq_len=self.image_seq_length,
162
+ image_token=IMAGE_TOKEN,
163
+ num_images=len(image_list) if isinstance(image_list, list) else 1,
164
+ )
165
+ for prompt, image_list in zip(text, images)
166
+ ]
167
+ images = make_batched_images(images)
168
+ else:
169
+ expanded_samples = []
170
+ for sample in text:
171
+ expanded_sample = sample.replace(IMAGE_TOKEN, IMAGE_TOKEN * self.image_seq_length)
172
+ bos_rfind_index = expanded_sample.rfind(IMAGE_TOKEN)
173
+ bos_index = bos_rfind_index + len(IMAGE_TOKEN) if bos_rfind_index != -1 else 0
174
+ expanded_sample = (
175
+ expanded_sample[:bos_index] + self.tokenizer.bos_token + expanded_sample[bos_index:]
176
+ )
177
+ expanded_samples.append(expanded_sample)
178
+ input_strings = [f"{sample}\n" for sample in expanded_samples]
179
+ pixel_values = self.image_processor(images, **output_kwargs["images_kwargs"])["pixel_values"]
180
+
181
+ if output_kwargs["text_kwargs"].get("max_length", None) is not None:
182
+ output_kwargs["text_kwargs"]["max_length"] += self.image_seq_length
183
+
184
+ inputs = self.tokenizer(
185
+ input_strings,
186
+ text_pair=suffix,
187
+ return_token_type_ids=return_token_type_ids,
188
+ **output_kwargs["text_kwargs"],
189
+ )
191
+ intrinsic = self.dataset_intrinsics[unnorm_key] if unnorm_key in self.dataset_intrinsics else self.dataset_intrinsics["default"]
192
+ return_data = {**inputs, "pixel_values": pixel_values, "intrinsic": intrinsic}
193
+
194
+ if return_token_type_ids:
195
+ labels = inputs["input_ids"].masked_fill(inputs["token_type_ids"] == 0, -100)
196
+ return_data.update({"labels": labels})
197
+ return BatchFeature(data=return_data)
198
+
199
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Gemma
200
+ def batch_decode(self, *args, **kwargs):
201
+ """
202
+ This method forwards all its arguments to GemmaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
203
+ refer to the docstring of this method for more information.
204
+ """
205
+ return self.tokenizer.batch_decode(*args, **kwargs)
206
+
207
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Gemma
208
+ def decode(self, *args, **kwargs):
209
+ """
210
+ This method forwards all its arguments to GemmaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
211
+ the docstring of this method for more information.
212
+ """
213
+ return self.tokenizer.decode(*args, **kwargs)
214
+
215
+ @property
216
+ def model_input_names(self):
217
+ tokenizer_input_names = self.tokenizer.model_input_names
218
+ image_processor_input_names = self.image_processor.model_input_names
219
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
220
+
221
+ def decode_actions(
222
+ self,
223
+ generation_outputs: torch.Tensor,
224
+ unnorm_key: Optional[str] = None,
225
+ ) -> Dict[str, torch.Tensor]:
226
+ action_token_num = 3 # translation + rotation + gripper
227
+ predicted_action_token_ids = generation_outputs[0, : action_token_num * self.action_chunk_size].detach().cpu().long().numpy()
228
+ assert self.tokenizer.eos_token_id != predicted_action_token_ids[-1], "[error] actions contain EOS token, please check your truncation settings!"
229
+
230
+ if predicted_action_token_ids.shape[0] < action_token_num * self.action_chunk_size: # pad with zeros
231
+ logger.warning("Padding zero action!")
232
+ predicted_action_token_ids = np.concatenate(
233
+ [
234
+ predicted_action_token_ids,
235
+ np.zeros(action_token_num * self.action_chunk_size - predicted_action_token_ids.shape[0], dtype=np.longlong),
236
+ ]
237
+ )
238
+ predicted_action_token_ids = predicted_action_token_ids.reshape(-1, action_token_num)
239
+ normalized_action_chunks = self.action_tokenizer.decode_token_ids_to_actions(predicted_action_token_ids)
240
+
241
+ if unnorm_key is None:
242
+ logger.warning("unnorm_key is None, falling back to the first key in statistics")
243
+ unnorm_key = next(iter(self.statistics.keys()))
244
+ action_norm_stats = self.statistics[unnorm_key]["action"]
245
+
246
+ action_dim = len(action_norm_stats["q01"])
247
+ mask = np.array(action_norm_stats.get("mask", np.ones(action_dim)), dtype=bool)
248
+ action_high, action_low = np.array(action_norm_stats["q99"]), np.array(action_norm_stats["q01"])
249
+
250
+ actions = []
251
+ for normalized_actions in normalized_action_chunks:
252
+ action = np.where(
253
+ mask,
254
+ 0.5 * (normalized_actions + 1) * (action_high - action_low) + action_low,
255
+ normalized_actions,
256
+ )
257
+ actions.append(action)
258
+ actions = np.stack(actions)
259
+ return {"actions": actions, "action_ids": predicted_action_token_ids}
processor_config.json ADDED
@@ -0,0 +1,3702 @@
1
+ {
2
+ "action_chunk_size": 4,
3
+ "action_config": {
4
+ "distribution": "gaussian",
5
+ "num_bins": {
6
+ "gripper": 2,
7
+ "rotation": {
8
+ "pitch_bins": 16,
9
+ "roll_bins": 16,
10
+ "yaw_bins": 16
11
+ },
12
+ "total": 8194,
13
+ "translation": {
14
+ "phi_bins": 32,
15
+ "r_bins": 8,
16
+ "theta_bins": 16
17
+ }
18
+ },
19
+ "use_spherical": true
20
+ },
21
+ "auto_map": {
22
+ "AutoProcessor": "processing_spatialvla.SpatialVLAProcessor"
23
+ },
24
+ "bin_policy": {
25
+ "rotation": {
26
+ "pitch_bins": [
27
+ -1.0,
28
+ -0.6785015894338633,
29
+ -0.516796358161167,
30
+ -0.3978678314258641,
31
+ -0.29907867426319246,
32
+ -0.21158608510441518,
33
+ -0.13081651669135252,
34
+ -0.05392877158612959,
35
+ 0.02113881590329744,
36
+ 0.0961313749999302,
37
+ 0.17278161860263358,
38
+ 0.25310821063971767,
39
+ 0.33985580585203445,
40
+ 0.4373796767941653,
41
+ 0.5539451994131283,
42
+ 0.7100308525313351,
43
+ 0.9999999999999999
44
+ ],
45
+ "roll_bins": [
46
+ -1.0,
47
+ -0.7121298287894609,
48
+ -0.5564581819056097,
49
+ -0.440071773405789,
50
+ -0.3426461358467384,
51
+ -0.25595819395001274,
52
+ -0.17566893098554964,
53
+ -0.09904102149491184,
54
+ -0.024059205927849478,
55
+ 0.05100802578115137,
56
+ 0.12790631705350436,
57
+ 0.20869987492610076,
58
+ 0.2962359118858219,
59
+ 0.3951018734752948,
60
+ 0.5141779624401348,
61
+ 0.6762450862353777,
62
+ 1.0
63
+ ],
64
+ "yaw_bins": [
65
+ -1.0,
66
+ -0.6910047644696934,
67
+ -0.5313988287371314,
68
+ -0.4133376866679583,
69
+ -0.3150057290436059,
70
+ -0.22777658299365705,
71
+ -0.14715771012527992,
72
+ -0.07034330907230311,
73
+ 0.004712965738136004,
74
+ 0.07975252682496348,
75
+ 0.15651401950954372,
76
+ 0.23703420508371892,
77
+ 0.32409736463921823,
78
+ 0.4221473708283458,
79
+ 0.5396818128475004,
80
+ 0.6980345545587262,
81
+ 1.0
82
+ ]
83
+ },
84
+ "translation": {
85
+ "phi_bins": [
86
+ -3.1415926535897927,
87
+ -2.5597806593194092,
88
+ -2.1899702111786126,
89
+ -1.9071489188814448,
90
+ -1.6724463283141142,
91
+ -1.4683467869586326,
92
+ -1.2853487663890668,
93
+ -1.1176672338183495,
94
+ -0.961484031585327,
95
+ -0.8141204989748655,
96
+ -0.6736024210639718,
97
+ -0.5384120746595923,
98
+ -0.40733740832383114,
99
+ -0.279375002438531,
100
+ -0.15366425283265983,
101
+ -0.029440234757304742,
102
+ 0.0940021938080639,
103
+ 0.2173378027339352,
104
+ 0.34123726674747146,
105
+ 0.46639302836823826,
106
+ 0.5935473848733163,
107
+ 0.7235258808185444,
108
+ 0.857280204661428,
109
+ 0.9959469801163238,
110
+ 1.1409329906705301,
111
+ 1.2940454053271015,
112
+ 1.4577019170652383,
113
+ 1.6352913749303837,
114
+ 1.8318407243899377,
115
+ 2.0553733807372363,
116
+ 2.320069275631962,
117
+ 2.6552436426949604,
118
+ 3.141592653589793
119
+ ],
120
+ "r_bins": [
121
+ 2.220446049250313e-16,
122
+ 0.19677118231539265,
123
+ 0.3506298590504556,
124
+ 0.4881976731379496,
125
+ 0.621970275186659,
126
+ 0.7620978861167458,
127
+ 0.9228346010157172,
128
+ 1.1393317208802278,
129
+ 1.7320508075688767
130
+ ],
131
+ "theta_bins": [
132
+ 0.0,
133
+ 0.7067187338585303,
134
+ 0.9814199309359143,
135
+ 1.1752042640550222,
136
+ 1.3331175751173345,
137
+ 1.4713205387280388,
138
+ 1.5977846301055496,
139
+ 1.7172771763957553,
140
+ 1.8331248472067783,
141
+ 1.9480194771467687,
142
+ 2.0644993054216925,
143
+ 2.1853608246107656,
144
+ 2.314189357400805,
145
+ 2.456314355008026,
146
+ 2.621028843347318,
147
+ 2.828352346005421,
148
+ 3.141592653589793
149
+ ]
150
+ }
151
+ },
152
+ "intrinsic_config": {
153
+ "bridge_orig/1.0.0": {
154
+ "height": 480,
155
+ "intrinsic": [
156
+ [
157
+ 623.588,
158
+ 0,
159
+ 319.501
160
+ ],
161
+ [
162
+ 0,
163
+ 623.588,
164
+ 239.545
165
+ ],
166
+ [
167
+ 0,
168
+ 0,
169
+ 1
170
+ ]
171
+ ],
172
+ "width": 640
173
+ },
174
+ "default": {
175
+ "height": 480,
176
+ "intrinsic": [
177
+ [
178
+ 623.588,
179
+ 0,
180
+ 319.501
181
+ ],
182
+ [
183
+ 0,
184
+ 623.588,
185
+ 239.545
186
+ ],
187
+ [
188
+ 0,
189
+ 0,
190
+ 1
191
+ ]
192
+ ],
193
+ "width": 640
194
+ }
195
+ },
196
+ "min_sigma": 0.0,
197
+ "num_obs_steps": 1,
198
+ "obs_delta": 1,
199
+ "processor_class": "SpatialVLAProcessor",
200
+ "statistics": {
201
+ "austin_buds_dataset_converted_externally_to_rlds/0.1.0": {
202
+ "action": {
203
+ "mask": [
204
+ true,
205
+ true,
206
+ true,
207
+ true,
208
+ true,
209
+ true,
210
+ false
211
+ ],
212
+ "max": [
213
+ 1.0,
214
+ 1.0,
215
+ 1.0,
216
+ 0.0,
217
+ 0.0,
218
+ 0.0,
219
+ 1.0
220
+ ],
221
+ "mean": [
222
+ -0.07678329944610596,
223
+ 0.0036849123425781727,
224
+ 0.05644941329956055,
225
+ 0.0,
226
+ 0.0,
227
+ 0.0,
228
+ 0.3510494828224182
229
+ ],
230
+ "min": [
231
+ -1.0,
232
+ -1.0,
233
+ -1.0,
234
+ 0.0,
235
+ 0.0,
236
+ 0.0,
237
+ 0.0
238
+ ],
239
+ "q01": [
240
+ -1.0,
241
+ -0.9599999785423279,
242
+ -0.8714285492897034,
243
+ 0.0,
244
+ 0.0,
245
+ 0.0,
246
+ 0.0
247
+ ],
248
+ "q99": [
249
+ 1.0,
250
+ 0.8600000143051147,
251
+ 1.0,
252
+ 0.0,
253
+ 0.0,
254
+ 0.0,
255
+ 1.0
256
+ ],
257
+ "std": [
258
+ 0.6367746591567993,
259
+ 0.3788914680480957,
260
+ 0.47796377539634705,
261
+ 0.0,
262
+ 0.0,
263
+ 0.0,
264
+ 0.4772108495235443
265
+ ]
266
+ },
267
+ "num_trajectories": 50,
268
+ "num_transitions": 34112,
269
+ "proprio": {
270
+ "max": [
271
+ 0.0,
272
+ 0.0,
273
+ 0.0,
274
+ 0.0,
275
+ 0.0,
276
+ 0.0,
277
+ 0.0
278
+ ],
279
+ "mean": [
280
+ 0.0,
281
+ 0.0,
282
+ 0.0,
283
+ 0.0,
284
+ 0.0,
285
+ 0.0,
286
+ 0.0
287
+ ],
288
+ "min": [
289
+ 0.0,
290
+ 0.0,
291
+ 0.0,
292
+ 0.0,
293
+ 0.0,
294
+ 0.0,
295
+ 0.0
296
+ ],
297
+ "q01": [
298
+ 0.0,
299
+ 0.0,
300
+ 0.0,
301
+ 0.0,
302
+ 0.0,
303
+ 0.0,
304
+ 0.0
305
+ ],
306
+ "q99": [
307
+ 0.0,
308
+ 0.0,
309
+ 0.0,
310
+ 0.0,
311
+ 0.0,
312
+ 0.0,
313
+ 0.0
314
+ ],
315
+ "std": [
316
+ 0.0,
317
+ 0.0,
318
+ 0.0,
319
+ 0.0,
320
+ 0.0,
321
+ 0.0,
322
+ 0.0
323
+ ]
324
+ }
325
+ },
326
+ "austin_sailor_dataset_converted_externally_to_rlds/0.1.0": {
327
+ "action": {
328
+ "mask": [
329
+ true,
330
+ true,
331
+ true,
332
+ true,
333
+ true,
334
+ true,
335
+ false
336
+ ],
337
+ "max": [
338
+ 1.0,
339
+ 1.0,
340
+ 1.0,
341
+ 0.0,
342
+ 0.0,
343
+ 0.375,
344
+ 1.0
345
+ ],
346
+ "mean": [
347
+ 0.011825386434793472,
348
+ 0.0064610871486365795,
349
+ 0.060236409306526184,
350
+ 0.0,
351
+ 0.0,
352
+ 0.0016465834341943264,
353
+ 0.5260950326919556
354
+ ],
355
+ "min": [
356
+ -1.0,
357
+ -1.0,
358
+ -1.0,
359
+ 0.0,
360
+ 0.0,
361
+ -0.375,
362
+ 0.0
363
+ ],
364
+ "q01": [
365
+ -1.0,
366
+ -0.9828571677207947,
367
+ -0.6000000238418579,
368
+ 0.0,
369
+ 0.0,
370
+ -0.17249999940395355,
371
+ 0.0
372
+ ],
373
+ "q99": [
374
+ 1.0,
375
+ 0.9457142949104309,
376
+ 1.0,
377
+ 0.0,
378
+ 0.0,
379
+ 0.17892856895923615,
380
+ 1.0
381
+ ],
382
+ "std": [
383
+ 0.46348854899406433,
384
+ 0.41240164637565613,
385
+ 0.41186293959617615,
386
+ 0.0,
387
+ 0.0,
388
+ 0.0578608438372612,
389
+ 0.49893733859062195
390
+ ]
391
+ },
392
+ "num_trajectories": 240,
393
+ "num_transitions": 353094,
394
+ "proprio": {
395
+ "max": [
396
+ 0.0,
397
+ 0.0,
398
+ 0.0,
399
+ 0.0,
400
+ 0.0,
401
+ 0.0,
402
+ 0.0
403
+ ],
404
+ "mean": [
405
+ 0.0,
406
+ 0.0,
407
+ 0.0,
408
+ 0.0,
409
+ 0.0,
410
+ 0.0,
411
+ 0.0
412
+ ],
413
+ "min": [
414
+ 0.0,
415
+ 0.0,
416
+ 0.0,
417
+ 0.0,
418
+ 0.0,
419
+ 0.0,
420
+ 0.0
421
+ ],
422
+ "q01": [
423
+ 0.0,
424
+ 0.0,
425
+ 0.0,
426
+ 0.0,
427
+ 0.0,
428
+ 0.0,
429
+ 0.0
430
+ ],
431
+ "q99": [
432
+ 0.0,
433
+ 0.0,
434
+ 0.0,
435
+ 0.0,
436
+ 0.0,
437
+ 0.0,
438
+ 0.0
439
+ ],
440
+ "std": [
441
+ 0.0,
442
+ 0.0,
443
+ 0.0,
444
+ 0.0,
445
+ 0.0,
446
+ 0.0,
447
+ 0.0
448
+ ]
449
+ }
450
+ },
451
+ "austin_sirius_dataset_converted_externally_to_rlds/0.1.0": {
452
+ "action": {
453
+ "mask": [
454
+ true,
455
+ true,
456
+ true,
457
+ true,
458
+ true,
459
+ true,
460
+ false
461
+ ],
462
+ "max": [
463
+ 1.0002285242080688,
464
+ 0.960608720779419,
465
+ 1.105179786682129,
466
+ 0.0,
467
+ 0.0,
468
+ 0.341785728931427,
469
+ 1.0
470
+ ],
471
+ "mean": [
472
+ 0.077476866543293,
473
+ 0.031955525279045105,
474
+ 0.04244735836982727,
475
+ 0.0,
476
+ 0.0,
477
+ -0.01603454165160656,
478
+ 0.43260180950164795
479
+ ],
480
+ "min": [
481
+ -1.0183025598526,
482
+ -0.9800000190734863,
483
+ -0.9774575233459473,
484
+ 0.0,
485
+ 0.0,
486
+ -0.34607142210006714,
487
+ 0.0
488
+ ],
489
+ "q01": [
490
+ -0.780905865430832,
491
+ -0.5667179036140442,
492
+ -0.5254343223571777,
493
+ 0.0,
494
+ 0.0,
495
+ -0.28495091378688814,
496
+ 0.0
497
+ ],
498
+ "q99": [
499
+ 0.9569637751579284,
500
+ 0.6971374487876891,
501
+ 0.8124888157844541,
502
+ 0.0,
503
+ 0.0,
504
+ 0.1971428543329239,
505
+ 1.0
506
+ ],
507
+ "std": [
508
+ 0.3906330168247223,
509
+ 0.2998153865337372,
510
+ 0.2782270312309265,
511
+ 0.0,
512
+ 0.0,
513
+ 0.08120641857385635,
514
+ 0.49528202414512634
515
+ ]
516
+ },
517
+ "num_trajectories": 559,
518
+ "num_transitions": 279939,
519
+ "proprio": {
520
+ "max": [
521
+ 0.0,
522
+ 0.0,
523
+ 0.0,
524
+ 0.0,
525
+ 0.0,
526
+ 0.0,
527
+ 0.0
528
+ ],
529
+ "mean": [
530
+ 0.0,
531
+ 0.0,
532
+ 0.0,
533
+ 0.0,
534
+ 0.0,
535
+ 0.0,
536
+ 0.0
537
+ ],
538
+ "min": [
539
+ 0.0,
540
+ 0.0,
541
+ 0.0,
542
+ 0.0,
543
+ 0.0,
544
+ 0.0,
545
+ 0.0
546
+ ],
547
+ "q01": [
548
+ 0.0,
549
+ 0.0,
550
+ 0.0,
551
+ 0.0,
552
+ 0.0,
553
+ 0.0,
554
+ 0.0
555
+ ],
556
+ "q99": [
557
+ 0.0,
558
+ 0.0,
559
+ 0.0,
560
+ 0.0,
561
+ 0.0,
562
+ 0.0,
563
+ 0.0
564
+ ],
565
+ "std": [
566
+ 0.0,
567
+ 0.0,
568
+ 0.0,
569
+ 0.0,
570
+ 0.0,
571
+ 0.0,
572
+ 0.0
573
+ ]
574
+ }
575
+ },
576
+ "bc_z/0.1.0": {
577
+ "action": {
578
+ "mask": [
579
+ true,
580
+ true,
581
+ true,
582
+ true,
583
+ true,
584
+ true,
585
+ false
586
+ ],
587
+ "max": [
588
+ 0.2165454924106598,
589
+ 0.1251407265663147,
590
+ 0.10772687941789627,
591
+ 0.33544227480888367,
592
+ 0.28117990493774414,
593
+ 0.40614867210388184,
594
+ 1.0
595
+ ],
596
+ "mean": [
597
+ -0.009958645328879356,
598
+ 0.0008958434336818755,
599
+ 0.00499522453173995,
600
+ 0.000297540333122015,
601
+ -0.008734511211514473,
602
+ -0.03068969026207924,
603
+ 0.8344562649726868
604
+ ],
605
+ "min": [
606
+ -0.1677047461271286,
607
+ -0.14630407094955444,
608
+ -0.10066790133714676,
609
+ -0.29421567916870117,
610
+ -0.32101404666900635,
611
+ -0.4635624885559082,
612
+ 0.0
613
+ ],
614
+ "q01": [
615
+ -0.09220654994249344,
616
+ -0.06456145539879798,
617
+ -0.049121275544166565,
618
+ -0.11594625547528267,
619
+ -0.14152548640966414,
620
+ -0.2251061636209488,
621
+ 0.0
622
+ ],
623
+ "q99": [
624
+ 0.07628866866230968,
625
+ 0.058019736707210584,
626
+ 0.052540797740221024,
627
+ 0.11740604028105736,
628
+ 0.11703975558280955,
629
+ 0.16729306846857078,
630
+ 1.0
631
+ ],
632
+ "std": [
633
+ 0.030533093959093094,
634
+ 0.0231416504830122,
635
+ 0.020642085000872612,
636
+ 0.04156165570020676,
637
+ 0.04643021523952484,
638
+ 0.07697845250368118,
639
+ 0.36111101508140564
640
+ ]
641
+ },
642
+ "num_trajectories": 43264,
643
+ "num_transitions": 6015535,
644
+ "proprio": {
645
+ "max": [
646
+ 0.0,
647
+ 0.0,
648
+ 0.0,
649
+ 0.0,
650
+ 0.0,
651
+ 0.0,
652
+ 0.0
653
+ ],
654
+ "mean": [
655
+ 0.0,
656
+ 0.0,
657
+ 0.0,
658
+ 0.0,
659
+ 0.0,
660
+ 0.0,
661
+ 0.0
662
+ ],
663
+ "min": [
664
+ 0.0,
665
+ 0.0,
666
+ 0.0,
667
+ 0.0,
668
+ 0.0,
669
+ 0.0,
670
+ 0.0
671
+ ],
672
+ "q01": [
673
+ 0.0,
674
+ 0.0,
675
+ 0.0,
676
+ 0.0,
677
+ 0.0,
678
+ 0.0,
679
+ 0.0
680
+ ],
681
+ "q99": [
682
+ 0.0,
683
+ 0.0,
684
+ 0.0,
685
+ 0.0,
686
+ 0.0,
687
+ 0.0,
688
+ 0.0
689
+ ],
690
+ "std": [
691
+ 0.0,
692
+ 0.0,
693
+ 0.0,
694
+ 0.0,
695
+ 0.0,
696
+ 0.0,
697
+ 0.0
698
+ ]
699
+ }
700
+ },
701
+ "berkeley_autolab_ur5/0.1.0": {
702
+ "action": {
703
+ "mask": [
704
+ true,
705
+ true,
706
+ true,
707
+ true,
708
+ true,
709
+ true,
710
+ false
711
+ ],
712
+ "max": [
713
+ 0.019999999552965164,
714
+ 0.019999999552965164,
715
+ 0.019999999552965164,
716
+ 0.06666667014360428,
717
+ 0.06666667014360428,
718
+ 0.06666667014360428,
719
+ 1.0
720
+ ],
721
+ "mean": [
722
+ 0.0005683613708242774,
723
+ 0.0012176961172372103,
724
+ -0.0005296385497786105,
725
+ 0.00021029777417425066,
726
+ 6.069485243642703e-05,
727
+ 0.0012049867073073983,
728
+ 0.6298308372497559
729
+ ],
730
+ "min": [
731
+ -0.019999999552965164,
732
+ -0.019999999552965164,
733
+ -0.019999999552965164,
734
+ -0.06666667014360428,
735
+ -0.06666667014360428,
736
+ -0.06666667014360428,
737
+ 0.0
738
+ ],
739
+ "q01": [
740
+ -0.019999999552965164,
741
+ -0.019999999552965164,
742
+ -0.019999999552965164,
743
+ -0.02628571353852749,
744
+ -0.06666667014360428,
745
+ -0.03847619146108627,
746
+ 0.0
747
+ ],
748
+ "q99": [
749
+ 0.019999999552965164,
750
+ 0.019999999552965164,
751
+ 0.019999999552965164,
752
+ 0.031809523701667786,
753
+ 0.06666667014360428,
754
+ 0.036571428179740906,
755
+ 1.0
756
+ ],
757
+ "std": [
758
+ 0.011533073149621487,
759
+ 0.007990497164428234,
760
+ 0.009577799588441849,
761
+ 0.009432999417185783,
762
+ 0.016427574679255486,
763
+ 0.011054049246013165,
764
+ 0.482679545879364
765
+ ]
766
+ },
767
+ "num_trajectories": 1000,
768
+ "num_transitions": 97939,
769
+ "proprio": {
770
+ "max": [
771
+ 0.0,
772
+ 0.0,
773
+ 0.0,
774
+ 0.0,
775
+ 0.0,
776
+ 0.0,
777
+ 0.0
778
+ ],
779
+ "mean": [
780
+ 0.0,
781
+ 0.0,
782
+ 0.0,
783
+ 0.0,
784
+ 0.0,
785
+ 0.0,
786
+ 0.0
787
+ ],
788
+ "min": [
789
+ 0.0,
790
+ 0.0,
791
+ 0.0,
792
+ 0.0,
793
+ 0.0,
794
+ 0.0,
795
+ 0.0
796
+ ],
797
+ "q01": [
798
+ 0.0,
799
+ 0.0,
800
+ 0.0,
801
+ 0.0,
802
+ 0.0,
803
+ 0.0,
804
+ 0.0
805
+ ],
806
+ "q99": [
807
+ 0.0,
808
+ 0.0,
809
+ 0.0,
810
+ 0.0,
811
+ 0.0,
812
+ 0.0,
813
+ 0.0
814
+ ],
815
+ "std": [
816
+ 0.0,
817
+ 0.0,
818
+ 0.0,
819
+ 0.0,
820
+ 0.0,
821
+ 0.0,
822
+ 0.0
823
+ ]
824
+ }
825
+ },
826
+ "berkeley_cable_routing/0.1.0": {
827
+ "action": {
828
+ "mask": [
829
+ true,
830
+ true,
831
+ true,
832
+ true,
833
+ true,
834
+ true,
835
+ false
836
+ ],
837
+ "max": [
838
+ 0.9633283019065857,
839
+ 1.0,
840
+ 1.0,
841
+ 0.0,
842
+ 0.0,
843
+ 1.0,
844
+ 0.0
845
+ ],
846
+ "mean": [
847
+ -0.07139858603477478,
848
+ 0.023608991876244545,
849
+ 0.10241956263780594,
850
+ 0.0,
851
+ 0.0,
852
+ 0.04967105761170387,
853
+ 0.0
854
+ ],
855
+ "min": [
856
+ -0.9809081554412842,
857
+ -0.9554349184036255,
858
+ -0.9994775056838989,
859
+ 0.0,
860
+ 0.0,
861
+ -1.0,
862
+ 0.0
863
+ ],
864
+ "q01": [
865
+ -0.5534318816661835,
866
+ -0.4797285574674606,
867
+ -0.5314934802055359,
868
+ 0.0,
869
+ 0.0,
870
+ -0.8855219376087189,
871
+ 0.0
872
+ ],
873
+ "q99": [
874
+ 0.42652835428714786,
875
+ 0.5000944086909298,
876
+ 0.639823433756829,
877
+ 0.0,
878
+ 0.0,
879
+ 0.984243879914284,
880
+ 0.0
881
+ ],
882
+ "std": [
883
+ 0.18155010044574738,
884
+ 0.18109896779060364,
885
+ 0.21220752596855164,
886
+ 0.0,
887
+ 0.0,
888
+ 0.3475516438484192,
889
+ 0.0
890
+ ]
891
+ },
892
+ "num_trajectories": 1647,
893
+ "num_transitions": 42328,
894
+ "proprio": {
895
+ "max": [
896
+ 0.0,
897
+ 0.0,
898
+ 0.0,
899
+ 0.0,
900
+ 0.0,
901
+ 0.0,
902
+ 0.0
903
+ ],
904
+ "mean": [
905
+ 0.0,
906
+ 0.0,
907
+ 0.0,
908
+ 0.0,
909
+ 0.0,
910
+ 0.0,
911
+ 0.0
912
+ ],
913
+ "min": [
914
+ 0.0,
915
+ 0.0,
916
+ 0.0,
917
+ 0.0,
918
+ 0.0,
919
+ 0.0,
920
+ 0.0
921
+ ],
922
+ "q01": [
923
+ 0.0,
924
+ 0.0,
925
+ 0.0,
926
+ 0.0,
927
+ 0.0,
928
+ 0.0,
929
+ 0.0
930
+ ],
931
+ "q99": [
932
+ 0.0,
933
+ 0.0,
934
+ 0.0,
935
+ 0.0,
936
+ 0.0,
937
+ 0.0,
938
+ 0.0
939
+ ],
940
+ "std": [
941
+ 0.0,
942
+ 0.0,
943
+ 0.0,
944
+ 0.0,
945
+ 0.0,
946
+ 0.0,
947
+ 0.0
948
+ ]
949
+ }
950
+ },
951
+ "berkeley_fanuc_manipulation/0.1.0": {
952
+ "action": {
953
+ "mask": [
954
+ true,
955
+ true,
956
+ true,
957
+ true,
958
+ true,
959
+ true,
960
+ false
961
+ ],
962
+ "max": [
963
+ 0.009999999776482582,
964
+ 0.009999999776482582,
965
+ 0.009999999776482582,
966
+ 0.03490658476948738,
967
+ 0.03490658476948738,
968
+ 0.03490658476948738,
969
+ 1.0
970
+ ],
971
+ "mean": [
972
+ 0.0007744057802483439,
973
+ -0.00031240080716088414,
974
+ -0.0015001941937953234,
975
+ -0.0007515158504247665,
976
+ -0.00015832878125365824,
977
+ 0.00014327642566058785,
978
+ 0.699295699596405
979
+ ],
980
+ "min": [
981
+ -0.009999999776482582,
982
+ -0.009999999776482582,
983
+ -0.009999999776482582,
984
+ -0.03490658476948738,
985
+ -0.03490658476948738,
986
+ -0.03490658476948738,
987
+ 0.0
988
+ ],
989
+ "q01": [
990
+ -0.009999999776482582,
991
+ -0.009999999776482582,
992
+ -0.009999999776482582,
993
+ -0.03490658476948738,
994
+ 0.0,
995
+ -0.03490658476948738,
996
+ 0.0
997
+ ],
998
+ "q99": [
999
+ 0.009999999776482582,
1000
+ 0.009999999776482582,
1001
+ 0.009999999776482582,
1002
+ 0.03490658476948738,
1003
+ 0.0,
1004
+ 0.03490658476948738,
1005
+ 1.0
1006
+ ],
1007
+ "std": [
1008
+ 0.0034070133697241545,
1009
+ 0.00499219074845314,
1010
+ 0.005344326142221689,
1011
+ 0.007599010597914457,
1012
+ 0.004081932827830315,
1013
+ 0.008568963967263699,
1014
+ 0.45868709683418274
1015
+ ]
1016
+ },
1017
+ "num_trajectories": 415,
1018
+ "num_transitions": 62613,
1019
+ "proprio": {
1020
+ "max": [
1021
+ 0.0,
1022
+ 0.0,
1023
+ 0.0,
1024
+ 0.0,
1025
+ 0.0,
1026
+ 0.0,
1027
+ 0.0
1028
+ ],
1029
+ "mean": [
1030
+ 0.0,
1031
+ 0.0,
1032
+ 0.0,
1033
+ 0.0,
1034
+ 0.0,
1035
+ 0.0,
1036
+ 0.0
1037
+ ],
1038
+ "min": [
1039
+ 0.0,
1040
+ 0.0,
1041
+ 0.0,
1042
+ 0.0,
1043
+ 0.0,
1044
+ 0.0,
1045
+ 0.0
1046
+ ],
1047
+ "q01": [
1048
+ 0.0,
1049
+ 0.0,
1050
+ 0.0,
1051
+ 0.0,
1052
+ 0.0,
1053
+ 0.0,
1054
+ 0.0
1055
+ ],
1056
+ "q99": [
1057
+ 0.0,
1058
+ 0.0,
1059
+ 0.0,
1060
+ 0.0,
1061
+ 0.0,
1062
+ 0.0,
1063
+ 0.0
1064
+ ],
1065
+ "std": [
1066
+ 0.0,
1067
+ 0.0,
1068
+ 0.0,
1069
+ 0.0,
1070
+ 0.0,
1071
+ 0.0,
1072
+ 0.0
1073
+ ]
1074
+ }
1075
+ },
1076
+ "bridge_orig/1.0.0": {
1077
+ "action": {
1078
+ "mask": [
1079
+ true,
1080
+ true,
1081
+ true,
1082
+ true,
1083
+ true,
1084
+ true,
1085
+ false
1086
+ ],
1087
+ "max": [
1088
+ 0.41691166162490845,
1089
+ 0.25864794850349426,
1090
+ 0.21218234300613403,
1091
+ 3.122201919555664,
1092
+ 1.8618112802505493,
1093
+ 6.280478477478027,
1094
+ 1.0
1095
+ ],
1096
+ "mean": [
1097
+ 0.00023341714404523373,
1098
+ 0.00013004327774979174,
1099
+ -0.00012762591359205544,
1100
+ -0.0001556579809403047,
1101
+ -0.00040393328526988626,
1102
+ 0.00023558337124995887,
1103
+ 0.5764582753181458
1104
+ ],
1105
+ "min": [
1106
+ -0.4007510244846344,
1107
+ -0.13874775171279907,
1108
+ -0.22553899884223938,
1109
+ -3.2010786533355713,
1110
+ -1.8618112802505493,
1111
+ -6.279075622558594,
1112
+ 0.0
1113
+ ],
1114
+ "q01": [
1115
+ -0.02872725307941437,
1116
+ -0.04170349963009357,
1117
+ -0.026093858778476715,
1118
+ -0.08092105075716972,
1119
+ -0.09288699507713317,
1120
+ -0.20718276381492615,
1121
+ 0.0
1122
+ ],
1123
+ "q99": [
1124
+ 0.028309678435325586,
1125
+ 0.040855254605412394,
1126
+ 0.040161586627364146,
1127
+ 0.08192047759890528,
1128
+ 0.07792850524187081,
1129
+ 0.20382574498653397,
1130
+ 1.0
1131
+ ],
1132
+ "std": [
1133
+ 0.009765734896063805,
1134
+ 0.013689505867660046,
1135
+ 0.012667152099311352,
1136
+ 0.028534479439258575,
1137
+ 0.03063790127635002,
1138
+ 0.07691770792007446,
1139
+ 0.4973658621311188
1140
+ ]
1141
+ },
1142
+ "num_trajectories": 60064,
1143
+ "num_transitions": 2135463,
1144
+ "proprio": {
1145
+ "max": [
1146
+ 0.0,
1147
+ 0.0,
1148
+ 0.0,
1149
+ 0.0,
1150
+ 0.0,
1151
+ 0.0,
1152
+ 0.0
1153
+ ],
1154
+ "mean": [
1155
+ 0.0,
1156
+ 0.0,
1157
+ 0.0,
1158
+ 0.0,
1159
+ 0.0,
1160
+ 0.0,
1161
+ 0.0
1162
+ ],
1163
+ "min": [
1164
+ 0.0,
1165
+ 0.0,
1166
+ 0.0,
1167
+ 0.0,
1168
+ 0.0,
1169
+ 0.0,
1170
+ 0.0
1171
+ ],
1172
+ "q01": [
1173
+ 0.0,
1174
+ 0.0,
1175
+ 0.0,
1176
+ 0.0,
1177
+ 0.0,
1178
+ 0.0,
1179
+ 0.0
1180
+ ],
1181
+ "q99": [
1182
+ 0.0,
1183
+ 0.0,
1184
+ 0.0,
1185
+ 0.0,
1186
+ 0.0,
1187
+ 0.0,
1188
+ 0.0
1189
+ ],
1190
+ "std": [
1191
+ 0.0,
1192
+ 0.0,
1193
+ 0.0,
1194
+ 0.0,
1195
+ 0.0,
1196
+ 0.0,
1197
+ 0.0
1198
+ ]
1199
+ }
1200
+ },
1201
+ "cmu_stretch/0.1.0": {
1202
+ "action": {
1203
+ "mask": [
1204
+ true,
1205
+ true,
1206
+ true,
1207
+ true,
1208
+ true,
1209
+ true,
1210
+ false
1211
+ ],
1212
+ "max": [
1213
+ 0.02338407188653946,
1214
+ 0.0,
1215
+ 0.023404927924275398,
1216
+ 0.0,
1217
+ 0.0,
1218
+ 0.0,
1219
+ 1.0
1220
+ ],
1221
+ "mean": [
1222
+ 0.0003630445571616292,
1223
+ 0.0,
1224
+ 0.0016466928645968437,
1225
+ 0.0,
1226
+ 0.0,
1227
+ 0.0,
1228
+ 0.3987048268318176
1229
+ ],
1230
+ "min": [
1231
+ -0.019353797659277916,
1232
+ 0.0,
1233
+ -0.02019215188920498,
1234
+ 0.0,
1235
+ 0.0,
1236
+ 0.0,
1237
+ 0.0
1238
+ ],
1239
+ "q01": [
1240
+ -0.011175686959177256,
1241
+ 0.0,
1242
+ -0.0032206363626755773,
1243
+ 0.0,
1244
+ 0.0,
1245
+ 0.0,
1246
+ 0.0
1247
+ ],
1248
+ "q99": [
1249
+ 0.014501785952597848,
1250
+ 0.0,
1251
+ 0.015056106168776728,
1252
+ 0.0,
1253
+ 0.0,
1254
+ 0.0,
1255
+ 1.0
1256
+ ],
1257
+ "std": [
1258
+ 0.004081855062395334,
1259
+ 0.0,
1260
+ 0.003774340031668544,
1261
+ 0.0,
1262
+ 0.0,
1263
+ 0.0,
1264
+ 0.489638090133667
1265
+ ]
1266
+ },
1267
+ "num_trajectories": 135,
1268
+ "num_transitions": 25016,
1269
+ "proprio": {
1270
+ "max": [
1271
+ 0.0,
1272
+ 0.0,
1273
+ 0.0,
1274
+ 0.0,
1275
+ 0.0,
1276
+ 0.0,
1277
+ 0.0
1278
+ ],
1279
+ "mean": [
1280
+ 0.0,
1281
+ 0.0,
1282
+ 0.0,
1283
+ 0.0,
1284
+ 0.0,
1285
+ 0.0,
1286
+ 0.0
1287
+ ],
1288
+ "min": [
1289
+ 0.0,
1290
+ 0.0,
1291
+ 0.0,
1292
+ 0.0,
1293
+ 0.0,
1294
+ 0.0,
1295
+ 0.0
1296
+ ],
1297
+ "q01": [
1298
+ 0.0,
1299
+ 0.0,
1300
+ 0.0,
1301
+ 0.0,
1302
+ 0.0,
1303
+ 0.0,
1304
+ 0.0
1305
+ ],
1306
+ "q99": [
1307
+ 0.0,
1308
+ 0.0,
1309
+ 0.0,
1310
+ 0.0,
1311
+ 0.0,
1312
+ 0.0,
1313
+ 0.0
1314
+ ],
1315
+ "std": [
1316
+ 0.0,
1317
+ 0.0,
1318
+ 0.0,
1319
+ 0.0,
1320
+ 0.0,
1321
+ 0.0,
1322
+ 0.0
1323
+ ]
1324
+ }
1325
+ },
1326
+ "dlr_edan_shared_control_converted_externally_to_rlds/0.1.0": {
1327
+ "action": {
1328
+ "mask": [
1329
+ true,
1330
+ true,
1331
+ true,
1332
+ true,
1333
+ true,
1334
+ true,
1335
+ false
1336
+ ],
1337
+ "max": [
1338
+ 0.18991442024707794,
1339
+ 0.0739002525806427,
1340
+ 0.18064819276332855,
1341
+ 0.0866486132144928,
1342
+ 0.13464981317520142,
1343
+ 0.16910280287265778,
1344
+ 1.0
1345
+ ],
1346
+ "mean": [
1347
+ 0.0066478196531534195,
1348
+ -0.0007657355745323002,
1349
+ 0.006522845011204481,
1350
+ 0.0011679773451760411,
1351
+ -0.006395624950528145,
1352
+ -0.011903021484613419,
1353
+ 0.6985887289047241
1354
+ ],
1355
+ "min": [
1356
+ -0.10054297000169754,
1357
+ -0.08427435159683228,
1358
+ -0.13533438742160797,
1359
+ -0.17556548118591309,
1360
+ -0.18485672771930695,
1361
+ -0.2680685818195343,
1362
+ 0.0
1363
+ ],
1364
+ "q01": [
1365
+ -0.02987122368067503,
1366
+ -0.06013262912631035,
1367
+ -0.08286409199237824,
1368
+ -0.05924444157630205,
1369
+ -0.15986866518855095,
1370
+ -0.15636983573436739,
1371
+ 0.0
1372
+ ],
1373
+ "q99": [
1374
+ 0.08832092039287087,
1375
+ 0.042126184627413736,
1376
+ 0.11311905644834042,
1377
+ 0.0643695573508739,
1378
+ 0.03941855944693088,
1379
+ 0.156646853685379,
1380
+ 1.0
1381
+ ],
1382
+ "std": [
1383
+ 0.021393585950136185,
1384
+ 0.018142299726605415,
1385
+ 0.03374377265572548,
1386
+ 0.01743541844189167,
1387
+ 0.03394372761249542,
1388
+ 0.04641878604888916,
1389
+ 0.45885783433914185
1390
+ ]
1391
+ },
1392
+ "num_trajectories": 104,
1393
+ "num_transitions": 8928,
1394
+ "proprio": {
1395
+ "max": [
1396
+ 0.0,
1397
+ 0.0,
1398
+ 0.0,
1399
+ 0.0,
1400
+ 0.0,
1401
+ 0.0,
1402
+ 0.0
1403
+ ],
1404
+ "mean": [
1405
+ 0.0,
1406
+ 0.0,
1407
+ 0.0,
1408
+ 0.0,
1409
+ 0.0,
1410
+ 0.0,
1411
+ 0.0
1412
+ ],
1413
+ "min": [
1414
+ 0.0,
1415
+ 0.0,
1416
+ 0.0,
1417
+ 0.0,
1418
+ 0.0,
1419
+ 0.0,
1420
+ 0.0
1421
+ ],
1422
+ "q01": [
1423
+ 0.0,
1424
+ 0.0,
1425
+ 0.0,
1426
+ 0.0,
1427
+ 0.0,
1428
+ 0.0,
1429
+ 0.0
1430
+ ],
1431
+ "q99": [
1432
+ 0.0,
1433
+ 0.0,
1434
+ 0.0,
1435
+ 0.0,
1436
+ 0.0,
1437
+ 0.0,
1438
+ 0.0
1439
+ ],
1440
+ "std": [
1441
+ 0.0,
1442
+ 0.0,
1443
+ 0.0,
1444
+ 0.0,
1445
+ 0.0,
1446
+ 0.0,
1447
+ 0.0
1448
+ ]
1449
+ }
1450
+ },
1451
+ "dobbe/0.0.1": {
1452
+ "action": {
1453
+ "mask": [
1454
+ true,
1455
+ true,
1456
+ true,
1457
+ true,
1458
+ true,
1459
+ true,
1460
+ false
1461
+ ],
1462
+ "max": [
1463
+ 38.590423583984375,
1464
+ 17.932697296142578,
1465
+ 4.843764305114746,
1466
+ 1.4372116327285767,
1467
+ 0.4340403974056244,
1468
+ 1.2057193517684937,
1469
+ 0.9998947381973267
1470
+ ],
1471
+ "mean": [
1472
+ -0.00011206958151888102,
1473
+ 0.0011229681549593806,
1474
+ -0.00010193959315074608,
1475
+ -7.37128357286565e-05,
1476
+ -0.0006753374473191798,
1477
+ -5.664441778208129e-05,
1478
+ 0.6318688988685608
1479
+ ],
1480
+ "min": [
1481
+ -5.700923442840576,
1482
+ -21.605947494506836,
1483
+ -123.72489929199219,
1484
+ -1.7229845523834229,
1485
+ -0.4998578727245331,
1486
+ -0.8867913484573364,
1487
+ 1.4196479014572105e-06
1488
+ ],
1489
+ "q01": [
1490
+ -0.01119564864784479,
1491
+ -0.014266146533191203,
1492
+ -0.0071747214533388615,
1493
+ -0.009444301575422287,
1494
+ -0.03990109823644161,
1495
+ -0.017422311007976532,
1496
+ 4.003279136668425e-05
1497
+ ],
1498
+ "q99": [
1499
+ 0.01015154086053368,
1500
+ 0.017181577533483497,
1501
+ 0.007216989761218411,
1502
+ 0.010380979906767595,
1503
+ 0.03556173853576176,
1504
+ 0.018032474815845446,
1505
+ 0.9982578039169312
1506
+ ],
1507
+ "std": [
1508
+ 0.042660679668188095,
1509
+ 0.04428431764245033,
1510
+ 0.12224890291690826,
1511
+ 0.005388470832258463,
1512
+ 0.011246936395764351,
1513
+ 0.006288259290158749,
1514
+ 0.3973240256309509
1515
+ ]
1516
+ },
1517
+ "num_trajectories": 5208,
1518
+ "num_transitions": 1139911,
1519
+ "proprio": {
1520
+ "max": [
1521
+ 0.0,
1522
+ 0.0,
1523
+ 0.0,
1524
+ 0.0,
1525
+ 0.0,
1526
+ 0.0,
1527
+ 0.0
1528
+ ],
1529
+ "mean": [
1530
+ 0.0,
1531
+ 0.0,
1532
+ 0.0,
1533
+ 0.0,
1534
+ 0.0,
1535
+ 0.0,
1536
+ 0.0
1537
+ ],
1538
+ "min": [
1539
+ 0.0,
1540
+ 0.0,
1541
+ 0.0,
1542
+ 0.0,
1543
+ 0.0,
1544
+ 0.0,
1545
+ 0.0
1546
+ ],
1547
+ "q01": [
1548
+ 0.0,
1549
+ 0.0,
1550
+ 0.0,
1551
+ 0.0,
1552
+ 0.0,
1553
+ 0.0,
1554
+ 0.0
1555
+ ],
1556
+ "q99": [
1557
+ 0.0,
1558
+ 0.0,
1559
+ 0.0,
1560
+ 0.0,
1561
+ 0.0,
1562
+ 0.0,
1563
+ 0.0
1564
+ ],
1565
+ "std": [
1566
+ 0.0,
1567
+ 0.0,
1568
+ 0.0,
1569
+ 0.0,
1570
+ 0.0,
1571
+ 0.0,
1572
+ 0.0
1573
+ ]
1574
+ }
1575
+ },
1576
+ "droid/1.0.0": {
1577
+ "action": {
1578
+ "mask": [
1579
+ true,
1580
+ true,
1581
+ true,
1582
+ true,
1583
+ true,
1584
+ true,
1585
+ false
1586
+ ],
1587
+ "max": [
1588
+ 0.9999998211860657,
1589
+ 0.999991774559021,
1590
+ 0.9999973177909851,
1591
+ 0.9999874830245972,
1592
+ 0.9999954104423523,
1593
+ 0.9999998807907104,
1594
+ 1.0
1595
+ ],
1596
+ "mean": [
1597
+ 0.027425529435276985,
1598
+ -0.0026820411439985037,
1599
+ 0.01595238223671913,
1600
+ 0.0035501928068697453,
1601
+ -0.030532635748386383,
1602
+ -0.006685464642941952,
1603
+ 0.5860344171524048
1604
+ ],
1605
+ "min": [
1606
+ -0.9999999403953552,
1607
+ -0.9999951124191284,
1608
+ -0.9999960660934448,
1609
+ -0.9999980330467224,
1610
+ -0.9999982118606567,
1611
+ -0.9999998807907104,
1612
+ 0.0
1613
+ ],
1614
+ "q01": [
1615
+ -0.7776297926902771,
1616
+ -0.5803514122962952,
1617
+ -0.5795090794563293,
1618
+ -0.6464047729969025,
1619
+ -0.7041108310222626,
1620
+ -0.8895104378461838,
1621
+ 0.0
1622
+ ],
1623
+ "q99": [
1624
+ 0.7597932070493698,
1625
+ 0.5726242214441299,
1626
+ 0.7351000607013702,
1627
+ 0.6705610305070877,
1628
+ 0.6464948207139969,
1629
+ 0.8897542208433151,
1630
+ 1.0
1631
+ ],
1632
+ "std": [
1633
+ 0.25387412309646606,
1634
+ 0.18426834046840668,
1635
+ 0.22532416880130768,
1636
+ 0.21757009625434875,
1637
+ 0.22572560608386993,
1638
+ 0.2867794930934906,
1639
+ 0.4287726879119873
1640
+ ]
1641
+ },
1642
+ "num_trajectories": 92233,
1643
+ "num_transitions": 27044326,
1644
+ "proprio": {
1645
+ "max": [
1646
+ 0.0,
1647
+ 0.0,
1648
+ 0.0,
1649
+ 0.0,
1650
+ 0.0,
1651
+ 0.0,
1652
+ 0.0
1653
+ ],
1654
+ "mean": [
1655
+ 0.0,
1656
+ 0.0,
1657
+ 0.0,
1658
+ 0.0,
1659
+ 0.0,
1660
+ 0.0,
1661
+ 0.0
1662
+ ],
1663
+ "min": [
1664
+ 0.0,
1665
+ 0.0,
1666
+ 0.0,
1667
+ 0.0,
1668
+ 0.0,
1669
+ 0.0,
1670
+ 0.0
1671
+ ],
1672
+ "q01": [
1673
+ 0.0,
1674
+ 0.0,
1675
+ 0.0,
1676
+ 0.0,
1677
+ 0.0,
1678
+ 0.0,
1679
+ 0.0
1680
+ ],
1681
+ "q99": [
1682
+ 0.0,
1683
+ 0.0,
1684
+ 0.0,
1685
+ 0.0,
1686
+ 0.0,
1687
+ 0.0,
1688
+ 0.0
1689
+ ],
1690
+ "std": [
1691
+ 0.0,
1692
+ 0.0,
1693
+ 0.0,
1694
+ 0.0,
1695
+ 0.0,
1696
+ 0.0,
1697
+ 0.0
1698
+ ]
1699
+ }
1700
+ },
1701
+ "fmb_dataset/1.0.0": {
1702
+ "action": {
1703
+ "mask": [
1704
+ true,
1705
+ true,
1706
+ true,
1707
+ true,
1708
+ true,
1709
+ true,
1710
+ false
1711
+ ],
1712
+ "max": [
1713
+ 1.399999976158142,
1714
+ 1.0,
1715
+ 1.399999976158142,
1716
+ 1.0,
1717
+ 1.0,
1718
+ 1.0,
1719
+ 1.0
1720
+ ],
1721
+ "mean": [
1722
+ 0.05902976542711258,
1723
+ -0.06476633995771408,
1724
+ -0.09787469357252121,
1725
+ 0.004325387068092823,
1726
+ 0.00028963759541511536,
1727
+ -0.04457257315516472,
1728
+ 0.7336440086364746
1729
+ ],
1730
+ "min": [
1731
+ -1.399999976158142,
1732
+ -1.399999976158142,
1733
+ -1.0,
1734
+ -1.0,
1735
+ -1.0,
1736
+ -1.0,
1737
+ 0.0
1738
+ ],
1739
+ "q01": [
1740
+ -0.8257142901420593,
1741
+ -1.399999976158142,
1742
+ -1.0,
1743
+ -1.0,
1744
+ -0.3028571307659149,
1745
+ -1.0,
1746
+ 0.0
1747
+ ],
1748
+ "q99": [
1749
+ 1.0,
1750
+ 0.5257142782211304,
1751
+ 1.0,
1752
+ 1.0,
1753
+ 0.3400000035762787,
1754
+ 1.0,
1755
+ 1.0
1756
+ ],
1757
+ "std": [
1758
+ 0.28809186816215515,
1759
+ 0.2820416986942291,
1760
+ 0.4626740515232086,
1761
+ 0.3266514539718628,
1762
+ 0.10842999070882797,
1763
+ 0.34400978684425354,
1764
+ 0.4435289800167084
1765
+ ]
1766
+ },
1767
+ "num_trajectories": 8612,
1768
+ "num_transitions": 1137459,
1769
+ "proprio": {
1770
+ "max": [
1771
+ 0.0,
1772
+ 0.0,
1773
+ 0.0,
1774
+ 0.0,
1775
+ 0.0,
1776
+ 0.0,
1777
+ 0.0
1778
+ ],
1779
+ "mean": [
1780
+ 0.0,
1781
+ 0.0,
1782
+ 0.0,
1783
+ 0.0,
1784
+ 0.0,
1785
+ 0.0,
1786
+ 0.0
1787
+ ],
1788
+ "min": [
1789
+ 0.0,
1790
+ 0.0,
1791
+ 0.0,
1792
+ 0.0,
1793
+ 0.0,
1794
+ 0.0,
1795
+ 0.0
1796
+ ],
1797
+ "q01": [
1798
+ 0.0,
1799
+ 0.0,
1800
+ 0.0,
1801
+ 0.0,
1802
+ 0.0,
1803
+ 0.0,
1804
+ 0.0
1805
+ ],
1806
+ "q99": [
1807
+ 0.0,
1808
+ 0.0,
1809
+ 0.0,
1810
+ 0.0,
1811
+ 0.0,
1812
+ 0.0,
1813
+ 0.0
1814
+ ],
1815
+ "std": [
1816
+ 0.0,
1817
+ 0.0,
1818
+ 0.0,
1819
+ 0.0,
1820
+ 0.0,
1821
+ 0.0,
1822
+ 0.0
1823
+ ]
1824
+ }
1825
+ },
1826
+ "fractal20220817_data/0.1.0": {
1827
+ "action": {
1828
+ "mask": [
1829
+ true,
1830
+ true,
1831
+ true,
1832
+ true,
1833
+ true,
1834
+ true,
1835
+ false
1836
+ ],
1837
+ "max": [
1838
+ 2.9984593391418457,
1839
+ 22.09052848815918,
1840
+ 2.7507524490356445,
1841
+ 1.570636510848999,
1842
+ 1.5321086645126343,
1843
+ 1.5691522359848022,
1844
+ 1.0
1845
+ ],
1846
+ "mean": [
1847
+ 0.006987569388002157,
1848
+ 0.006265842355787754,
1849
+ -0.012625036761164665,
1850
+ 0.04333385452628136,
1851
+ -0.005756205413490534,
1852
+ 0.0009130453690886497,
1853
+ 0.5354204773902893
1854
+ ],
1855
+ "min": [
1856
+ -2.0204520225524902,
1857
+ -5.497899532318115,
1858
+ -2.031663417816162,
1859
+ -1.569917917251587,
1860
+ -1.569892168045044,
1861
+ -1.570419430732727,
1862
+ 0.0
1863
+ ],
1864
+ "q01": [
1865
+ -0.22453527510166169,
1866
+ -0.14820013284683228,
1867
+ -0.231589707583189,
1868
+ -0.3517994859814644,
1869
+ -0.4193011274933815,
1870
+ -0.43643461108207704,
1871
+ 0.0
1872
+ ],
1873
+ "q99": [
1874
+ 0.17824687153100965,
1875
+ 0.14938379630446405,
1876
+ 0.21842354819178575,
1877
+ 0.5892666035890578,
1878
+ 0.35272657424211445,
1879
+ 0.44796681255102094,
1880
+ 1.0
1881
+ ],
1882
+ "std": [
1883
+ 0.0692116841673851,
1884
+ 0.059702735394239426,
1885
+ 0.07353106141090393,
1886
+ 0.15610557794570923,
1887
+ 0.13164424896240234,
1888
+ 0.14593809843063354,
1889
+ 0.49711740016937256
1890
+ ]
1891
+ },
1892
+ "num_trajectories": 87212,
1893
+ "num_transitions": 3786400,
1894
+ "proprio": {
1895
+ "max": [
1896
+ 0.0,
1897
+ 0.0,
1898
+ 0.0,
1899
+ 0.0,
1900
+ 0.0,
1901
+ 0.0,
1902
+ 0.0
1903
+ ],
1904
+ "mean": [
1905
+ 0.0,
1906
+ 0.0,
1907
+ 0.0,
1908
+ 0.0,
1909
+ 0.0,
1910
+ 0.0,
1911
+ 0.0
1912
+ ],
1913
+ "min": [
1914
+ 0.0,
1915
+ 0.0,
1916
+ 0.0,
1917
+ 0.0,
1918
+ 0.0,
1919
+ 0.0,
1920
+ 0.0
1921
+ ],
1922
+ "q01": [
1923
+ 0.0,
1924
+ 0.0,
1925
+ 0.0,
1926
+ 0.0,
1927
+ 0.0,
1928
+ 0.0,
1929
+ 0.0
1930
+ ],
1931
+ "q99": [
1932
+ 0.0,
1933
+ 0.0,
1934
+ 0.0,
1935
+ 0.0,
1936
+ 0.0,
1937
+ 0.0,
1938
+ 0.0
1939
+ ],
1940
+ "std": [
1941
+ 0.0,
1942
+ 0.0,
1943
+ 0.0,
1944
+ 0.0,
1945
+ 0.0,
1946
+ 0.0,
1947
+ 0.0
1948
+ ]
1949
+ }
1950
+ },
1951
+ "furniture_bench_dataset_converted_externally_to_rlds/0.1.0": {
1952
+ "action": {
1953
+ "mask": [
1954
+ true,
1955
+ true,
1956
+ true,
1957
+ true,
1958
+ true,
1959
+ true,
1960
+ false
1961
+ ],
1962
+ "max": [
1963
+ 0.10000000149011612,
1964
+ 0.10000000149011612,
1965
+ 0.10000000149011612,
1966
+ 0.8651833534240723,
1967
+ 1.0909736156463623,
1968
+ 2.863185405731201,
1969
+ 1.0
1970
+ ],
1971
+ "mean": [
1972
+ 0.0001461071806261316,
1973
+ 0.0010830992832779884,
1974
+ 0.0006224963581189513,
1975
+ -0.0033032014034688473,
1976
+ -0.002688060747459531,
1977
+ 0.018242614343762398,
1978
+ 0.48854944109916687
1979
+ ],
1980
+ "min": [
1981
+ -0.10495579987764359,
1982
+ -0.10939455777406693,
1983
+ -0.10000000149011612,
1984
+ -0.971906840801239,
1985
+ -1.0475432872772217,
1986
+ -3.06000018119812,
1987
+ 0.0
1988
+ ],
1989
+ "q01": [
1990
+ -0.053988199681043625,
1991
+ -0.05049169331789017,
1992
+ -0.032499241530895236,
1993
+ -0.1953887003660202,
1994
+ -0.41674559473991396,
1995
+ -0.8886768388748169,
1996
+ 0.0
1997
+ ],
1998
+ "q99": [
1999
+ 0.05414841488003723,
2000
+ 0.04965164884924884,
2001
+ 0.060055799782276154,
2002
+ 0.18231668293476103,
2003
+ 0.39867786407470646,
2004
+ 0.8772023963928218,
2005
+ 1.0
2006
+ ],
2007
+ "std": [
2008
+ 0.016107233241200447,
2009
+ 0.014891570433974266,
2010
+ 0.014014236629009247,
2011
+ 0.05827433615922928,
2012
+ 0.11417083442211151,
2013
+ 0.33479660749435425,
2014
+ 0.4999157190322876
2015
+ ]
2016
+ },
2017
+ "num_trajectories": 5100,
2018
+ "num_transitions": 3948057,
2019
+ "proprio": {
2020
+ "max": [
2021
+ 0.0,
2022
+ 0.0,
2023
+ 0.0,
2024
+ 0.0,
2025
+ 0.0,
2026
+ 0.0,
2027
+ 0.0
2028
+ ],
2029
+ "mean": [
2030
+ 0.0,
2031
+ 0.0,
2032
+ 0.0,
2033
+ 0.0,
2034
+ 0.0,
2035
+ 0.0,
2036
+ 0.0
2037
+ ],
2038
+ "min": [
2039
+ 0.0,
2040
+ 0.0,
2041
+ 0.0,
2042
+ 0.0,
2043
+ 0.0,
2044
+ 0.0,
2045
+ 0.0
2046
+ ],
2047
+ "q01": [
2048
+ 0.0,
2049
+ 0.0,
2050
+ 0.0,
2051
+ 0.0,
2052
+ 0.0,
2053
+ 0.0,
2054
+ 0.0
2055
+ ],
2056
+ "q99": [
2057
+ 0.0,
2058
+ 0.0,
2059
+ 0.0,
2060
+ 0.0,
2061
+ 0.0,
2062
+ 0.0,
2063
+ 0.0
2064
+ ],
2065
+ "std": [
2066
+ 0.0,
2067
+ 0.0,
2068
+ 0.0,
2069
+ 0.0,
2070
+ 0.0,
2071
+ 0.0,
2072
+ 0.0
2073
+ ]
2074
+ }
2075
+ },
2076
+ "iamlab_cmu_pickup_insert_converted_externally_to_rlds/0.1.0": {
2077
+ "action": {
2078
+ "mask": [
2079
+ true,
2080
+ true,
2081
+ true,
2082
+ true,
2083
+ true,
2084
+ true,
2085
+ false
2086
+ ],
2087
+ "max": [
2088
+ 0.6634981632232666,
2089
+ 0.23428471386432648,
2090
+ 0.4308285415172577,
2091
+ 3.1415927410125732,
2092
+ 0.13647015392780304,
2093
+ 3.141592502593994,
2094
+ 1.0
2095
+ ],
2096
+ "mean": [
2097
+ 0.5274373292922974,
2098
+ 0.028582017868757248,
2099
+ 0.18712472915649414,
2100
+ 1.2339569330215454,
2101
+ 0.03226622939109802,
2102
+ -1.4199472665786743,
2103
+ 0.5550631880760193
2104
+ ],
2105
+ "min": [
2106
+ 0.3071657121181488,
2107
+ -0.29754969477653503,
2108
+ 0.06578229367733002,
2109
+ -3.1415927410125732,
2110
+ -0.04584203287959099,
2111
+ -3.141592502593994,
2112
+ 0.0
2113
+ ],
2114
+ "q01": [
2115
+ 0.3148897051811218,
2116
+ -0.20317550599575043,
2117
+ 0.06785467118024827,
2118
+ -3.140952730178833,
2119
+ -0.029743434861302376,
2120
+ -3.141091251373291,
2121
+ 0.0
2122
+ ],
2123
+ "q99": [
2124
+ 0.6472805738449097,
2125
+ 0.20846802592277527,
2126
+ 0.36855655312538155,
2127
+ 3.1409926891326903,
2128
+ 0.11424950212240226,
2129
+ 3.1410969257354737,
2130
+ 1.0
2131
+ ],
2132
+ "std": [
2133
+ 0.08108346909284592,
2134
+ 0.1116756722331047,
2135
+ 0.07747555524110794,
2136
+ 2.8737244606018066,
2137
+ 0.02774704433977604,
2138
+ 2.7678685188293457,
2139
+ 0.4969509243965149
2140
+ ]
2141
+ },
2142
+ "num_trajectories": 631,
2143
+ "num_transitions": 146241,
2144
+ "proprio": {
2145
+ "max": [
2146
+ 0.0,
2147
+ 0.0,
2148
+ 0.0,
2149
+ 0.0,
2150
+ 0.0,
2151
+ 0.0,
2152
+ 0.0
2153
+ ],
2154
+ "mean": [
2155
+ 0.0,
2156
+ 0.0,
2157
+ 0.0,
2158
+ 0.0,
2159
+ 0.0,
2160
+ 0.0,
2161
+ 0.0
2162
+ ],
2163
+ "min": [
2164
+ 0.0,
2165
+ 0.0,
2166
+ 0.0,
2167
+ 0.0,
2168
+ 0.0,
2169
+ 0.0,
2170
+ 0.0
2171
+ ],
2172
+ "q01": [
2173
+ 0.0,
2174
+ 0.0,
2175
+ 0.0,
2176
+ 0.0,
2177
+ 0.0,
2178
+ 0.0,
2179
+ 0.0
2180
+ ],
2181
+ "q99": [
2182
+ 0.0,
2183
+ 0.0,
2184
+ 0.0,
2185
+ 0.0,
2186
+ 0.0,
2187
+ 0.0,
2188
+ 0.0
2189
+ ],
2190
+ "std": [
2191
+ 0.0,
2192
+ 0.0,
2193
+ 0.0,
2194
+ 0.0,
2195
+ 0.0,
2196
+ 0.0,
2197
+ 0.0
2198
+ ]
2199
+ }
2200
+ },
2201
+ "jaco_play/0.1.0": {
2202
+ "action": {
2203
+ "mask": [
2204
+ true,
2205
+ true,
2206
+ true,
2207
+ true,
2208
+ true,
2209
+ true,
2210
+ false
2211
+ ],
2212
+ "max": [
2213
+ 0.20000000298023224,
2214
+ 0.20000000298023224,
2215
+ 0.20000000298023224,
2216
+ 0.0,
2217
+ 0.0,
2218
+ 0.0,
2219
+ 1.0
2220
+ ],
2221
+ "mean": [
2222
+ 0.0009658387862145901,
2223
+ -0.005800850689411163,
2224
+ -0.003950685728341341,
2225
+ 0.0,
2226
+ 0.0,
2227
+ 0.0,
2228
+ 0.34934908151626587
2229
+ ],
2230
+ "min": [
2231
+ -0.20000000298023224,
2232
+ -0.20000000298023224,
2233
+ -0.20000000298023224,
2234
+ 0.0,
2235
+ 0.0,
2236
+ 0.0,
2237
+ 0.0
2238
+ ],
2239
+ "q01": [
2240
+ -0.20000000298023224,
2241
+ -0.20000000298023224,
2242
+ -0.20000000298023224,
2243
+ 0.0,
2244
+ 0.0,
2245
+ 0.0,
2246
+ 0.0
2247
+ ],
2248
+ "q99": [
2249
+ 0.20000000298023224,
2250
+ 0.20000000298023224,
2251
+ 0.20000000298023224,
2252
+ 0.0,
2253
+ 0.0,
2254
+ 0.0,
2255
+ 1.0
2256
+ ],
2257
+ "std": [
2258
+ 0.12234985828399658,
2259
+ 0.09678783267736435,
2260
+ 0.1115543395280838,
2261
+ 0.0,
2262
+ 0.0,
2263
+ 0.0,
2264
+ 0.47682321071624756
2265
+ ]
2266
+ },
2267
+ "num_trajectories": 1085,
2268
+ "num_transitions": 77965,
2269
+ "proprio": {
2270
+ "max": [
2271
+ 0.0,
2272
+ 0.0,
2273
+ 0.0,
2274
+ 0.0,
2275
+ 0.0,
2276
+ 0.0,
2277
+ 0.0
2278
+ ],
2279
+ "mean": [
2280
+ 0.0,
2281
+ 0.0,
2282
+ 0.0,
2283
+ 0.0,
2284
+ 0.0,
2285
+ 0.0,
2286
+ 0.0
2287
+ ],
2288
+ "min": [
2289
+ 0.0,
2290
+ 0.0,
2291
+ 0.0,
2292
+ 0.0,
2293
+ 0.0,
2294
+ 0.0,
2295
+ 0.0
2296
+ ],
2297
+ "q01": [
2298
+ 0.0,
2299
+ 0.0,
2300
+ 0.0,
2301
+ 0.0,
2302
+ 0.0,
2303
+ 0.0,
2304
+ 0.0
2305
+ ],
2306
+ "q99": [
2307
+ 0.0,
2308
+ 0.0,
2309
+ 0.0,
2310
+ 0.0,
2311
+ 0.0,
2312
+ 0.0,
2313
+ 0.0
2314
+ ],
2315
+ "std": [
2316
+ 0.0,
2317
+ 0.0,
2318
+ 0.0,
2319
+ 0.0,
2320
+ 0.0,
2321
+ 0.0,
2322
+ 0.0
2323
+ ]
2324
+ }
2325
+ },
2326
+ "kuka/0.1.0": {
2327
+ "action": {
2328
+ "mask": [
2329
+ true,
2330
+ true,
2331
+ true,
2332
+ true,
2333
+ true,
2334
+ true,
2335
+ false
2336
+ ],
2337
+ "max": [
2338
+ 0.1697135865688324,
2339
+ 0.2777623236179352,
2340
+ 0.43710532784461975,
2341
+ 0.0,
2342
+ 0.0,
2343
+ 1.9684287309646606,
2344
+ 1.0
2345
+ ],
2346
+ "mean": [
2347
+ -0.00046687963185831904,
2348
+ 0.00040137648466043174,
2349
+ -0.0012807906605303288,
2350
+ 0.0,
2351
+ 0.0,
2352
+ -0.037225183099508286,
2353
+ 0.4131543040275574
2354
+ ],
2355
+ "min": [
2356
+ -0.159867063164711,
2357
+ -0.2892282009124756,
2358
+ -0.2795473635196686,
2359
+ 0.0,
2360
+ 0.0,
2361
+ -1.9875637292861938,
2362
+ 0.0
2363
+ ],
2364
+ "q01": [
2365
+ -0.06619441494345665,
2366
+ -0.08713878810405731,
2367
+ -0.15083016991615295,
2368
+ 0.0,
2369
+ 0.0,
2370
+ -0.5415697038173676,
2371
+ 0.0
2372
+ ],
2373
+ "q99": [
2374
+ 0.06601839080452929,
2375
+ 0.08732476785779003,
2376
+ 0.18168179214000715,
2377
+ 0.0,
2378
+ 0.0,
2379
+ 0.2923380345106127,
2380
+ 1.0
2381
+ ],
2382
+ "std": [
2383
+ 0.020832739770412445,
2384
+ 0.029158642515540123,
2385
+ 0.0642285868525505,
2386
+ 0.0,
2387
+ 0.0,
2388
+ 0.14224639534950256,
2389
+ 0.4908643662929535
2390
+ ]
2391
+ },
2392
+ "num_trajectories": 209880,
2393
+ "num_transitions": 2455879,
2394
+ "proprio": {
2395
+ "max": [
2396
+ 0.0,
2397
+ 0.0,
2398
+ 0.0,
2399
+ 0.0,
2400
+ 0.0,
2401
+ 0.0,
2402
+ 0.0
2403
+ ],
2404
+ "mean": [
2405
+ 0.0,
2406
+ 0.0,
2407
+ 0.0,
2408
+ 0.0,
2409
+ 0.0,
2410
+ 0.0,
2411
+ 0.0
2412
+ ],
2413
+ "min": [
2414
+ 0.0,
2415
+ 0.0,
2416
+ 0.0,
2417
+ 0.0,
2418
+ 0.0,
2419
+ 0.0,
2420
+ 0.0
2421
+ ],
2422
+ "q01": [
2423
+ 0.0,
2424
+ 0.0,
2425
+ 0.0,
2426
+ 0.0,
2427
+ 0.0,
2428
+ 0.0,
2429
+ 0.0
2430
+ ],
2431
+ "q99": [
2432
+ 0.0,
2433
+ 0.0,
2434
+ 0.0,
2435
+ 0.0,
2436
+ 0.0,
2437
+ 0.0,
2438
+ 0.0
2439
+ ],
2440
+ "std": [
2441
+ 0.0,
2442
+ 0.0,
2443
+ 0.0,
2444
+ 0.0,
2445
+ 0.0,
2446
+ 0.0,
2447
+ 0.0
2448
+ ]
2449
+ }
2450
+ },
2451
+ "language_table/0.1.0": {
2452
+ "action": {
2453
+ "mask": [
2454
+ true,
2455
+ true,
2456
+ true,
2457
+ true,
2458
+ true,
2459
+ true,
2460
+ false
2461
+ ],
2462
+ "max": [
2463
+ 0.23357294499874115,
2464
+ 0.24496802687644958,
2465
+ 0.0,
2466
+ 0.0,
2467
+ 0.0,
2468
+ 0.0,
2469
+ 1.0
2470
+ ],
2471
+ "mean": [
2472
+ 0.00014891766477376223,
2473
+ -0.0005636657006107271,
2474
+ 0.0,
2475
+ 0.0,
2476
+ 0.0,
2477
+ 0.0,
2478
+ 1.0
2479
+ ],
2480
+ "min": [
2481
+ -0.21989956498146057,
2482
+ -0.23736150562763214,
2483
+ 0.0,
2484
+ 0.0,
2485
+ 0.0,
2486
+ 0.0,
2487
+ 1.0
2488
+ ],
2489
+ "q01": [
2490
+ -0.08179590478539467,
2491
+ -0.11795833334326744,
2492
+ 0.0,
2493
+ 0.0,
2494
+ 0.0,
2495
+ 0.0,
2496
+ 1.0
2497
+ ],
2498
+ "q99": [
2499
+ 0.08822273463010788,
2500
+ 0.1191693339496851,
2501
+ 0.0,
2502
+ 0.0,
2503
+ 0.0,
2504
+ 0.0,
2505
+ 1.0
2506
+ ],
2507
+ "std": [
2508
+ 0.030162859708070755,
2509
+ 0.04230763390660286,
2510
+ 0.0,
2511
+ 0.0,
2512
+ 0.0,
2513
+ 0.0,
2514
+ 0.0
2515
+ ]
2516
+ },
2517
+ "num_trajectories": 442226,
2518
+ "num_transitions": 7045476,
2519
+ "proprio": {
2520
+ "max": [
2521
+ 0.0,
2522
+ 0.0,
2523
+ 0.0,
2524
+ 0.0,
2525
+ 0.0,
2526
+ 0.0,
2527
+ 0.0
2528
+ ],
2529
+ "mean": [
2530
+ 0.0,
2531
+ 0.0,
2532
+ 0.0,
2533
+ 0.0,
2534
+ 0.0,
2535
+ 0.0,
2536
+ 0.0
2537
+ ],
2538
+ "min": [
2539
+ 0.0,
2540
+ 0.0,
2541
+ 0.0,
2542
+ 0.0,
2543
+ 0.0,
2544
+ 0.0,
2545
+ 0.0
2546
+ ],
2547
+ "q01": [
2548
+ 0.0,
2549
+ 0.0,
2550
+ 0.0,
2551
+ 0.0,
2552
+ 0.0,
2553
+ 0.0,
2554
+ 0.0
2555
+ ],
2556
+ "q99": [
2557
+ 0.0,
2558
+ 0.0,
2559
+ 0.0,
2560
+ 0.0,
2561
+ 0.0,
2562
+ 0.0,
2563
+ 0.0
2564
+ ],
2565
+ "std": [
2566
+ 0.0,
2567
+ 0.0,
2568
+ 0.0,
2569
+ 0.0,
2570
+ 0.0,
2571
+ 0.0,
2572
+ 0.0
2573
+ ]
2574
+ }
2575
+ },
2576
+ "nyu_franka_play_dataset_converted_externally_to_rlds/0.1.0": {
2577
+ "action": {
2578
+ "mask": [
2579
+ true,
2580
+ true,
2581
+ true,
2582
+ true,
2583
+ true,
2584
+ true,
2585
+ false
2586
+ ],
2587
+ "max": [
2588
+ 0.06424188613891602,
2589
+ 0.07027634978294373,
2590
+ 0.06129661202430725,
2591
+ 6.281067848205566,
2592
+ 0.1967729926109314,
2593
+ 0.26377415657043457,
2594
+ 1.0
2595
+ ],
2596
+ "mean": [
2597
+ 0.0010219910182058811,
2598
+ -0.00012002632865915075,
2599
+ 0.00032894135802052915,
2600
+ 0.0015034276293590665,
2601
+ -0.002198528265580535,
2602
+ -0.0016632305923849344,
2603
+ 0.7230083346366882
2604
+ ],
2605
+ "min": [
2606
+ -0.05952230095863342,
2607
+ -0.07232445478439331,
2608
+ -0.06730806827545166,
2609
+ -6.278434753417969,
2610
+ -0.21479034423828125,
2611
+ -0.3627619743347168,
2612
+ 0.0
2613
+ ],
2614
+ "q01": [
2615
+ -0.03199600875377655,
2616
+ -0.032861671447753905,
2617
+ -0.03368805110454559,
2618
+ -0.12080862045288086,
2619
+ -0.12175218224525451,
2620
+ -0.11370223641395569,
2621
+ 0.0
2622
+ ],
2623
+ "q99": [
2624
+ 0.03101520001888276,
2625
+ 0.0373908892273903,
2626
+ 0.03646374464035038,
2627
+ 0.11764093399047852,
2628
+ 0.1258920183777809,
2629
+ 0.09366151213645942,
2630
+ 1.0
2631
+ ],
2632
+ "std": [
2633
+ 0.013274150900542736,
2634
+ 0.013215919025242329,
2635
+ 0.01282210648059845,
2636
+ 0.27324533462524414,
2637
+ 0.05702253058552742,
2638
+ 0.03917279839515686,
2639
+ 0.44753193855285645
2640
+ ]
2641
+ },
2642
+ "num_trajectories": 456,
2643
+ "num_transitions": 44875,
2644
+ "proprio": {
2645
+ "max": [
2646
+ 0.0,
2647
+ 0.0,
2648
+ 0.0,
2649
+ 0.0,
2650
+ 0.0,
2651
+ 0.0,
2652
+ 0.0
2653
+ ],
2654
+ "mean": [
2655
+ 0.0,
2656
+ 0.0,
2657
+ 0.0,
2658
+ 0.0,
2659
+ 0.0,
2660
+ 0.0,
2661
+ 0.0
2662
+ ],
2663
+ "min": [
2664
+ 0.0,
2665
+ 0.0,
2666
+ 0.0,
2667
+ 0.0,
2668
+ 0.0,
2669
+ 0.0,
2670
+ 0.0
2671
+ ],
2672
+ "q01": [
2673
+ 0.0,
2674
+ 0.0,
2675
+ 0.0,
2676
+ 0.0,
2677
+ 0.0,
2678
+ 0.0,
2679
+ 0.0
2680
+ ],
2681
+ "q99": [
2682
+ 0.0,
2683
+ 0.0,
2684
+ 0.0,
2685
+ 0.0,
2686
+ 0.0,
2687
+ 0.0,
2688
+ 0.0
2689
+ ],
2690
+ "std": [
2691
+ 0.0,
2692
+ 0.0,
2693
+ 0.0,
2694
+ 0.0,
2695
+ 0.0,
2696
+ 0.0,
2697
+ 0.0
2698
+ ]
2699
+ }
2700
+ },
2701
+ "rh20t_rlds/1.0.0": {
2702
+ "action": {
2703
+ "mask": [
2704
+ true,
2705
+ true,
2706
+ true,
2707
+ true,
2708
+ true,
2709
+ true,
2710
+ false
2711
+ ],
2712
+ "max": [
2713
+ 7.582831568163597e+35,
2714
+ 7.557172735451728e+35,
2715
+ 2.2717764477020827e+27,
2716
+ 3.1415927410125732,
2717
+ 1.5116956233978271,
2718
+ 3.1415927410125732,
2719
+ 1.0
2720
+ ],
2721
+ "mean": [
2722
+ -5.332157638779582e+28,
2723
+ -1.5128827327837974e+29,
2724
+ -1.832736619079747e+28,
2725
+ 0.5735913515090942,
2726
+ -0.00847744569182396,
2727
+ -0.5566052198410034,
2728
+ 0.3186892569065094
2729
+ ],
2730
+ "min": [
2731
+ -3.5543094244408723e+36,
2732
+ -8.723098019507117e+36,
2733
+ -9.648338287048974e+35,
2734
+ -3.1415927410125732,
2735
+ -1.5062522888183594,
2736
+ -3.1415927410125732,
2737
+ 0.0
2738
+ ],
2739
+ "q01": [
2740
+ 0.36028257966041566,
2741
+ -0.272584410905838,
2742
+ 0.005985925104469062,
2743
+ -3.1411514282226562,
2744
+ -0.5925320792198181,
2745
+ -3.1415159702301025,
2746
+ 0.0
2747
+ ],
2748
+ "q99": [
2749
+ 0.7534684538841248,
2750
+ 0.31738221645355225,
2751
+ 0.33061375379562374,
2752
+ 3.141425132751465,
2753
+ 0.47507260441780086,
2754
+ 3.141479730606079,
2755
+ 1.0
2756
+ ],
2757
+ "std": [
2758
+ Infinity,
2759
+ Infinity,
2760
+ Infinity,
2761
+ 2.2581026554107666,
2762
+ 0.1548534482717514,
2763
+ 2.2581026554107666,
2764
+ 0.39917993545532227
2765
+ ]
2766
+ },
2767
+ "num_trajectories": 104392,
2768
+ "num_transitions": 52644433,
2769
+ "proprio": {
2770
+ "max": [
2771
+ 0.0,
2772
+ 0.0,
2773
+ 0.0,
2774
+ 0.0,
2775
+ 0.0,
2776
+ 0.0,
2777
+ 0.0
2778
+ ],
2779
+ "mean": [
2780
+ 0.0,
2781
+ 0.0,
2782
+ 0.0,
2783
+ 0.0,
2784
+ 0.0,
2785
+ 0.0,
2786
+ 0.0
2787
+ ],
2788
+ "min": [
2789
+ 0.0,
2790
+ 0.0,
2791
+ 0.0,
2792
+ 0.0,
2793
+ 0.0,
2794
+ 0.0,
2795
+ 0.0
2796
+ ],
2797
+ "q01": [
2798
+ 0.0,
2799
+ 0.0,
2800
+ 0.0,
2801
+ 0.0,
2802
+ 0.0,
2803
+ 0.0,
2804
+ 0.0
2805
+ ],
2806
+ "q99": [
2807
+ 0.0,
2808
+ 0.0,
2809
+ 0.0,
2810
+ 0.0,
2811
+ 0.0,
2812
+ 0.0,
2813
+ 0.0
2814
+ ],
2815
+ "std": [
2816
+ 0.0,
2817
+ 0.0,
2818
+ 0.0,
2819
+ 0.0,
2820
+ 0.0,
2821
+ 0.0,
2822
+ 0.0
2823
+ ]
2824
+ }
2825
+ },
2826
+ "roboturk/0.1.0": {
2827
+ "action": {
2828
+ "mask": [
2829
+ true,
2830
+ true,
2831
+ true,
2832
+ true,
2833
+ true,
2834
+ true,
2835
+ false
2836
+ ],
2837
+ "max": [
2838
+ 0.39124172925949097,
2839
+ 0.4601028263568878,
2840
+ 0.4870833456516266,
2841
+ 1.816888689994812,
2842
+ 1.8240282535552979,
2843
+ 1.4824820756912231,
2844
+ 1.0
2845
+ ],
2846
+ "mean": [
2847
+ 0.001444889116100967,
2848
+ -0.0015945355407893658,
2849
+ -0.0011753803119063377,
2850
+ 0.002301239175722003,
2851
+ -0.0009382442804053426,
2852
+ -0.00011485860886750743,
2853
+ 0.5746025443077087
2854
+ ],
2855
+ "min": [
2856
+ -0.6546999216079712,
2857
+ -0.6365841031074524,
2858
+ -0.4217723608016968,
2859
+ -1.6695482730865479,
2860
+ -1.8023357391357422,
2861
+ -1.4630827903747559,
2862
+ 0.0
2863
+ ],
2864
+ "q01": [
2865
+ -0.1342635464668274,
2866
+ -0.19996687173843383,
2867
+ -0.1482972100377083,
2868
+ -0.20720748245716095,
2869
+ -0.09676413893699647,
2870
+ -0.18075634717941286,
2871
+ 0.0
2872
+ ],
2873
+ "q99": [
2874
+ 0.14956976801157001,
2875
+ 0.1805950567126275,
2876
+ 0.18841815620660796,
2877
+ 0.21615413755178453,
2878
+ 0.09457383215427405,
2879
+ 0.18543301910162005,
2880
+ 1.0
2881
+ ],
2882
+ "std": [
2883
+ 0.0493537075817585,
2884
+ 0.06354564428329468,
2885
+ 0.06116492301225662,
2886
+ 0.0955340564250946,
2887
+ 0.08420011401176453,
2888
+ 0.06517910957336426,
2889
+ 0.4945177137851715
2890
+ ]
2891
+ },
2892
+ "num_trajectories": 1995,
2893
+ "num_transitions": 187507,
2894
+ "proprio": {
2895
+ "max": [
2896
+ 0.0,
2897
+ 0.0,
2898
+ 0.0,
2899
+ 0.0,
2900
+ 0.0,
2901
+ 0.0,
2902
+ 0.0
2903
+ ],
2904
+ "mean": [
2905
+ 0.0,
2906
+ 0.0,
2907
+ 0.0,
2908
+ 0.0,
2909
+ 0.0,
2910
+ 0.0,
2911
+ 0.0
2912
+ ],
2913
+ "min": [
2914
+ 0.0,
2915
+ 0.0,
2916
+ 0.0,
2917
+ 0.0,
2918
+ 0.0,
2919
+ 0.0,
2920
+ 0.0
2921
+ ],
2922
+ "q01": [
2923
+ 0.0,
2924
+ 0.0,
2925
+ 0.0,
2926
+ 0.0,
2927
+ 0.0,
2928
+ 0.0,
2929
+ 0.0
2930
+ ],
2931
+ "q99": [
2932
+ 0.0,
2933
+ 0.0,
2934
+ 0.0,
2935
+ 0.0,
2936
+ 0.0,
2937
+ 0.0,
2938
+ 0.0
2939
+ ],
2940
+ "std": [
2941
+ 0.0,
2942
+ 0.0,
2943
+ 0.0,
2944
+ 0.0,
2945
+ 0.0,
2946
+ 0.0,
2947
+ 0.0
2948
+ ]
2949
+ }
2950
+ },
2951
+ "stanford_hydra_dataset_converted_externally_to_rlds/0.1.0": {
2952
+ "action": {
2953
+ "mask": [
2954
+ true,
2955
+ true,
2956
+ true,
2957
+ true,
2958
+ true,
2959
+ true,
2960
+ false
2961
+ ],
2962
+ "max": [
2963
+ 0.02499854564666748,
2964
+ 0.02499903365969658,
2965
+ 0.024999922141432762,
2966
+ 0.24974457919597626,
2967
+ 0.24997030198574066,
2968
+ 0.24999946355819702,
2969
+ 1.0
2970
+ ],
2971
+ "mean": [
2972
+ 0.0007790043600834906,
2973
+ 0.00013707877951674163,
2974
+ -0.000254859565757215,
2975
+ 0.0012903243768960238,
2976
+ -0.004751724191009998,
2977
+ 0.002692892448976636,
2978
+ 0.48855218291282654
2979
+ ],
2980
+ "min": [
2981
+ -0.024999044835567474,
2982
+ -0.024999700486660004,
2983
+ -0.02499929815530777,
2984
+ -0.24993225932121277,
2985
+ -0.2499666064977646,
2986
+ -0.2499932497739792,
2987
+ 0.0
2988
+ ],
2989
+ "q01": [
2990
+ -0.019992006458342076,
2991
+ -0.02415412735193968,
2992
+ -0.022941758055239916,
2993
+ -0.11085530579090118,
2994
+ -0.12024572037160397,
2995
+ -0.13314770206809043,
2996
+ 0.0
2997
+ ],
2998
+ "q99": [
2999
+ 0.022886231057345868,
3000
+ 0.022358838934451335,
3001
+ 0.02410089675337076,
3002
+ 0.12370114490389822,
3003
+ 0.11323311634361738,
3004
+ 0.18474749639630164,
3005
+ 1.0
3006
+ ],
3007
+ "std": [
3008
+ 0.008022183552384377,
3009
+ 0.009131456725299358,
3010
+ 0.00957438349723816,
3011
+ 0.04122224077582359,
3012
+ 0.03843001648783684,
3013
+ 0.046067025512456894,
3014
+ 0.49978113174438477
3015
+ ]
3016
+ },
3017
+ "num_trajectories": 570,
3018
+ "num_transitions": 358234,
3019
+ "proprio": {
3020
+ "max": [
3021
+ 0.0,
3022
+ 0.0,
3023
+ 0.0,
3024
+ 0.0,
3025
+ 0.0,
3026
+ 0.0,
3027
+ 0.0
3028
+ ],
3029
+ "mean": [
3030
+ 0.0,
3031
+ 0.0,
3032
+ 0.0,
3033
+ 0.0,
3034
+ 0.0,
3035
+ 0.0,
3036
+ 0.0
3037
+ ],
3038
+ "min": [
3039
+ 0.0,
3040
+ 0.0,
3041
+ 0.0,
3042
+ 0.0,
3043
+ 0.0,
3044
+ 0.0,
3045
+ 0.0
3046
+ ],
3047
+ "q01": [
3048
+ 0.0,
3049
+ 0.0,
3050
+ 0.0,
3051
+ 0.0,
3052
+ 0.0,
3053
+ 0.0,
3054
+ 0.0
3055
+ ],
3056
+ "q99": [
3057
+ 0.0,
3058
+ 0.0,
3059
+ 0.0,
3060
+ 0.0,
3061
+ 0.0,
3062
+ 0.0,
3063
+ 0.0
3064
+ ],
3065
+ "std": [
3066
+ 0.0,
3067
+ 0.0,
3068
+ 0.0,
3069
+ 0.0,
3070
+ 0.0,
3071
+ 0.0,
3072
+ 0.0
3073
+ ]
3074
+ }
3075
+ },
3076
+ "taco_play/0.1.0": {
3077
+ "action": {
3078
+ "mask": [
3079
+ true,
3080
+ true,
3081
+ true,
3082
+ true,
3083
+ true,
3084
+ true,
3085
+ false
3086
+ ],
3087
+ "max": [
3088
+ 1.4915844202041626,
3089
+ 2.1842432022094727,
3090
+ 2.6836395263671875,
3091
+ 5.035226821899414,
3092
+ 2.665864944458008,
3093
+ 4.250768661499023,
3094
+ 1.0
3095
+ ],
3096
+ "mean": [
3097
+ -0.0038459226489067078,
3098
+ 0.009671436622738838,
3099
+ 0.01278059184551239,
3100
+ -0.0054037850350141525,
3101
+ -0.009606562554836273,
3102
+ -0.0024807206355035305,
3103
+ 0.4263913035392761
3104
+ ],
3105
+ "min": [
3106
+ -4.242457866668701,
3107
+ -3.192805051803589,
3108
+ -1.3371467590332031,
3109
+ -4.202683448791504,
3110
+ -2.6722638607025146,
3111
+ -3.3467135429382324,
3112
+ 0.0
3113
+ ],
3114
+ "q01": [
3115
+ -0.7106140398979186,
3116
+ -1.056944659948349,
3117
+ -0.5878450274467468,
3118
+ -0.7682853937149048,
3119
+ -0.7180147767066956,
3120
+ -1.5527938604354858,
3121
+ 0.0
3122
+ ],
3123
+ "q99": [
3124
+ 0.6482916426658629,
3125
+ 1.0051310062408447,
3126
+ 0.9480248689651489,
3127
+ 0.6926478147506714,
3128
+ 0.6351067513227462,
3129
+ 1.628010264635086,
3130
+ 1.0
3131
+ ],
3132
+ "std": [
3133
+ 0.23254045844078064,
3134
+ 0.3629826307296753,
3135
+ 0.2869291603565216,
3136
+ 0.261770635843277,
3137
+ 0.24388927221298218,
3138
+ 0.5216501355171204,
3139
+ 0.49469029903411865
3140
+ ]
3141
+ },
3142
+ "num_trajectories": 3603,
3143
+ "num_transitions": 237798,
3144
+ "proprio": {
3145
+ "max": [
3146
+ 0.0,
3147
+ 0.0,
3148
+ 0.0,
3149
+ 0.0,
3150
+ 0.0,
3151
+ 0.0,
3152
+ 0.0
3153
+ ],
3154
+ "mean": [
3155
+ 0.0,
3156
+ 0.0,
3157
+ 0.0,
3158
+ 0.0,
3159
+ 0.0,
3160
+ 0.0,
3161
+ 0.0
3162
+ ],
3163
+ "min": [
3164
+ 0.0,
3165
+ 0.0,
3166
+ 0.0,
3167
+ 0.0,
3168
+ 0.0,
3169
+ 0.0,
3170
+ 0.0
3171
+ ],
3172
+ "q01": [
3173
+ 0.0,
3174
+ 0.0,
3175
+ 0.0,
3176
+ 0.0,
3177
+ 0.0,
3178
+ 0.0,
3179
+ 0.0
3180
+ ],
3181
+ "q99": [
3182
+ 0.0,
3183
+ 0.0,
3184
+ 0.0,
3185
+ 0.0,
3186
+ 0.0,
3187
+ 0.0,
3188
+ 0.0
3189
+ ],
3190
+ "std": [
3191
+ 0.0,
3192
+ 0.0,
3193
+ 0.0,
3194
+ 0.0,
3195
+ 0.0,
3196
+ 0.0,
3197
+ 0.0
3198
+ ]
3199
+ }
3200
+ },
3201
+ "toto/0.1.0": {
3202
+ "action": {
3203
+ "mask": [
3204
+ true,
3205
+ true,
3206
+ true,
3207
+ true,
3208
+ true,
3209
+ true,
3210
+ false
3211
+ ],
3212
+ "max": [
3213
+ 0.6839867234230042,
3214
+ 0.4454185664653778,
3215
+ 0.7984078526496887,
3216
+ 2.120781660079956,
3217
+ 1.371164321899414,
3218
+ 1.4118704795837402,
3219
+ 0.0
3220
+ ],
3221
+ "mean": [
3222
+ 0.3854214549064636,
3223
+ 0.007769507821649313,
3224
+ 0.3632742166519165,
3225
+ -0.665202796459198,
3226
+ 0.1890396624803543,
3227
+ 0.0329875648021698,
3228
+ 0.0
3229
+ ],
3230
+ "min": [
3231
+ 0.09922284632921219,
3232
+ -0.5180193781852722,
3233
+ 0.13791072368621826,
3234
+ -2.635117530822754,
3235
+ -1.0734480619430542,
3236
+ -1.9282547235488892,
3237
+ 0.0
3238
+ ],
3239
+ "q01": [
3240
+ 0.1756722891330719,
3241
+ -0.3077590811252594,
3242
+ 0.235383919775486,
3243
+ -2.0908505964279174,
3244
+ -0.6191593289375306,
3245
+ -0.7488683319091797,
3246
+ 0.0
3247
+ ],
3248
+ "q99": [
3249
+ 0.6136963081359863,
3250
+ 0.33704194784164443,
3251
+ 0.6681221985816956,
3252
+ 0.7422861719131538,
3253
+ 0.7955395007133507,
3254
+ 0.740464625358582,
3255
+ 0.0
3256
+ ],
3257
+ "std": [
3258
+ 0.12211630493402481,
3259
+ 0.19378569722175598,
3260
+ 0.10178232192993164,
3261
+ 0.5725256204605103,
3262
+ 0.298846036195755,
3263
+ 0.32599160075187683,
3264
+ 0.0
3265
+ ]
3266
+ },
3267
+ "num_trajectories": 1003,
3268
+ "num_transitions": 325699,
3269
+ "proprio": {
3270
+ "max": [
3271
+ 0.0,
3272
+ 0.0,
3273
+ 0.0,
3274
+ 0.0,
3275
+ 0.0,
3276
+ 0.0,
3277
+ 0.0
3278
+ ],
3279
+ "mean": [
3280
+ 0.0,
3281
+ 0.0,
3282
+ 0.0,
3283
+ 0.0,
3284
+ 0.0,
3285
+ 0.0,
3286
+ 0.0
3287
+ ],
3288
+ "min": [
3289
+ 0.0,
3290
+ 0.0,
3291
+ 0.0,
3292
+ 0.0,
3293
+ 0.0,
3294
+ 0.0,
3295
+ 0.0
3296
+ ],
3297
+ "q01": [
3298
+ 0.0,
3299
+ 0.0,
3300
+ 0.0,
3301
+ 0.0,
3302
+ 0.0,
3303
+ 0.0,
3304
+ 0.0
3305
+ ],
3306
+ "q99": [
3307
+ 0.0,
3308
+ 0.0,
3309
+ 0.0,
3310
+ 0.0,
3311
+ 0.0,
3312
+ 0.0,
3313
+ 0.0
3314
+ ],
3315
+ "std": [
3316
+ 0.0,
3317
+ 0.0,
3318
+ 0.0,
3319
+ 0.0,
3320
+ 0.0,
3321
+ 0.0,
3322
+ 0.0
3323
+ ]
3324
+ }
3325
+ },
3326
+ "ucsd_kitchen_dataset_converted_externally_to_rlds/0.1.0": {
3327
+ "action": {
3328
+ "mask": [
3329
+ true,
3330
+ true,
3331
+ true,
3332
+ true,
3333
+ true,
3334
+ true,
3335
+ false
3336
+ ],
3337
+ "max": [
3338
+ 678.0,
3339
+ 400.0,
3340
+ 507.0,
3341
+ 180.00001525878906,
3342
+ 6.000013828277588,
3343
+ 116.99998474121094,
3344
+ 1.0
3345
+ ],
3346
+ "mean": [
3347
+ 410.375732421875,
3348
+ 116.9518814086914,
3349
+ 192.35031127929688,
3350
+ -121.22441864013672,
3351
+ -33.84892654418945,
3352
+ 50.016136169433594,
3353
+ 0.741813600063324
3354
+ ],
3355
+ "min": [
3356
+ 172.0,
3357
+ -166.0,
3358
+ -99.99999237060547,
3359
+ -180.00001525878906,
3360
+ -89.0,
3361
+ -96.00010681152344,
3362
+ 0.0
3363
+ ],
3364
+ "q01": [
3365
+ 200.00001052856445,
3366
+ -102.31004211425781,
3367
+ -94.99993370056153,
3368
+ -180.00001525878906,
3369
+ -88.00001525878906,
3370
+ -38.999977111816406,
3371
+ 0.0
3372
+ ],
3373
+ "q99": [
3374
+ 637.0,
3375
+ 368.30999999999995,
3376
+ 493.0,
3377
+ 180.00001525878906,
3378
+ 0.999983012676239,
3379
+ 105.00001525878906,
3380
+ 1.0
3381
+ ],
3382
+ "std": [
3383
+ 122.81488037109375,
3384
+ 108.80094909667969,
3385
+ 130.30345153808594,
3386
+ 116.2820053100586,
3387
+ 27.62191390991211,
3388
+ 41.02091979980469,
3389
+ 0.4376337230205536
3390
+ ]
3391
+ },
3392
+ "num_trajectories": 150,
3393
+ "num_transitions": 3970,
3394
+ "proprio": {
3395
+ "max": [
3396
+ 0.0,
3397
+ 0.0,
3398
+ 0.0,
3399
+ 0.0,
3400
+ 0.0,
3401
+ 0.0,
3402
+ 0.0
3403
+ ],
3404
+ "mean": [
3405
+ 0.0,
3406
+ 0.0,
3407
+ 0.0,
3408
+ 0.0,
3409
+ 0.0,
3410
+ 0.0,
3411
+ 0.0
3412
+ ],
3413
+ "min": [
3414
+ 0.0,
3415
+ 0.0,
3416
+ 0.0,
3417
+ 0.0,
3418
+ 0.0,
3419
+ 0.0,
3420
+ 0.0
3421
+ ],
3422
+ "q01": [
3423
+ 0.0,
3424
+ 0.0,
3425
+ 0.0,
3426
+ 0.0,
3427
+ 0.0,
3428
+ 0.0,
3429
+ 0.0
3430
+ ],
3431
+ "q99": [
3432
+ 0.0,
3433
+ 0.0,
3434
+ 0.0,
3435
+ 0.0,
3436
+ 0.0,
3437
+ 0.0,
3438
+ 0.0
3439
+ ],
3440
+ "std": [
3441
+ 0.0,
3442
+ 0.0,
3443
+ 0.0,
3444
+ 0.0,
3445
+ 0.0,
3446
+ 0.0,
3447
+ 0.0
3448
+ ]
3449
+ }
3450
+ },
3451
+ "utaustin_mutex/0.1.0": {
3452
+ "action": {
3453
+ "mask": [
3454
+ true,
3455
+ true,
3456
+ true,
3457
+ true,
3458
+ true,
3459
+ true,
3460
+ false
3461
+ ],
3462
+ "max": [
3463
+ 1.0,
3464
+ 1.0,
3465
+ 1.0,
3466
+ 0.375,
3467
+ 0.375,
3468
+ 0.375,
3469
+ 1.0
3470
+ ],
3471
+ "mean": [
3472
+ 0.06176406517624855,
3473
+ -0.005005490034818649,
3474
+ 0.10216782987117767,
3475
+ -0.03314131125807762,
3476
+ 0.013895022682845592,
3477
+ -0.011317633092403412,
3478
+ 0.5038976669311523
3479
+ ],
3480
+ "min": [
3481
+ -1.0,
3482
+ -1.0,
3483
+ -1.0,
3484
+ -0.375,
3485
+ -0.375,
3486
+ -0.375,
3487
+ 0.0
3488
+ ],
3489
+ "q01": [
3490
+ -0.4285714328289032,
3491
+ -0.9800000190734863,
3492
+ -0.5571428537368774,
3493
+ -0.375,
3494
+ -0.15642857551574707,
3495
+ -0.335357129573822,
3496
+ 0.0
3497
+ ],
3498
+ "q99": [
3499
+ 0.5914285778999329,
3500
+ 0.9714285731315613,
3501
+ 1.0,
3502
+ 0.3278571367263794,
3503
+ 0.207857146859169,
3504
+ 0.25607141852378845,
3505
+ 1.0
3506
+ ],
3507
+ "std": [
3508
+ 0.187501460313797,
3509
+ 0.4468473196029663,
3510
+ 0.3792876601219177,
3511
+ 0.14097853004932404,
3512
+ 0.06453699618577957,
3513
+ 0.11765265464782715,
3514
+ 0.501045286655426
3515
+ ]
3516
+ },
3517
+ "num_trajectories": 1500,
3518
+ "num_transitions": 361883,
3519
+ "proprio": {
3520
+ "max": [
3521
+ 0.0,
3522
+ 0.0,
3523
+ 0.0,
3524
+ 0.0,
3525
+ 0.0,
3526
+ 0.0,
3527
+ 0.0
3528
+ ],
3529
+ "mean": [
3530
+ 0.0,
3531
+ 0.0,
3532
+ 0.0,
3533
+ 0.0,
3534
+ 0.0,
3535
+ 0.0,
3536
+ 0.0
3537
+ ],
3538
+ "min": [
3539
+ 0.0,
3540
+ 0.0,
3541
+ 0.0,
3542
+ 0.0,
3543
+ 0.0,
3544
+ 0.0,
3545
+ 0.0
3546
+ ],
3547
+ "q01": [
3548
+ 0.0,
3549
+ 0.0,
3550
+ 0.0,
3551
+ 0.0,
3552
+ 0.0,
3553
+ 0.0,
3554
+ 0.0
3555
+ ],
3556
+ "q99": [
3557
+ 0.0,
3558
+ 0.0,
3559
+ 0.0,
3560
+ 0.0,
3561
+ 0.0,
3562
+ 0.0,
3563
+ 0.0
3564
+ ],
3565
+ "std": [
3566
+ 0.0,
3567
+ 0.0,
3568
+ 0.0,
3569
+ 0.0,
3570
+ 0.0,
3571
+ 0.0,
3572
+ 0.0
3573
+ ]
3574
+ }
3575
+ },
3576
+ "viola/0.1.0": {
3577
+ "action": {
3578
+ "mask": [
3579
+ true,
3580
+ true,
3581
+ true,
3582
+ true,
3583
+ true,
3584
+ true,
3585
+ false
3586
+ ],
3587
+ "max": [
3588
+ 1.0,
3589
+ 1.0,
3590
+ 1.0,
3591
+ 0.375,
3592
+ 0.36321428418159485,
3593
+ 0.375,
3594
+ 1.0
3595
+ ],
3596
+ "mean": [
3597
+ 0.04761853069067001,
3598
+ -0.029204534366726875,
3599
+ 0.055867329239845276,
3600
+ -0.0026185200549662113,
3601
+ 0.006867341697216034,
3602
+ -0.016821356490254402,
3603
+ 0.7323777675628662
3604
+ ],
3605
+ "min": [
3606
+ -1.0,
3607
+ -1.0,
3608
+ -1.0,
3609
+ -0.375,
3610
+ -0.375,
3611
+ -0.375,
3612
+ 0.0
3613
+ ],
3614
+ "q01": [
3615
+ -0.9628571271896362,
3616
+ -1.0,
3617
+ -1.0,
3618
+ -0.26249998807907104,
3619
+ -0.21321429312229156,
3620
+ -0.3385714292526245,
3621
+ 0.0
3622
+ ],
3623
+ "q99": [
3624
+ 0.9114285707473755,
3625
+ 0.868571400642395,
3626
+ 1.0,
3627
+ 0.2817857265472412,
3628
+ 0.2239285707473755,
3629
+ 0.3557142913341522,
3630
+ 1.0
3631
+ ],
3632
+ "std": [
3633
+ 0.39157867431640625,
3634
+ 0.40765219926834106,
3635
+ 0.40077903866767883,
3636
+ 0.10023998469114304,
3637
+ 0.08443189412355423,
3638
+ 0.10375089943408966,
3639
+ 0.442600816488266
3640
+ ]
3641
+ },
3642
+ "num_trajectories": 150,
3643
+ "num_transitions": 76324,
3644
+ "proprio": {
3645
+ "max": [
3646
+ 0.0,
3647
+ 0.0,
3648
+ 0.0,
3649
+ 0.0,
3650
+ 0.0,
3651
+ 0.0,
3652
+ 0.0
3653
+ ],
3654
+ "mean": [
3655
+ 0.0,
3656
+ 0.0,
3657
+ 0.0,
3658
+ 0.0,
3659
+ 0.0,
3660
+ 0.0,
3661
+ 0.0
3662
+ ],
3663
+ "min": [
3664
+ 0.0,
3665
+ 0.0,
3666
+ 0.0,
3667
+ 0.0,
3668
+ 0.0,
3669
+ 0.0,
3670
+ 0.0
3671
+ ],
3672
+ "q01": [
3673
+ 0.0,
3674
+ 0.0,
3675
+ 0.0,
3676
+ 0.0,
3677
+ 0.0,
3678
+ 0.0,
3679
+ 0.0
3680
+ ],
3681
+ "q99": [
3682
+ 0.0,
3683
+ 0.0,
3684
+ 0.0,
3685
+ 0.0,
3686
+ 0.0,
3687
+ 0.0,
3688
+ 0.0
3689
+ ],
3690
+ "std": [
3691
+ 0.0,
3692
+ 0.0,
3693
+ 0.0,
3694
+ 0.0,
3695
+ 0.0,
3696
+ 0.0,
3697
+ 0.0
3698
+ ]
3699
+ }
3700
+ }
3701
+ }
3702
+ }
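Each `action` block above stores per-dataset statistics (min/max, mean/std, the 1st/99th percentiles `q01`/`q99`, and a per-dimension `mask`; the `proprio` blocks are all zeros here). Below is a minimal sketch of how such statistics are commonly consumed for quantile normalization. This is an assumption: the consuming code is not part of this commit, and the file name, function name, and variable names are illustrative. Masked dimensions are rescaled from [q01, q99] into [-1, 1]; dimensions whose `mask` entry is false (the last one here, typically the gripper) pass through unchanged.

import json
import numpy as np

def normalize_action(action: np.ndarray, stats: dict) -> np.ndarray:
    """Rescale masked action dims from [stats['q01'], stats['q99']] to [-1, 1]."""
    q01 = np.asarray(stats["q01"])
    q99 = np.asarray(stats["q99"])
    mask = np.asarray(stats["mask"], dtype=bool)
    # small epsilon guards zero-width quantile ranges (e.g. all-zero dims)
    scaled = np.clip(2.0 * (action - q01) / (q99 - q01 + 1e-8) - 1.0, -1.0, 1.0)
    # unmasked dims (e.g. the gripper) are left untouched
    return np.where(mask, scaled, action)

# hypothetical usage; "dataset_statistics.json" stands in for the statistics file in this commit
# stats = json.load(open("dataset_statistics.json"))["fmb_dataset/1.0.0"]["action"]
# normalized = normalize_action(raw_action, stats)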
rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ab9c2b7a17efd4a6eedea33b5eea0b32f48cbe9c26b998a6a82a08f10697635
3
+ size 14512
rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6ad7d8445a28d67eb25f5b8d17d5ff361073ab4b4e4160a075c272bf726fa920
3
+ size 14512
special_tokens_map.json ADDED
@@ -0,0 +1,39 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ {
4
+ "content": "<image>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false
9
+ }
10
+ ],
11
+ "bos_token": {
12
+ "content": "<bos>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false
17
+ },
18
+ "eos_token": {
19
+ "content": "<eos>",
20
+ "lstrip": false,
21
+ "normalized": false,
22
+ "rstrip": false,
23
+ "single_word": false
24
+ },
25
+ "pad_token": {
26
+ "content": "<pad>",
27
+ "lstrip": false,
28
+ "normalized": false,
29
+ "rstrip": false,
30
+ "single_word": false
31
+ },
32
+ "unk_token": {
33
+ "content": "<unk>",
34
+ "lstrip": false,
35
+ "normalized": false,
36
+ "rstrip": false,
37
+ "single_word": false
38
+ }
39
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2523a63c898ebf0a32c7282a2e459ef2c950a846c5f3172305089e4149b6b6c3
3
+ size 36157680
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:181d7e28194cd0b3534962a7986d190c47b3a243caeaecb4f668abb2628a9ce5
3
+ size 7544
zero_to_fp32.py ADDED
@@ -0,0 +1,674 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example:
14
+ # python zero_to_fp32.py . output_dir/
15
+ # or
16
+ # python zero_to_fp32.py . output_dir/ --safe_serialization
17
+
18
+ import argparse
19
+ import torch
20
+ import glob
21
+ import math
22
+ import os
23
+ import re
24
+ import json
25
+ from tqdm import tqdm
26
+ from collections import OrderedDict
27
+ from dataclasses import dataclass
28
+
29
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
30
+ # DeepSpeed data structures it has to be available in the current python environment.
31
+ from deepspeed.utils import logger
32
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
33
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
34
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
35
+
36
+
37
+ @dataclass
38
+ class zero_model_state:
39
+ buffers: dict
40
+ param_shapes: dict
41
+ shared_params: list
42
+ ds_version: int
43
+ frozen_param_shapes: dict
44
+ frozen_param_fragments: dict
45
+
46
+
47
+ debug = 0
48
+
49
+ # load to cpu
50
+ device = torch.device('cpu')
51
+
52
+
53
+ def atoi(text):
54
+ return int(text) if text.isdigit() else text
55
+
56
+
57
+ def natural_keys(text):
58
+ '''
59
+ alist.sort(key=natural_keys) sorts in human order
60
+ http://nedbatchelder.com/blog/200712/human_sorting.html
61
+ (See Toothy's implementation in the comments)
62
+ '''
63
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
64
+
65
+
66
+ def get_model_state_file(checkpoint_dir, zero_stage):
67
+ if not os.path.isdir(checkpoint_dir):
68
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
69
+
70
+ # there should be only one file
71
+ if zero_stage <= 2:
72
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
73
+ elif zero_stage == 3:
74
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
75
+
76
+ if not os.path.exists(file):
77
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
78
+
79
+ return file
80
+
81
+
82
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
83
+ # XXX: need to test that this simple glob rule works for multi-node setup too
84
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
85
+
86
+ if len(ckpt_files) == 0:
87
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
88
+
89
+ return ckpt_files
90
+
91
+
92
+ def get_optim_files(checkpoint_dir):
93
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
94
+
95
+
96
+ def get_model_state_files(checkpoint_dir):
97
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
98
+
99
+
100
+ def parse_model_states(files):
101
+ zero_model_states = []
102
+ for file in files:
103
+ state_dict = torch.load(file, map_location=device)
104
+
105
+ if BUFFER_NAMES not in state_dict:
106
+ raise ValueError(f"{file} is not a model state checkpoint")
107
+ buffer_names = state_dict[BUFFER_NAMES]
108
+ if debug:
109
+ print("Found buffers:", buffer_names)
110
+
111
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
112
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
113
+ param_shapes = state_dict[PARAM_SHAPES]
114
+
115
+ # collect parameters that are included in param_shapes
116
+ param_names = []
117
+ for s in param_shapes:
118
+ for name in s.keys():
119
+ param_names.append(name)
120
+
121
+ # update with frozen parameters
122
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
123
+ if frozen_param_shapes is not None:
124
+ if debug:
125
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
126
+ param_names += list(frozen_param_shapes.keys())
127
+
128
+ # handle shared params
129
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
130
+
131
+ ds_version = state_dict.get(DS_VERSION, None)
132
+
133
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
134
+
135
+ z_model_state = zero_model_state(buffers=buffers,
136
+ param_shapes=param_shapes,
137
+ shared_params=shared_params,
138
+ ds_version=ds_version,
139
+ frozen_param_shapes=frozen_param_shapes,
140
+ frozen_param_fragments=frozen_param_fragments)
141
+ zero_model_states.append(z_model_state)
142
+
143
+ return zero_model_states
144
+
145
+
146
+ def parse_optim_states(files, ds_checkpoint_dir):
147
+ total_files = len(files)
148
+ state_dicts = []
149
+ for f in files:
150
+ state_dict = torch.load(f, map_location=device)
151
+ # immediately discard the two potentially huge optimizer states, since we only care about the fp32 master weights
152
+ # and also handle the case where it was already removed by another helper script
153
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
154
+ state_dicts.append(state_dict)
155
+
156
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
157
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
158
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
159
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
160
+
161
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
162
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
163
+ # use the max of the partition_count to get the dp world_size.
164
+
165
+ if type(world_size) is list:
166
+ world_size = max(world_size)
167
+
168
+ if world_size != total_files:
169
+ raise ValueError(
170
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
171
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
172
+ )
173
+
174
+ # the groups are named differently in each stage
175
+ if zero_stage <= 2:
176
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
177
+ elif zero_stage == 3:
178
+ fp32_groups_key = FP32_FLAT_GROUPS
179
+ else:
180
+ raise ValueError(f"unknown zero stage {zero_stage}")
181
+
182
+ if zero_stage <= 2:
183
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
184
+ elif zero_stage == 3:
185
+ # if there is more than one param group, there will be multiple flattened tensors - one
186
+ # flattened tensor per group - for simplicity merge them into a single tensor
187
+ #
188
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
189
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
190
+
191
+ fp32_flat_groups = [
192
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
193
+ ]
194
+
195
+ return zero_stage, world_size, fp32_flat_groups
196
+
197
+
198
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
199
+ """
200
+ Returns fp32 state_dict reconstructed from ds checkpoint
201
+
202
+ Args:
203
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
204
+
205
+ """
206
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
207
+
208
+ optim_files = get_optim_files(ds_checkpoint_dir)
209
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
210
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
211
+
212
+ model_files = get_model_state_files(ds_checkpoint_dir)
213
+
214
+ zero_model_states = parse_model_states(model_files)
215
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
216
+
217
+ if zero_stage <= 2:
218
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
219
+ exclude_frozen_parameters)
220
+ elif zero_stage == 3:
221
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
222
+ exclude_frozen_parameters)
223
+
224
+
225
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
226
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
227
+ return
228
+
229
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
230
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
231
+
232
+ if debug:
233
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
234
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
235
+
236
+ wanted_params = len(frozen_param_shapes)
237
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
238
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
239
+ print(f'Frozen params: Have {avail_numel} numels to process.')
240
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
241
+
242
+ total_params = 0
243
+ total_numel = 0
244
+ for name, shape in frozen_param_shapes.items():
245
+ total_params += 1
246
+ unpartitioned_numel = shape.numel()
247
+ total_numel += unpartitioned_numel
248
+
249
+ state_dict[name] = frozen_param_fragments[name]
250
+
251
+ if debug:
252
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
253
+
254
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
255
+
256
+
257
+ def _has_callable(obj, fn):
258
+ attr = getattr(obj, fn, None)
259
+ return callable(attr)
260
+
261
+
262
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
263
+ param_shapes = zero_model_states[0].param_shapes
264
+
265
+ # Reconstruction protocol:
266
+ #
267
+ # each rank saved one flat fp32 partition per param group; concatenating a
+ # group's partitions across ranks restores the group's full flat tensor,
+ # from which each param is carved out sequentially by its recorded shape
268
+
269
+ if debug:
270
+ for i in range(world_size):
271
+ for j in range(len(fp32_flat_groups[0])):
272
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
273
+
274
+ # XXX: memory usage doubles here (zero2)
275
+ num_param_groups = len(fp32_flat_groups[0])
276
+ merged_single_partition_of_fp32_groups = []
277
+ for i in range(num_param_groups):
278
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
279
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
280
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
281
+ avail_numel = sum(
282
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
283
+
284
+ if debug:
285
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
286
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
287
+ # not asserting if there is a mismatch due to possible padding
288
+ print(f"Have {avail_numel} numels to process.")
289
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
290
+
291
+ # params
292
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
293
+ # out-of-core computing solution
294
+ total_numel = 0
295
+ total_params = 0
296
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
297
+ offset = 0
298
+ avail_numel = full_single_fp32_vector.numel()
299
+ for name, shape in shapes.items():
300
+
301
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
302
+ total_numel += unpartitioned_numel
303
+ total_params += 1
304
+
305
+ if debug:
306
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
307
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
308
+ offset += unpartitioned_numel
309
+
310
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
311
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
312
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
313
+ # live optimizer object, so we are checking that the numbers are within the right range
314
+ align_to = 2 * world_size
315
+
316
+ def zero2_align(x):
317
+ return align_to * math.ceil(x / align_to)
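+ # worked example (illustrative numbers): with world_size=4, align_to == 8,
+ # so zero2_align(10) == 8 * ceil(10/8) == 16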
318
+
319
+ if debug:
320
+ print(f"original offset={offset}, avail_numel={avail_numel}")
321
+
322
+ offset = zero2_align(offset)
323
+ avail_numel = zero2_align(avail_numel)
324
+
325
+ if debug:
326
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
327
+
328
+ # Sanity check
329
+ if offset != avail_numel:
330
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
331
+
332
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
333
+
334
+
335
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
336
+ exclude_frozen_parameters):
337
+ state_dict = OrderedDict()
338
+
339
+ # buffers
340
+ buffers = zero_model_states[0].buffers
341
+ state_dict.update(buffers)
342
+ if debug:
343
+ print(f"added {len(buffers)} buffers")
344
+
345
+ if not exclude_frozen_parameters:
346
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
347
+
348
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
349
+
350
+ # recover shared parameters
351
+ for pair in zero_model_states[0].shared_params:
352
+ if pair[1] in state_dict:
353
+ state_dict[pair[0]] = state_dict[pair[1]]
354
+
355
+ return state_dict
356
+
357
+
358
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
359
+ remainder = unpartitioned_numel % world_size
360
+ padding_numel = (world_size - remainder) if remainder else 0
361
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
362
+ return partitioned_numel, padding_numel
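+ # worked example (illustrative numbers): unpartitioned_numel=10, world_size=4
+ # gives partitioned_numel = ceil(10/4) = 3 and padding_numel = 4 - 10 % 4 = 2:
+ # 4 ranks * 3 elements = 12 = 10 real elements + 2 padding elements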
363
+
364
+
365
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
366
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
367
+ return
368
+
369
+ if debug:
370
+ for i in range(world_size):
371
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
372
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
373
+
374
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
375
+ wanted_params = len(frozen_param_shapes)
376
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
377
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
378
+ print(f'Frozen params: Have {avail_numel} numels to process.')
379
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
380
+
381
+ total_params = 0
382
+ total_numel = 0
383
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
384
+ total_params += 1
385
+ unpartitioned_numel = shape.numel()
386
+ total_numel += unpartitioned_numel
387
+
388
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
389
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
390
+
391
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
392
+
393
+ if debug:
394
+ print(
395
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
396
+ )
397
+
398
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
399
+
400
+
401
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
402
+ param_shapes = zero_model_states[0].param_shapes
403
+ avail_numel = fp32_flat_groups[0].numel() * world_size
404
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
405
+ # param, re-consolidating each param, while dealing with padding if any
406
+
407
+ # merge list of dicts, preserving order
408
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
409
+
410
+ if debug:
411
+ for i in range(world_size):
412
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
413
+
414
+ wanted_params = len(param_shapes)
415
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
416
+ # not asserting if there is a mismatch due to possible padding
417
+ avail_numel = fp32_flat_groups[0].numel() * world_size
418
+ print(f"Trainable params: Have {avail_numel} numels to process.")
419
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
420
+
421
+ # params
422
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
423
+ # out-of-core computing solution
424
+ offset = 0
425
+ total_numel = 0
426
+ total_params = 0
427
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering Sharded Weights'):
428
+ unpartitioned_numel = shape.numel()
429
+ total_numel += unpartitioned_numel
430
+ total_params += 1
431
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
432
+
433
+ if debug:
434
+ print(
435
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
436
+ )
437
+
438
+ # XXX: memory usage doubles here
439
+ state_dict[name] = torch.cat(
440
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
441
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
442
+ offset += partitioned_numel
443
+
444
+ offset *= world_size
445
+
446
+ # Sanity check
447
+ if offset != avail_numel:
448
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
449
+
450
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
451
+
452
+
453
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
454
+ exclude_frozen_parameters):
455
+ state_dict = OrderedDict()
456
+
457
+ # buffers
458
+ buffers = zero_model_states[0].buffers
459
+ state_dict.update(buffers)
460
+ if debug:
461
+ print(f"added {len(buffers)} buffers")
462
+
463
+ if not exclude_frozen_parameters:
464
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
465
+
466
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
467
+
468
+ # recover shared parameters
469
+ for pair in zero_model_states[0].shared_params:
470
+ if pair[1] in state_dict:
471
+ state_dict[pair[0]] = state_dict[pair[1]]
472
+
473
+ return state_dict
474
+
475
+
476
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
477
+ """
478
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
479
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
480
+ via a model hub.
481
+
482
+ Args:
483
+ - ``checkpoint_dir``: path to the desired checkpoint folder
484
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
485
+ - ``exclude_frozen_parameters``: exclude frozen parameters
486
+
487
+ Returns:
488
+ - pytorch ``state_dict``
489
+
490
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory and
491
+ you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
492
+ the checkpoint.
493
+
494
+ A typical usage might be ::
495
+
496
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
497
+ # do the training and checkpoint saving
498
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
499
+ model = model.cpu() # move to cpu
500
+ model.load_state_dict(state_dict)
501
+ # submit to model hub or save the model to share with others
502
+
503
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
504
+ application. i.e. you will need to re-initialize the deepspeed engine, since
505
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
506
+
507
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
508
+
509
+ """
510
+ if tag is None:
511
+ latest_path = os.path.join(checkpoint_dir, 'latest')
512
+ if os.path.isfile(latest_path):
513
+ with open(latest_path, 'r') as fd:
514
+ tag = fd.read().strip()
515
+ else:
516
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
517
+
518
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
519
+
520
+ if not os.path.isdir(ds_checkpoint_dir):
521
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
522
+
523
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
524
+
525
+
526
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+                                                output_dir,
+                                                max_shard_size="5GB",
+                                                safe_serialization=False,
+                                                tag=None,
+                                                exclude_frozen_parameters=False):
+     """
+     Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the tag-folder, like ``global_step14``)
+         - ``output_dir``: directory for the pytorch fp32 state_dict output files
+         - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+         - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`)
+         - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+         - ``exclude_frozen_parameters``: exclude frozen parameters
+     """
+     # Dependency pre-check
+     if safe_serialization:
+         try:
+             from safetensors.torch import save_file
+         except ImportError:
+             print('If you want to use `safe_serialization`, please `pip install safetensors`')
+             raise
+     if max_shard_size is not None:
+         try:
+             from huggingface_hub import split_torch_state_dict_into_shards
+         except ImportError:
+             print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+             raise
+
+     # Convert the zero checkpoint to a state_dict
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
+
+     # Shard the model if it is too big.
+     weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+     if max_shard_size is not None:
+         filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+         state_dict_split = split_torch_state_dict_into_shards(state_dict,
+                                                               filename_pattern=filename_pattern,
+                                                               max_shard_size=max_shard_size)
+     else:
+         from collections import namedtuple
+         StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+         state_dict_split = StateDictSplit(is_sharded=False,
+                                           filename_to_tensors={weights_name: list(state_dict.keys())})
+
+     # Save the model
+     filename_to_tensors = state_dict_split.filename_to_tensors.items()
+     for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+         shard = {tensor: state_dict[tensor].contiguous() for tensor in tensors}
+         output_path = os.path.join(output_dir, shard_file)
+         if safe_serialization:
+             save_file(shard, output_path, metadata={"format": "pt"})
+         else:
+             torch.save(shard, output_path)
+
+     # Save the index if sharded
+     if state_dict_split.is_sharded:
+         index = {
+             "metadata": state_dict_split.metadata,
+             "weight_map": state_dict_split.tensor_to_filename,
+         }
+         save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+         save_index_file = os.path.join(output_dir, save_index_file)
+         with open(save_index_file, "w", encoding="utf-8") as f:
+             content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+             f.write(content)
+
+
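+ # Hedged usage sketch for the converter above (paths are illustrative):
+ #
+ #     convert_zero_checkpoint_to_fp32_state_dict("path/checkpoint-12",
+ #                                                "path/checkpoint-12-output",
+ #                                                safe_serialization=True)
+ #
+ # When the state_dict exceeds max_shard_size, this writes numbered shard files plus
+ # a model.safetensors.index.json that maps each tensor name to its shard file.
+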
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+     """
+     1. Put the provided model on cpu
+     2. Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+     3. Load it into the provided model
+
+     Args:
+         - ``model``: the model object to update
+         - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the tag-folder, like ``global_step14``)
+         - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+     Returns:
+         - ``model``: the modified model
+
+     Make sure you have plenty of CPU memory available before you call this function. If you don't
+     have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+     conveniently placed for you in the checkpoint folder.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+         # submit to model hub or save the model to share with others
+
+     Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
+     of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     """
+     logger.info("Extracting fp32 weights")
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+     logger.info("Overwriting model with fp32 weights")
+     model = model.cpu()
+     model.load_state_dict(state_dict, strict=False)
+
+     return model
+
+
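+ # Note on strict=False in the load above: if the checkpoint was saved with
+ # ``exclude_frozen_parameters``, the consolidated state_dict may lack the frozen
+ # weights, so a strict load would fail. A hypothetical sanity check one could add:
+ #
+ #     result = model.load_state_dict(state_dict, strict=False)
+ #     assert not result.unexpected_keys, result.unexpected_keys
+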
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("checkpoint_dir",
+                         type=str,
+                         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+     parser.add_argument("output_dir",
+                         type=str,
+                         help="directory for the pytorch fp32 state_dict output files "
+                         "(e.g. path/checkpoint-12-output/)")
+     parser.add_argument(
+         "--max_shard_size",
+         type=str,
+         default="5GB",
+         help="The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be "
+         "smaller than this size. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
+         "We default to 5GB so that models can run easily on free-tier Google Colab instances "
+         "without CPU OOM issues.")
+     parser.add_argument(
+         "--safe_serialization",
+         default=False,
+         action='store_true',
+         help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+     parser.add_argument("-t",
+                         "--tag",
+                         type=str,
+                         default=None,
+                         help="checkpoint tag used as a unique identifier for the checkpoint, e.g., global_step1")
+     parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+     args = parser.parse_args()
+
+     debug = args.debug
+
+     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+                                                args.output_dir,
+                                                max_shard_size=args.max_shard_size,
+                                                safe_serialization=args.safe_serialization,
+                                                tag=args.tag,
+                                                exclude_frozen_parameters=args.exclude_frozen_parameters)
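+
+ # Hedged CLI sketch mirroring the arguments defined above (paths are illustrative):
+ #
+ #     python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12-output/ \
+ #         --safe_serialization --max_shard_size 2GB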