TrieTran committed · verified
Commit 58bf47f · 1 Parent(s): bd363c8

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitignore +1 -0
  2. experiments/logs/EVAL-real_data-openvla-2025_09_21-13_59_52.txt +0 -0
  3. experiments/logs/EVAL-real_data-openvla-2025_09_21-14_00_36.txt +0 -0
  4. libero_utils.py +104 -0
  5. oc_model/README.md +202 -0
  6. oc_model/adapter_config.json +258 -0
  7. oc_model/adapter_model.safetensors +3 -0
  8. oc_model/added_tokens.json +3 -0
  9. oc_model/dataset_statistics.json +127 -0
  10. oc_model/preprocessor_config.json +114 -0
  11. oc_model/pretrained_slots-w-filters_s16.safetensors +3 -0
  12. oc_model/relate_object_bboxes_w_mask_actionable_s16h1.safetensors +3 -0
  13. oc_model/special_tokens_map.json +30 -0
  14. oc_model/tokenizer.json +0 -0
  15. oc_model/tokenizer.model +3 -0
  16. oc_model/tokenizer_config.json +53 -0
  17. openvla_utils.py +244 -0
  18. orc_model/README.md +202 -0
  19. orc_model/adapter_config.json +258 -0
  20. orc_model/adapter_model.safetensors +3 -0
  21. orc_model/added_tokens.json +3 -0
  22. orc_model/dataset_statistics.json +127 -0
  23. orc_model/preprocessor_config.json +114 -0
  24. orc_model/pretrained_slots-w-filters_s16.safetensors +3 -0
  25. orc_model/relate_object_bboxes_w_mask_actionable_s16h1.safetensors +3 -0
  26. orc_model/special_tokens_map.json +30 -0
  27. orc_model/tokenizer.json +0 -0
  28. orc_model/tokenizer.model +3 -0
  29. orc_model/tokenizer_config.json +53 -0
  30. output_hf_model_openx/.gitattributes +35 -0
  31. output_hf_model_openx/README.md +99 -0
  32. output_hf_model_openx/added_tokens.json +3 -0
  33. output_hf_model_openx/config.json +3168 -0
  34. output_hf_model_openx/configuration_prismatic.py +140 -0
  35. output_hf_model_openx/generation_config.json +7 -0
  36. output_hf_model_openx/model-00001-of-00003.safetensors +3 -0
  37. output_hf_model_openx/model-00002-of-00003.safetensors +3 -0
  38. output_hf_model_openx/model-00003-of-00003.safetensors +3 -0
  39. output_hf_model_openx/model.safetensors.index.json +989 -0
  40. output_hf_model_openx/modeling_prismatic.py +562 -0
  41. output_hf_model_openx/preprocessor_config.json +114 -0
  42. output_hf_model_openx/processing_prismatic.py +257 -0
  43. output_hf_model_openx/processor_config.json +6 -0
  44. output_hf_model_openx/special_tokens_map.json +30 -0
  45. output_hf_model_openx/tokenizer.json +0 -0
  46. output_hf_model_openx/tokenizer.model +3 -0
  47. output_hf_model_openx/tokenizer_config.json +53 -0
  48. prismatic/__init__.py +1 -0
  49. prismatic/conf/__init__.py +3 -0
  50. prismatic/conf/datasets.py +133 -0
.gitignore ADDED
@@ -0,0 +1 @@
+__pycache__
experiments/logs/EVAL-real_data-openvla-2025_09_21-13_59_52.txt ADDED
File without changes
experiments/logs/EVAL-real_data-openvla-2025_09_21-14_00_36.txt ADDED
File without changes
libero_utils.py ADDED
@@ -0,0 +1,104 @@
+"""Utils for evaluating policies in LIBERO simulation environments."""
+
+import math
+import os
+
+import imageio
+import numpy as np
+import tensorflow as tf
+from libero.libero import get_libero_path
+from libero.libero.envs import OffScreenRenderEnv
+
+from robot_utils import (
+    DATE,
+    DATE_TIME,
+)
+
+
+def get_libero_env(task, model_family, resolution=256):
+    """Initializes and returns the LIBERO environment, along with the task description."""
+    task_description = task.language
+    task_bddl_file = os.path.join(get_libero_path("bddl_files"), task.problem_folder, task.bddl_file)
+    env_args = {"bddl_file_name": task_bddl_file, "camera_heights": resolution, "camera_widths": resolution}
+    env = OffScreenRenderEnv(**env_args)
+    env.seed(0)  # IMPORTANT: seed seems to affect object positions even when using fixed initial state
+    return env, task_description
+
+
+def get_libero_dummy_action(model_family: str):
+    """Get dummy/no-op action, used to roll out the simulation while the robot does nothing."""
+    return [0, 0, 0, 0, 0, 0, -1]
+
+
+def resize_image(img, resize_size):
+    """
+    Takes numpy array corresponding to a single image and returns resized image as numpy array.
+
+    NOTE (Moo Jin): To make input images in distribution with respect to the inputs seen at training time, we follow
+    the same resizing scheme used in the Octo dataloader, which OpenVLA uses for training.
+    """
+    assert isinstance(resize_size, tuple)
+    # Resize to image size expected by model
+    img = tf.image.encode_jpeg(img)  # Encode as JPEG, as done in RLDS dataset builder
+    img = tf.io.decode_image(img, expand_animations=False, dtype=tf.uint8)  # Immediately decode back
+    img = tf.image.resize(img, resize_size, method="lanczos3", antialias=True)
+    img = tf.cast(tf.clip_by_value(tf.round(img), 0, 255), tf.uint8)
+    img = img.numpy()
+    return img
+
+
+def get_libero_image(obs, resize_size):
+    """Extracts image from observations and preprocesses it."""
+    assert isinstance(resize_size, int) or isinstance(resize_size, tuple)
+    if isinstance(resize_size, int):
+        resize_size = (resize_size, resize_size)
+    img = obs["agentview_image"]
+    img = img[::-1, ::-1]  # IMPORTANT: rotate 180 degrees to match train preprocessing
+    img = resize_image(img, resize_size)
+    return img
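A minimal usage sketch of the helpers above (editor-added; assumes `task` comes from a LIBERO task suite, stubs the policy with the dummy action, and assumes the robosuite-style 4-tuple step API):

    env, task_description = get_libero_env(task, model_family="openvla", resolution=256)
    obs = env.reset()
    for _ in range(300):
        img = get_libero_image(obs, resize_size=224)  # 180-degree-rotated, Lanczos-resized uint8 frame
        action = get_libero_dummy_action("openvla")   # stand-in for a real policy call
        obs, reward, done, info = env.step(action)
        if done:
            break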
+
+
+def save_rollout_video(rollout_images, idx, success, task_description, log_file=None, saved_dir=None):
+    """Saves an MP4 replay of an episode."""
+    if saved_dir is None:
+        rollout_dir = f"./rollouts/{DATE}"
+    else:
+        rollout_dir = f"./rollouts/{saved_dir}/{DATE}"
+    os.makedirs(rollout_dir, exist_ok=True)
+    processed_task_description = task_description.lower().replace(" ", "_").replace("\n", "_").replace(".", "_")[:50]
+    mp4_path = f"{rollout_dir}/{DATE_TIME}--episode={idx}--success={success}--task={processed_task_description}.mp4"
+    video_writer = imageio.get_writer(mp4_path, fps=30)
+    for img in rollout_images:
+        video_writer.append_data(img)
+    video_writer.close()
+    print(f"Saved rollout MP4 at path {mp4_path}")
+    if log_file is not None:
+        log_file.write(f"Saved rollout MP4 at path {mp4_path}\n")
+    return mp4_path
+
+
+def quat2axisangle(quat):
+    """
+    Copied from robosuite: https://github.com/ARISE-Initiative/robosuite/blob/eafb81f54ffc104f905ee48a16bb15f059176ad3/robosuite/utils/transform_utils.py#L490C1-L512C55
+
+    Converts quaternion to axis-angle format.
+    Returns a unit vector direction scaled by its angle in radians.
+
+    Args:
+        quat (np.array): (x,y,z,w) vec4 float angles
+
+    Returns:
+        np.array: (ax,ay,az) axis-angle exponential coordinates
+    """
+    # clip quaternion
+    if quat[3] > 1.0:
+        quat[3] = 1.0
+    elif quat[3] < -1.0:
+        quat[3] = -1.0
+
+    den = np.sqrt(1.0 - quat[3] * quat[3])
+    if math.isclose(den, 0.0):
+        # This is (close to) a zero degree rotation, immediately return
+        return np.zeros(3)
+
+    return (quat[:3] * 2.0 * math.acos(quat[3])) / den
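A quick numerical check of quat2axisangle (editor-added illustration): a 90-degree rotation about +z has quaternion (x, y, z, w) = (0, 0, sin(pi/4), cos(pi/4)) and should map to exponential coordinates (0, 0, pi/2):

    q = np.array([0.0, 0.0, math.sin(math.pi / 4), math.cos(math.pi / 4)])
    print(quat2axisangle(q))  # -> [0. 0. 1.5708...], i.e. (0, 0, pi/2)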
oc_model/README.md ADDED
@@ -0,0 +1,202 @@
+---
+base_model: output_hf_model_openx
+library_name: peft
+---
+
+# Model Card for Model ID
+
+<!-- Provide a quick summary of what the model is/does. -->
+
+
+
+## Model Details
+
+### Model Description
+
+<!-- Provide a longer summary of what this model is. -->
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+<!-- Provide the basic links for the model. -->
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+### Direct Use
+
+<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+<!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+[More Information Needed]
+
+### Recommendations
+
+<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+[More Information Needed]
+
+### Training Procedure
+
+<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+#### Speeds, Sizes, Times [optional]
+
+<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+[More Information Needed]
+
+## Evaluation
+
+<!-- This section describes the evaluation protocols and provides the results. -->
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+<!-- This should link to a Dataset Card if possible. -->
+
+[More Information Needed]
+
+#### Factors
+
+<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+[More Information Needed]
+
+#### Metrics
+
+<!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+<!-- Relevant interpretability work for the model goes here -->
+
+[More Information Needed]
+
+## Environmental Impact
+
+<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.11.1
oc_model/adapter_config.json ADDED
@@ -0,0 +1,258 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": {
+    "base_model_class": "OpenVLAForActionPrediction",
+    "parent_library": "prismatic.extern.hf.modeling_prismatic"
+  },
+  "base_model_name_or_path": "output_hf_model_openx",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": "gaussian",
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 16,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 64,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "language_model.model.layers.8.mlp.down_proj",
+    "language_model.model.layers.7.self_attn.k_proj",
+    "language_model.model.layers.16.mlp.gate_proj",
+    "language_model.model.layers.25.self_attn.v_proj",
+    "language_model.model.layers.15.self_attn.v_proj",
+    "language_model.model.layers.3.self_attn.o_proj",
+    "language_model.model.layers.17.self_attn.q_proj",
+    "language_model.model.layers.30.mlp.gate_proj",
+    "language_model.model.layers.16.self_attn.k_proj",
+    "language_model.model.layers.5.self_attn.k_proj",
+    "language_model.model.layers.3.self_attn.v_proj",
+    "language_model.model.layers.9.mlp.up_proj",
+    "language_model.model.layers.28.self_attn.v_proj",
+    "language_model.model.layers.10.self_attn.k_proj",
+    "language_model.model.layers.3.mlp.up_proj",
+    "language_model.model.layers.25.mlp.gate_proj",
+    "language_model.model.layers.20.self_attn.v_proj",
+    "language_model.model.layers.14.self_attn.o_proj",
+    "language_model.model.layers.2.mlp.gate_proj",
+    "language_model.model.layers.17.self_attn.v_proj",
+    "language_model.model.layers.30.self_attn.o_proj",
+    "language_model.lm_head",
+    "language_model.model.layers.1.mlp.up_proj",
+    "language_model.model.layers.28.mlp.up_proj",
+    "language_model.model.layers.4.self_attn.o_proj",
+    "language_model.model.layers.29.mlp.down_proj",
+    "language_model.model.layers.21.mlp.up_proj",
+    "language_model.model.layers.10.self_attn.v_proj",
+    "language_model.model.layers.18.self_attn.o_proj",
+    "language_model.model.layers.19.mlp.down_proj",
+    "language_model.model.layers.2.self_attn.o_proj",
+    "language_model.model.layers.22.self_attn.q_proj",
+    "language_model.model.layers.26.mlp.up_proj",
+    "language_model.model.layers.17.mlp.gate_proj",
+    "language_model.model.layers.11.mlp.gate_proj",
+    "language_model.model.layers.7.self_attn.q_proj",
+    "language_model.model.layers.30.self_attn.k_proj",
+    "language_model.model.layers.27.mlp.gate_proj",
+    "language_model.model.layers.6.mlp.gate_proj",
+    "language_model.model.layers.10.mlp.gate_proj",
+    "language_model.model.layers.12.mlp.down_proj",
+    "language_model.model.layers.13.mlp.down_proj",
+    "language_model.model.layers.6.self_attn.q_proj",
+    "language_model.model.layers.0.mlp.down_proj",
+    "language_model.model.layers.20.self_attn.o_proj",
+    "language_model.model.layers.23.self_attn.k_proj",
+    "language_model.model.layers.23.mlp.gate_proj",
+    "language_model.model.layers.3.self_attn.q_proj",
+    "language_model.model.layers.18.self_attn.k_proj",
+    "language_model.model.layers.19.mlp.gate_proj",
+    "language_model.model.layers.20.mlp.down_proj",
+    "language_model.model.layers.21.self_attn.v_proj",
+    "language_model.model.layers.30.mlp.down_proj",
+    "language_model.model.layers.18.self_attn.q_proj",
+    "language_model.model.layers.26.self_attn.o_proj",
+    "language_model.model.layers.19.self_attn.o_proj",
+    "language_model.model.layers.14.self_attn.v_proj",
+    "language_model.model.layers.31.mlp.gate_proj",
+    "language_model.model.layers.11.mlp.up_proj",
+    "language_model.model.layers.24.self_attn.k_proj",
+    "language_model.model.layers.9.mlp.down_proj",
+    "language_model.model.layers.16.self_attn.o_proj",
+    "language_model.model.layers.28.mlp.gate_proj",
+    "language_model.model.layers.4.mlp.up_proj",
+    "language_model.model.layers.31.self_attn.o_proj",
+    "language_model.model.layers.9.self_attn.v_proj",
+    "language_model.model.layers.20.self_attn.k_proj",
+    "language_model.model.layers.27.self_attn.q_proj",
+    "language_model.model.layers.19.mlp.up_proj",
+    "language_model.model.layers.28.mlp.down_proj",
+    "language_model.model.layers.26.self_attn.v_proj",
+    "language_model.model.layers.16.self_attn.q_proj",
+    "language_model.model.layers.21.mlp.gate_proj",
+    "language_model.model.layers.6.self_attn.v_proj",
+    "language_model.model.layers.15.self_attn.q_proj",
+    "language_model.model.layers.5.self_attn.o_proj",
+    "language_model.model.layers.14.mlp.gate_proj",
+    "language_model.model.layers.11.self_attn.k_proj",
+    "language_model.model.layers.5.mlp.gate_proj",
+    "language_model.model.layers.10.self_attn.o_proj",
+    "language_model.model.layers.11.mlp.down_proj",
+    "language_model.model.layers.8.mlp.gate_proj",
+    "projector.fc1",
+    "language_model.model.layers.9.self_attn.k_proj",
+    "language_model.model.layers.21.self_attn.o_proj",
+    "language_model.model.layers.5.mlp.down_proj",
+    "language_model.model.layers.8.self_attn.k_proj",
+    "language_model.model.layers.22.self_attn.o_proj",
+    "language_model.model.layers.3.mlp.gate_proj",
+    "language_model.model.layers.7.self_attn.o_proj",
+    "language_model.model.layers.30.self_attn.q_proj",
+    "language_model.model.layers.0.self_attn.q_proj",
+    "language_model.model.layers.11.self_attn.o_proj",
+    "language_model.model.layers.17.self_attn.k_proj",
+    "language_model.model.layers.14.self_attn.k_proj",
+    "language_model.model.layers.27.self_attn.v_proj",
+    "language_model.model.layers.21.self_attn.q_proj",
+    "language_model.model.layers.4.self_attn.v_proj",
+    "language_model.model.layers.0.self_attn.v_proj",
+    "language_model.model.layers.0.mlp.up_proj",
+    "language_model.model.layers.20.self_attn.q_proj",
+    "language_model.model.layers.7.self_attn.v_proj",
+    "language_model.model.layers.23.self_attn.v_proj",
+    "language_model.model.layers.29.self_attn.k_proj",
+    "language_model.model.layers.10.mlp.up_proj",
+    "language_model.model.layers.17.self_attn.o_proj",
+    "language_model.model.layers.16.mlp.up_proj",
+    "language_model.model.layers.6.mlp.down_proj",
+    "language_model.model.layers.13.mlp.gate_proj",
+    "language_model.model.layers.24.mlp.down_proj",
+    "language_model.model.layers.26.self_attn.k_proj",
+    "language_model.model.layers.29.mlp.up_proj",
+    "language_model.model.layers.25.mlp.up_proj",
+    "language_model.model.layers.29.mlp.gate_proj",
+    "language_model.model.layers.2.self_attn.v_proj",
+    "language_model.model.layers.1.self_attn.o_proj",
+    "language_model.model.layers.27.mlp.down_proj",
+    "language_model.model.layers.21.self_attn.k_proj",
+    "language_model.model.layers.9.self_attn.q_proj",
+    "language_model.model.layers.9.self_attn.o_proj",
+    "language_model.model.layers.10.self_attn.q_proj",
+    "language_model.model.layers.22.self_attn.k_proj",
+    "language_model.model.layers.12.self_attn.q_proj",
+    "language_model.model.layers.31.self_attn.k_proj",
+    "language_model.model.layers.22.mlp.up_proj",
+    "language_model.model.layers.14.mlp.up_proj",
+    "language_model.model.layers.17.mlp.down_proj",
+    "language_model.model.layers.23.self_attn.o_proj",
+    "language_model.model.layers.1.self_attn.v_proj",
+    "language_model.model.layers.12.self_attn.k_proj",
+    "language_model.model.layers.7.mlp.gate_proj",
+    "language_model.model.layers.20.mlp.up_proj",
+    "language_model.model.layers.31.self_attn.q_proj",
+    "language_model.model.layers.15.self_attn.k_proj",
+    "language_model.model.layers.26.mlp.down_proj",
+    "language_model.model.layers.19.self_attn.k_proj",
+    "language_model.model.layers.6.self_attn.k_proj",
+    "language_model.model.layers.2.self_attn.k_proj",
+    "language_model.model.layers.12.mlp.gate_proj",
+    "language_model.model.layers.13.self_attn.q_proj",
+    "language_model.model.layers.13.mlp.up_proj",
+    "language_model.model.layers.23.mlp.up_proj",
+    "language_model.model.layers.13.self_attn.o_proj",
+    "language_model.model.layers.11.self_attn.v_proj",
+    "language_model.model.layers.23.mlp.down_proj",
+    "language_model.model.layers.19.self_attn.q_proj",
+    "language_model.model.layers.9.mlp.gate_proj",
+    "language_model.model.layers.4.mlp.down_proj",
+    "language_model.model.layers.21.mlp.down_proj",
+    "language_model.model.layers.20.mlp.gate_proj",
+    "language_model.model.layers.8.self_attn.o_proj",
+    "language_model.model.layers.23.self_attn.q_proj",
+    "language_model.model.layers.24.self_attn.q_proj",
+    "language_model.model.layers.6.self_attn.o_proj",
+    "language_model.model.layers.28.self_attn.q_proj",
+    "language_model.model.layers.29.self_attn.o_proj",
+    "language_model.model.layers.24.self_attn.o_proj",
+    "language_model.model.layers.25.self_attn.o_proj",
+    "language_model.model.layers.27.mlp.up_proj",
+    "language_model.model.layers.12.self_attn.o_proj",
+    "language_model.model.layers.22.mlp.gate_proj",
+    "language_model.model.layers.1.mlp.gate_proj",
+    "language_model.model.layers.0.self_attn.k_proj",
+    "language_model.model.layers.1.mlp.down_proj",
+    "language_model.model.layers.12.mlp.up_proj",
+    "language_model.model.layers.28.self_attn.k_proj",
+    "language_model.model.layers.13.self_attn.v_proj",
+    "language_model.model.layers.22.mlp.down_proj",
+    "language_model.model.layers.3.mlp.down_proj",
+    "language_model.model.layers.17.mlp.up_proj",
+    "language_model.model.layers.31.self_attn.v_proj",
+    "language_model.model.layers.18.mlp.gate_proj",
+    "language_model.model.layers.24.mlp.up_proj",
+    "language_model.model.layers.14.mlp.down_proj",
+    "language_model.model.layers.8.self_attn.v_proj",
+    "language_model.model.layers.1.self_attn.q_proj",
+    "language_model.model.layers.2.mlp.up_proj",
+    "language_model.model.layers.26.self_attn.q_proj",
+    "language_model.model.layers.27.self_attn.o_proj",
+    "language_model.model.layers.28.self_attn.o_proj",
+    "language_model.model.layers.15.mlp.gate_proj",
+    "language_model.model.layers.25.mlp.down_proj",
+    "language_model.model.layers.24.self_attn.v_proj",
+    "language_model.model.layers.11.self_attn.q_proj",
+    "language_model.model.layers.15.mlp.down_proj",
+    "projector.fc2",
+    "language_model.model.layers.29.self_attn.q_proj",
+    "language_model.model.layers.15.mlp.up_proj",
+    "language_model.model.layers.27.self_attn.k_proj",
+    "language_model.model.layers.29.self_attn.v_proj",
+    "language_model.model.layers.26.mlp.gate_proj",
+    "language_model.model.layers.24.mlp.gate_proj",
+    "language_model.model.layers.30.self_attn.v_proj",
+    "language_model.model.layers.18.mlp.down_proj",
+    "language_model.model.layers.14.self_attn.q_proj",
+    "language_model.model.layers.18.self_attn.v_proj",
+    "language_model.model.layers.4.self_attn.k_proj",
+    "language_model.model.layers.3.self_attn.k_proj",
+    "language_model.model.layers.22.self_attn.v_proj",
+    "language_model.model.layers.7.mlp.up_proj",
+    "language_model.model.layers.10.mlp.down_proj",
+    "language_model.model.layers.16.self_attn.v_proj",
+    "language_model.model.layers.12.self_attn.v_proj",
+    "language_model.model.layers.2.self_attn.q_proj",
+    "language_model.model.layers.15.self_attn.o_proj",
+    "language_model.model.layers.1.self_attn.k_proj",
+    "language_model.model.layers.30.mlp.up_proj",
+    "language_model.model.layers.19.self_attn.v_proj",
+    "language_model.model.layers.0.mlp.gate_proj",
+    "language_model.model.layers.8.self_attn.q_proj",
+    "language_model.model.layers.18.mlp.up_proj",
+    "language_model.model.layers.31.mlp.down_proj",
+    "language_model.model.layers.31.mlp.up_proj",
+    "language_model.model.layers.25.self_attn.k_proj",
+    "projector.fc3",
+    "language_model.model.layers.7.mlp.down_proj",
+    "language_model.model.layers.16.mlp.down_proj",
+    "language_model.model.layers.13.self_attn.k_proj",
+    "language_model.model.layers.0.self_attn.o_proj",
+    "language_model.model.layers.5.self_attn.v_proj",
+    "language_model.model.layers.2.mlp.down_proj",
+    "language_model.model.layers.5.self_attn.q_proj",
+    "language_model.model.layers.4.self_attn.q_proj",
+    "language_model.model.layers.4.mlp.gate_proj",
+    "language_model.model.layers.6.mlp.up_proj",
+    "language_model.model.layers.8.mlp.up_proj",
+    "language_model.model.layers.5.mlp.up_proj",
+    "language_model.model.layers.25.self_attn.q_proj"
+  ],
+  "task_type": null,
+  "use_dora": false,
+  "use_rslora": false
+}
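A hypothetical loading sketch for this adapter (editor-added; the base-model path and adapter directory are taken from the config above, but the exact loading flow for OpenVLA checkpoints is an assumption):

    from transformers import AutoModelForVision2Seq
    from peft import PeftModel

    base = AutoModelForVision2Seq.from_pretrained("output_hf_model_openx", trust_remote_code=True)
    vla = PeftModel.from_pretrained(base, "oc_model")  # applies the r=64, lora_alpha=16 LoRA deltas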
oc_model/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:494a46a1aa21689c4cf9b6b591b75e78de82e187dfa3e31655fe7b97efe44cfa
+size 328592296
oc_model/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "<PAD>": 32000
+}
oc_model/dataset_statistics.json ADDED
@@ -0,0 +1,127 @@
+{
+  "real_data": {
+    "action": {
+      "mean": [
+        -0.00021713033493142575,
+        3.951489816245157e-06,
+        -6.244335236260667e-05,
+        0.024315927177667618,
+        -0.024200621992349625,
+        0.0001426006929250434,
+        -0.1762954145669937
+      ],
+      "std": [
+        0.005905783269554377,
+        0.010097788646817207,
+        0.005760197062045336,
+        0.9473578929901123,
+        0.9439426064491272,
+        0.028618143871426582,
+        0.9843310117721558
+      ],
+      "max": [
+        0.0263775996863842,
+        0.02954130433499813,
+        0.02553696744143963,
+        4.978769779205322,
+        5.3642754554748535,
+        0.4048313796520233,
+        1.0
+      ],
+      "min": [
+        -0.029638897627592087,
+        -0.029703686013817787,
+        -0.024002285674214363,
+        -4.92792272567749,
+        -5.274268627166748,
+        -0.44714170694351196,
+        -1.0
+      ],
+      "q01": [
+        -0.018638468496501446,
+        -0.0258210021071136,
+        -0.012519038049504161,
+        -4.446948285102844,
+        -4.433915729522705,
+        -0.08356364756822586,
+        -1.0
+      ],
+      "q99": [
+        0.017138871438801287,
+        0.025166765898466083,
+        0.02048220705240963,
+        4.4373928689956665,
+        4.42347291469574,
+        0.08423277527093823,
+        1.0
+      ],
+      "mask": [
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        false
+      ]
+    },
+    "proprio": {
+      "mean": [
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0
+      ],
+      "std": [
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0
+      ],
+      "max": [
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0
+      ],
+      "min": [
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0
+      ],
+      "q01": [
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0
+      ],
+      "q99": [
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0
+      ]
+    },
+    "num_transitions": 20148,
+    "num_trajectories": 150
+  }
+}
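These statistics drive action de-normalization at inference time. A sketch of the usual consumption pattern (editor-added; the [-1, 1] output convention and the use of q01/q99 bounds with the mask follow OpenVLA's predict_action, stated here as an assumption):

    import json
    import numpy as np

    stats = json.load(open("oc_model/dataset_statistics.json"))["real_data"]["action"]
    q01, q99 = np.array(stats["q01"]), np.array(stats["q99"])
    mask = np.array(stats["mask"])

    def unnormalize(normalized_action):
        """Map a model output in [-1, 1] back to raw action units."""
        raw = 0.5 * (normalized_action + 1.0) * (q99 - q01) + q01
        return np.where(mask, raw, normalized_action)  # gripper dim (mask=false) passes through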
oc_model/preprocessor_config.json ADDED
@@ -0,0 +1,114 @@
+{
+  "auto_map": {
+    "AutoImageProcessor": "processing_prismatic.PrismaticImageProcessor",
+    "AutoProcessor": "processing_prismatic.PrismaticProcessor"
+  },
+  "image_processor_type": "PrismaticImageProcessor",
+  "image_resize_strategy": "resize-naive",
+  "input_sizes": [
+    [
+      3,
+      224,
+      224
+    ],
+    [
+      3,
+      224,
+      224
+    ]
+  ],
+  "interpolations": [
+    "bicubic",
+    "bicubic"
+  ],
+  "means": [
+    [
+      0.485,
+      0.456,
+      0.406
+    ],
+    [
+      0.5,
+      0.5,
+      0.5
+    ]
+  ],
+  "processor_class": "PrismaticProcessor",
+  "stds": [
+    [
+      0.229,
+      0.224,
+      0.225
+    ],
+    [
+      0.5,
+      0.5,
+      0.5
+    ]
+  ],
+  "tvf_crop_params": [
+    {
+      "output_size": [
+        224,
+        224
+      ]
+    },
+    {
+      "output_size": [
+        224,
+        224
+      ]
+    }
+  ],
+  "tvf_do_letterbox": false,
+  "tvf_letterbox_fill": null,
+  "tvf_normalize_params": [
+    {
+      "inplace": false,
+      "mean": [
+        0.484375,
+        0.455078125,
+        0.40625
+      ],
+      "std": [
+        0.228515625,
+        0.2236328125,
+        0.224609375
+      ]
+    },
+    {
+      "inplace": false,
+      "mean": [
+        0.5,
+        0.5,
+        0.5
+      ],
+      "std": [
+        0.5,
+        0.5,
+        0.5
+      ]
+    }
+  ],
+  "tvf_resize_params": [
+    {
+      "antialias": true,
+      "interpolation": 3,
+      "max_size": null,
+      "size": [
+        224,
+        224
+      ]
+    },
+    {
+      "antialias": true,
+      "interpolation": 3,
+      "max_size": null,
+      "size": [
+        224,
+        224
+      ]
+    }
+  ],
+  "use_fused_vision_backbone": true
+}
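The paired means/stds reflect a fused two-tower vision stack (ImageNet-style statistics for one tower, 0.5/0.5 for the other; `interpolation: 3` is PIL's bicubic code). A rough per-tower equivalent (editor-added sketch; the channel concatenation into a 6-channel tensor is an assumption based on `use_fused_vision_backbone`):

    import torch
    import torchvision.transforms.functional as TVF
    from torchvision.transforms import InterpolationMode

    TOWER_STATS = [
        ((0.484375, 0.455078125, 0.40625), (0.228515625, 0.2236328125, 0.224609375)),
        ((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]

    def preprocess(pil_image):
        views = []
        for mean, std in TOWER_STATS:
            x = TVF.resize(pil_image, [224, 224], interpolation=InterpolationMode.BICUBIC, antialias=True)  # "resize-naive"
            x = TVF.normalize(TVF.to_tensor(x), mean=list(mean), std=list(std))
            views.append(x)
        return torch.cat(views, dim=0)  # (6, 224, 224)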
oc_model/pretrained_slots-w-filters_s16.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fb915260b6c164b68b2abb642558980f5eaa6b8178613d089f88067b4234a5b
+size 146434388
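This file and the one below ship auxiliary object-centric weights alongside the LoRA adapter. A hypothetical inspection snippet (editor-added; `load_file` is the standard safetensors API, but the tensor keys inside these checkpoints are not shown in this diff):

    from safetensors.torch import load_file

    slots = load_file("oc_model/pretrained_slots-w-filters_s16.safetensors")
    print(len(slots), sum(t.numel() for t in slots.values()))  # tensor count and total parameters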
oc_model/relate_object_bboxes_w_mask_actionable_s16h1.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:875099ecf58478b785669c75f5537fa9c841cdf695daa67af7043f4296d3a072
+size 901969424
oc_model/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<PAD>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
oc_model/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
oc_model/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
oc_model/tokenizer_config.json ADDED
@@ -0,0 +1,53 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32000": {
+      "content": "<PAD>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "auto_map": {
+    "AutoProcessor": "processing_prismatic.PrismaticProcessor"
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": false,
+  "model_max_length": 2048,
+  "pad_token": "<PAD>",
+  "padding_side": "right",
+  "processor_class": "PrismaticProcessor",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
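A quick check that the tokenizer round-trips as configured (editor-added sketch using the standard transformers API, assuming the checkpoint directory is loaded as-is):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("oc_model")
    assert tok.bos_token == "<s>" and tok.pad_token == "<PAD>"
    assert tok.pad_token_id == 32000  # one slot past the 32000-entry Llama vocabulary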
openvla_utils.py ADDED
@@ -0,0 +1,244 @@
+"""Utils for evaluating the OpenVLA policy."""
+
+import json
+import os
+import time
+
+import numpy as np
+import tensorflow as tf
+import torch
+from PIL import Image
+from transformers import AutoConfig, AutoImageProcessor, AutoModelForVision2Seq, AutoProcessor
+
+from prismatic.extern.hf.configuration_prismatic import OpenVLAConfig
+from prismatic.extern.hf.modeling_prismatic import OpenVLAForActionPrediction
+from prismatic.extern.hf.processing_prismatic import PrismaticImageProcessor, PrismaticProcessor
+
+# Initialize important constants and pretty-printing mode in NumPy.
+ACTION_DIM = 7
+DATE = time.strftime("%Y_%m_%d")
+DATE_TIME = time.strftime("%Y_%m_%d-%H_%M_%S")
+DEVICE = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
+np.set_printoptions(formatter={"float": lambda x: "{0:0.3f}".format(x)})
+
+# Initialize system prompt for OpenVLA v0.1.
+OPENVLA_V01_SYSTEM_PROMPT = (
+    "A chat between a curious user and an artificial intelligence assistant. "
+    "The assistant gives helpful, detailed, and polite answers to the user's questions."
+)
+
+from peft import PeftModel
+from safetensors.torch import load_file
+
+
+def crop_and_resize(image, crop_scale, batch_size):
+    """
+    Center-crops an image to have area `crop_scale` * (original image area), and then resizes back
+    to original size. We use the same logic seen in the `dlimp` RLDS datasets wrapper to avoid
+    distribution shift at test time.
+
+    Args:
+        image: TF Tensor of shape (batch_size, H, W, C) or (H, W, C) and datatype tf.float32 with
+               values between [0,1].
+        crop_scale: The area of the center crop with respect to the original image.
+        batch_size: Batch size.
+    """
+    # Convert from 3D Tensor (H, W, C) to 4D Tensor (batch_size, H, W, C)
+    assert image.shape.ndims == 3 or image.shape.ndims == 4
+    expanded_dims = False
+    if image.shape.ndims == 3:
+        image = tf.expand_dims(image, axis=0)
+        expanded_dims = True
+
+    # Get height and width of crop
+    new_heights = tf.reshape(tf.clip_by_value(tf.sqrt(crop_scale), 0, 1), shape=(batch_size,))
+    new_widths = tf.reshape(tf.clip_by_value(tf.sqrt(crop_scale), 0, 1), shape=(batch_size,))
+
+    # Get bounding box representing crop
+    height_offsets = (1 - new_heights) / 2
+    width_offsets = (1 - new_widths) / 2
+    bounding_boxes = tf.stack(
+        [
+            height_offsets,
+            width_offsets,
+            height_offsets + new_heights,
+            width_offsets + new_widths,
+        ],
+        axis=1,
+    )
+
+    # Crop and then resize back up
+    image = tf.image.crop_and_resize(image, bounding_boxes, tf.range(batch_size), (224, 224))
+
+    # Convert back to 3D Tensor (H, W, C)
+    if expanded_dims:
+        image = image[0]
+
+    return image
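Worked numbers for the crop-scale caveat used below: with crop_scale = 0.9, each side keeps sqrt(0.9) ~ 0.9487 of its length, so the retained area is 0.9487^2 ~ 0.9; scaling each side by 0.9 directly would keep only 81% of the area. A runnable spot-check (editor-added):

    frame = tf.random.uniform((256, 256, 3), dtype=tf.float32)  # stand-in frame with values in [0, 1]
    out = crop_and_resize(frame, crop_scale=0.9, batch_size=1)
    print(out.shape)  # (224, 224, 3): center ~94.87% per side, resized to 224x224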
+
+
+def get_vla_action(vla, processor, base_vla_name, obs, task_label, unnorm_key, center_crop=False):
+    """Generates an action with the VLA policy."""
+    image = Image.fromarray(obs["full_image"])
+    image = image.convert("RGB")
+
+    # (If trained with image augmentations) Center crop image and then resize back up to original size.
+    # IMPORTANT: Let's say crop scale == 0.9. To get the new height and width (post-crop), multiply
+    # the original height and width by sqrt(0.9) -- not 0.9!
+    if center_crop:
+        batch_size = 1
+        crop_scale = 0.9
+
+        # Convert to TF Tensor and record original data type (should be tf.uint8)
+        image = tf.convert_to_tensor(np.array(image))
+        orig_dtype = image.dtype
+
+        # Convert to data type tf.float32 and values between [0,1]
+        image = tf.image.convert_image_dtype(image, tf.float32)
+
+        # Crop and then resize back to original size
+        image = crop_and_resize(image, crop_scale, batch_size)
+
+        # Convert back to original data type
+        image = tf.clip_by_value(image, 0, 1)
+        image = tf.image.convert_image_dtype(image, orig_dtype, saturate=True)
+
+        # Convert back to PIL Image
+        image = Image.fromarray(image.numpy())
+        image = image.convert("RGB")
+
+    # Build VLA prompt
+    if "openvla-v01" in base_vla_name:  # OpenVLA v0.1
+        prompt = (
+            f"{OPENVLA_V01_SYSTEM_PROMPT} USER: What action should the robot take to {task_label.lower()}? ASSISTANT:"
+        )
+    else:  # OpenVLA
+        prompt = f"In: What action should the robot take to {task_label.lower()}?\nOut: _ _ _ _ _ _ _ _</s>"
+
+    # Process inputs.
+    inputs = processor(prompt, image).to(DEVICE, dtype=torch.float16)
+    inputs['input_ids'][0][-9] = 29871
+    # print(inputs['input_ids']); 1/0
+
+    # Get action.
+    action = vla.predict_action(**inputs, unnorm_key=unnorm_key, do_sample=False)
+    return action
+
+
+def get_vla_action_v2(vla, processor, base_vla_name, obs, task_label, unnorm_key, center_crop=False):
+    """Generates an action with the VLA policy."""
+    images = [obs["full_image"], obs["wrist_image"]]
+    input_data = []
+    for image in images:
+        image = Image.fromarray(image)
+        image = image.convert("RGB")
+
+        # (If trained with image augmentations) Center crop image and then resize back up to original size.
+        # IMPORTANT: Let's say crop scale == 0.9. To get the new height and width (post-crop), multiply
+        # the original height and width by sqrt(0.9) -- not 0.9!
+        if center_crop:
+            batch_size = 1
+            crop_scale = 0.9
+
+            # Convert to TF Tensor and record original data type (should be tf.uint8)
+            image = tf.convert_to_tensor(np.array(image))
+            orig_dtype = image.dtype
+
+            # Convert to data type tf.float32 and values between [0,1]
+            image = tf.image.convert_image_dtype(image, tf.float32)
+
+            # Crop and then resize back to original size
+            image = crop_and_resize(image, crop_scale, batch_size)
+
+            # Convert back to original data type
+            image = tf.clip_by_value(image, 0, 1)
+            image = tf.image.convert_image_dtype(image, orig_dtype, saturate=True)
+
+            # Convert back to PIL Image
+            image = Image.fromarray(image.numpy())
+            image = image.convert("RGB")
+
+        # Build VLA prompt
+        if "openvla-v01" in base_vla_name:  # OpenVLA v0.1
+            prompt = (
+                f"{OPENVLA_V01_SYSTEM_PROMPT} USER: What action should the robot take to {task_label.lower()}? ASSISTANT:"
+            )
+        else:  # OpenVLA
+            prompt = f"In: What action should the robot take to {task_label.lower()}?\nOut: _ _ _ _ _ _ _ _</s>"
+
+        # Process inputs.
+        inputs = processor(prompt, image).to(DEVICE, dtype=torch.float16)
+        inputs['input_ids'][0][-9] = 29871
+        input_data.append(inputs)
+
+    # Get action.
+    inputs = {}
+    inputs['pixel_values'] = []
+    for datum in input_data:
+        inputs['input_ids'] = datum['input_ids']
+        inputs['attention_mask'] = datum['attention_mask']
+        bz, cc, h, w = datum['pixel_values'].shape
+        datum['pixel_values'] = datum['pixel_values'].reshape(bz, 1, cc, h, w)
+        inputs['pixel_values'].append(datum['pixel_values'])
+    inputs['pixel_values'] = torch.stack(inputs['pixel_values'], dim=1)
+    action = vla.predict_action(**inputs, unnorm_key=unnorm_key, do_sample=False)
+    return action
+
+
+def get_vla_action_v3(vla, processor, base_vla_name, obs, task_label, unnorm_key, center_crop=False):
+    """Generates an action with the VLA policy."""
+    images = [obs["full_image"], obs["wrist_image"], obs["depth_full_image"], obs["depth_wrist_image"]]
+    input_data = []
+    for image in images:
+        image = Image.fromarray(image)
+        image = image.convert("RGB")
+
+        # (If trained with image augmentations) Center crop image and then resize back up to original size.
+        # IMPORTANT: Let's say crop scale == 0.9. To get the new height and width (post-crop), multiply
+        # the original height and width by sqrt(0.9) -- not 0.9!
+        if center_crop:
+            batch_size = 1
+            crop_scale = 0.9
+
+            # Convert to TF Tensor and record original data type (should be tf.uint8)
+            image = tf.convert_to_tensor(np.array(image))
+            orig_dtype = image.dtype
+
+            # Convert to data type tf.float32 and values between [0,1]
+            image = tf.image.convert_image_dtype(image, tf.float32)
+
+            # Crop and then resize back to original size
+            image = crop_and_resize(image, crop_scale, batch_size)
+
+            # Convert back to original data type
+            image = tf.clip_by_value(image, 0, 1)
+            image = tf.image.convert_image_dtype(image, orig_dtype, saturate=True)
+
+            # Convert back to PIL Image
+            image = Image.fromarray(image.numpy())
+            image = image.convert("RGB")
+
+        # Build VLA prompt
+        if "openvla-v01" in base_vla_name:  # OpenVLA v0.1
+            prompt = (
+                f"{OPENVLA_V01_SYSTEM_PROMPT} USER: What action should the robot take to {task_label.lower()}? ASSISTANT:"
+            )
+        else:  # OpenVLA
+            prompt = f"In: What action should the robot take to {task_label.lower()}?\nOut:"
+
+        # Process inputs.
+        inputs = processor(prompt, image).to(DEVICE, dtype=torch.bfloat16)
+        input_data.append(inputs)
+
+    # Get action.
+    inputs = {}
+    inputs['pixel_values'] = []
+    for datum in input_data:
+        inputs['input_ids'] = datum['input_ids']
+        inputs['attention_mask'] = datum['attention_mask']
+        bz, cc, h, w = datum['pixel_values'].shape
+        datum['pixel_values'] = datum['pixel_values'].reshape(bz, 1, cc, h, w)
+        inputs['pixel_values'].append(datum['pixel_values'])
+    inputs['pixel_values'] = torch.stack(inputs['pixel_values'], dim=1)
+    action = vla.predict_action(**inputs, unnorm_key=unnorm_key, do_sample=False)
+    return action
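Shape bookkeeping for the multi-view batching in get_vla_action_v2 (editor-added sanity check; the 6-channel pixel tensors assume the fused dual-backbone processor described in the preprocessor config above):

    import torch

    third_person = torch.randn(1, 6, 224, 224)  # processor output for one camera view
    wrist = torch.randn(1, 6, 224, 224)
    views = [v.reshape(1, 1, 6, 224, 224) for v in (third_person, wrist)]
    pixel_values = torch.stack(views, dim=1)
    print(pixel_values.shape)  # torch.Size([1, 2, 1, 6, 224, 224]) -- one group per view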
orc_model/README.md ADDED
@@ -0,0 +1,202 @@
+---
+base_model: output_hf_model_openx
+library_name: peft
+---
+
+# Model Card for Model ID
+
+<!-- Provide a quick summary of what the model is/does. -->
+
+
+
+## Model Details
+
+### Model Description
+
+<!-- Provide a longer summary of what this model is. -->
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+<!-- Provide the basic links for the model. -->
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+### Direct Use
+
+<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+<!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+[More Information Needed]
+
+### Recommendations
+
+<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+[More Information Needed]
+
+### Training Procedure
+
+<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+#### Speeds, Sizes, Times [optional]
+
+<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+[More Information Needed]
+
+## Evaluation
+
+<!-- This section describes the evaluation protocols and provides the results. -->
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+<!-- This should link to a Dataset Card if possible. -->
+
+[More Information Needed]
+
+#### Factors
+
+<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+[More Information Needed]
+
+#### Metrics
+
+<!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+<!-- Relevant interpretability work for the model goes here -->
+
+[More Information Needed]
+
+## Environmental Impact
+
+<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.11.1
orc_model/adapter_config.json ADDED
@@ -0,0 +1,258 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": {
+    "base_model_class": "OpenVLAForActionPrediction",
+    "parent_library": "prismatic.extern.hf.modeling_prismatic"
+  },
+  "base_model_name_or_path": "output_hf_model_openx",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": "gaussian",
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 16,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 64,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "language_model.model.layers.22.mlp.up_proj",
+    "language_model.model.layers.11.self_attn.k_proj",
+    "language_model.model.layers.23.self_attn.q_proj",
+    "language_model.model.layers.15.mlp.up_proj",
+    "language_model.model.layers.20.mlp.up_proj",
+    "language_model.model.layers.29.mlp.gate_proj",
+    "language_model.model.layers.18.self_attn.k_proj",
+    "language_model.model.layers.9.self_attn.v_proj",
+    "language_model.model.layers.18.mlp.up_proj",
+    "language_model.model.layers.19.self_attn.q_proj",
+    "language_model.model.layers.4.self_attn.o_proj",
+    "language_model.model.layers.20.self_attn.q_proj",
+    "language_model.model.layers.26.self_attn.k_proj",
+    "projector.fc1",
+    "language_model.model.layers.2.self_attn.q_proj",
+    "language_model.model.layers.0.mlp.gate_proj",
+    "language_model.model.layers.17.self_attn.q_proj",
+    "language_model.model.layers.23.self_attn.k_proj",
+    "language_model.model.layers.9.self_attn.k_proj",
+    "language_model.model.layers.16.self_attn.q_proj",
+    "language_model.model.layers.11.self_attn.o_proj",
+    "language_model.model.layers.18.self_attn.v_proj",
+    "language_model.model.layers.14.mlp.gate_proj",
+    "language_model.model.layers.4.mlp.down_proj",
+    "language_model.model.layers.12.mlp.gate_proj",
+    "language_model.model.layers.23.self_attn.v_proj",
+    "language_model.model.layers.7.mlp.gate_proj",
+    "language_model.model.layers.17.mlp.down_proj",
+    "language_model.model.layers.21.mlp.down_proj",
+    "language_model.model.layers.22.mlp.down_proj",
+    "language_model.model.layers.13.self_attn.k_proj",
+    "language_model.model.layers.17.mlp.gate_proj",
+    "language_model.model.layers.10.mlp.down_proj",
+    "language_model.model.layers.11.mlp.down_proj",
+    "language_model.model.layers.2.self_attn.k_proj",
+    "language_model.model.layers.2.mlp.gate_proj",
+    "language_model.model.layers.7.mlp.down_proj",
+    "language_model.model.layers.19.mlp.gate_proj",
+    "language_model.lm_head",
+    "language_model.model.layers.1.self_attn.k_proj",
+    "language_model.model.layers.16.mlp.down_proj",
+    "language_model.model.layers.13.mlp.down_proj",
+    "language_model.model.layers.19.self_attn.k_proj",
+    "language_model.model.layers.25.self_attn.v_proj",
+    "language_model.model.layers.4.self_attn.v_proj",
+    "language_model.model.layers.9.mlp.gate_proj",
+    "language_model.model.layers.3.self_attn.q_proj",
+    "language_model.model.layers.7.self_attn.o_proj",
+    "language_model.model.layers.30.self_attn.k_proj",
+    "language_model.model.layers.31.self_attn.k_proj",
+    "language_model.model.layers.0.self_attn.q_proj",
+    "language_model.model.layers.25.mlp.down_proj",
+    "language_model.model.layers.31.mlp.gate_proj",
+    "language_model.model.layers.26.self_attn.v_proj",
+    "language_model.model.layers.27.self_attn.v_proj",
+    "language_model.model.layers.27.mlp.gate_proj",
+    "language_model.model.layers.7.self_attn.k_proj",
+    "language_model.model.layers.27.self_attn.o_proj",
+    "language_model.model.layers.0.self_attn.k_proj",
+    "language_model.model.layers.28.self_attn.v_proj",
+    "language_model.model.layers.25.mlp.gate_proj",
+    "language_model.model.layers.30.mlp.down_proj",
+    "language_model.model.layers.29.self_attn.k_proj",
+    "language_model.model.layers.17.self_attn.k_proj",
+    "language_model.model.layers.11.self_attn.q_proj",
+    "language_model.model.layers.5.mlp.gate_proj",
+    "language_model.model.layers.24.self_attn.v_proj",
+    "language_model.model.layers.31.mlp.up_proj",
+    "language_model.model.layers.1.mlp.gate_proj",
+    "language_model.model.layers.13.mlp.up_proj",
+    "language_model.model.layers.23.mlp.down_proj",
+    "language_model.model.layers.5.self_attn.k_proj",
+    "language_model.model.layers.9.self_attn.q_proj",
+    "language_model.model.layers.14.self_attn.v_proj",
+    "language_model.model.layers.24.mlp.gate_proj",
+    "language_model.model.layers.8.self_attn.o_proj",
+    "language_model.model.layers.31.self_attn.o_proj",
+    "language_model.model.layers.5.self_attn.q_proj",
+    "language_model.model.layers.12.self_attn.q_proj",
+    "language_model.model.layers.26.mlp.down_proj",
+    "language_model.model.layers.16.self_attn.k_proj",
+    "language_model.model.layers.12.self_attn.k_proj",
+    "language_model.model.layers.24.self_attn.k_proj",
+    "language_model.model.layers.26.mlp.gate_proj",
+    "language_model.model.layers.29.mlp.up_proj",
+    "projector.fc2",
+    "language_model.model.layers.5.self_attn.o_proj",
+    "language_model.model.layers.5.mlp.down_proj",
+    "language_model.model.layers.31.mlp.down_proj",
+    "language_model.model.layers.0.mlp.up_proj",
+    "language_model.model.layers.12.mlp.up_proj",
+    "language_model.model.layers.9.mlp.up_proj",
+    "language_model.model.layers.9.self_attn.o_proj",
+    "language_model.model.layers.22.self_attn.q_proj",
+    "language_model.model.layers.20.self_attn.k_proj",
+    "language_model.model.layers.25.self_attn.q_proj",
+    "language_model.model.layers.24.self_attn.o_proj",
+    "language_model.model.layers.26.self_attn.o_proj",
+    "language_model.model.layers.3.self_attn.k_proj",
+    "language_model.model.layers.30.mlp.gate_proj",
+    "language_model.model.layers.10.self_attn.k_proj",
+    "language_model.model.layers.30.self_attn.q_proj",
+    "language_model.model.layers.14.mlp.up_proj",
+    "language_model.model.layers.19.self_attn.v_proj",
+    "language_model.model.layers.21.mlp.gate_proj",
+    "language_model.model.layers.28.mlp.gate_proj",
+    "language_model.model.layers.1.self_attn.q_proj",
+    "language_model.model.layers.11.mlp.gate_proj",
+    "language_model.model.layers.18.mlp.down_proj",
+    "language_model.model.layers.2.mlp.up_proj",
+    "language_model.model.layers.12.self_attn.o_proj",
+    "language_model.model.layers.0.self_attn.v_proj",
+    "language_model.model.layers.10.self_attn.o_proj",
+    "language_model.model.layers.17.self_attn.v_proj",
+    "language_model.model.layers.3.self_attn.o_proj",
+    "language_model.model.layers.20.self_attn.o_proj",
+    "language_model.model.layers.4.mlp.up_proj",
+    "language_model.model.layers.5.self_attn.v_proj",
+    "language_model.model.layers.10.self_attn.q_proj",
+    "language_model.model.layers.13.self_attn.o_proj",
+    "language_model.model.layers.31.self_attn.v_proj",
+    "language_model.model.layers.2.self_attn.o_proj",
+    "language_model.model.layers.21.self_attn.o_proj",
+    "language_model.model.layers.30.mlp.up_proj",
+    "language_model.model.layers.29.self_attn.o_proj",
+    "language_model.model.layers.14.self_attn.k_proj",
+    "language_model.model.layers.15.mlp.down_proj",
+    "language_model.model.layers.8.self_attn.k_proj",
+    "language_model.model.layers.27.self_attn.k_proj",
+    "language_model.model.layers.6.self_attn.v_proj",
+    "language_model.model.layers.8.mlp.up_proj",
+    "language_model.model.layers.20.mlp.gate_proj",
158
+ "language_model.model.layers.21.self_attn.q_proj",
159
+ "language_model.model.layers.7.mlp.up_proj",
160
+ "language_model.model.layers.13.self_attn.v_proj",
161
+ "language_model.model.layers.20.self_attn.v_proj",
162
+ "language_model.model.layers.8.mlp.down_proj",
163
+ "language_model.model.layers.11.self_attn.v_proj",
164
+ "language_model.model.layers.0.mlp.down_proj",
165
+ "language_model.model.layers.10.mlp.gate_proj",
166
+ "language_model.model.layers.16.self_attn.o_proj",
167
+ "language_model.model.layers.15.mlp.gate_proj",
168
+ "language_model.model.layers.24.mlp.up_proj",
169
+ "language_model.model.layers.11.mlp.up_proj",
170
+ "language_model.model.layers.15.self_attn.q_proj",
171
+ "language_model.model.layers.22.self_attn.k_proj",
172
+ "language_model.model.layers.19.mlp.up_proj",
173
+ "language_model.model.layers.18.mlp.gate_proj",
174
+ "language_model.model.layers.17.self_attn.o_proj",
175
+ "language_model.model.layers.29.self_attn.v_proj",
176
+ "language_model.model.layers.16.self_attn.v_proj",
177
+ "language_model.model.layers.14.self_attn.q_proj",
178
+ "projector.fc3",
179
+ "language_model.model.layers.26.self_attn.q_proj",
180
+ "language_model.model.layers.27.self_attn.q_proj",
181
+ "language_model.model.layers.28.self_attn.q_proj",
182
+ "language_model.model.layers.1.mlp.up_proj",
183
+ "language_model.model.layers.8.self_attn.q_proj",
184
+ "language_model.model.layers.15.self_attn.k_proj",
185
+ "language_model.model.layers.30.self_attn.o_proj",
186
+ "language_model.model.layers.22.self_attn.o_proj",
187
+ "language_model.model.layers.25.mlp.up_proj",
188
+ "language_model.model.layers.16.mlp.up_proj",
189
+ "language_model.model.layers.23.mlp.up_proj",
190
+ "language_model.model.layers.7.self_attn.q_proj",
191
+ "language_model.model.layers.19.mlp.down_proj",
192
+ "language_model.model.layers.28.self_attn.o_proj",
193
+ "language_model.model.layers.13.self_attn.q_proj",
194
+ "language_model.model.layers.10.mlp.up_proj",
195
+ "language_model.model.layers.26.mlp.up_proj",
196
+ "language_model.model.layers.7.self_attn.v_proj",
197
+ "language_model.model.layers.6.self_attn.q_proj",
198
+ "language_model.model.layers.15.self_attn.v_proj",
199
+ "language_model.model.layers.18.self_attn.q_proj",
200
+ "language_model.model.layers.4.self_attn.q_proj",
201
+ "language_model.model.layers.1.self_attn.o_proj",
202
+ "language_model.model.layers.27.mlp.down_proj",
203
+ "language_model.model.layers.6.self_attn.k_proj",
204
+ "language_model.model.layers.28.mlp.up_proj",
205
+ "language_model.model.layers.29.mlp.down_proj",
206
+ "language_model.model.layers.6.mlp.gate_proj",
207
+ "language_model.model.layers.1.self_attn.v_proj",
208
+ "language_model.model.layers.3.mlp.up_proj",
209
+ "language_model.model.layers.23.mlp.gate_proj",
210
+ "language_model.model.layers.12.mlp.down_proj",
211
+ "language_model.model.layers.3.mlp.gate_proj",
212
+ "language_model.model.layers.28.self_attn.k_proj",
213
+ "language_model.model.layers.3.mlp.down_proj",
214
+ "language_model.model.layers.3.self_attn.v_proj",
215
+ "language_model.model.layers.27.mlp.up_proj",
216
+ "language_model.model.layers.18.self_attn.o_proj",
217
+ "language_model.model.layers.4.self_attn.k_proj",
218
+ "language_model.model.layers.14.mlp.down_proj",
219
+ "language_model.model.layers.12.self_attn.v_proj",
220
+ "language_model.model.layers.31.self_attn.q_proj",
221
+ "language_model.model.layers.30.self_attn.v_proj",
222
+ "language_model.model.layers.6.mlp.up_proj",
223
+ "language_model.model.layers.22.mlp.gate_proj",
224
+ "language_model.model.layers.22.self_attn.v_proj",
225
+ "language_model.model.layers.25.self_attn.o_proj",
226
+ "language_model.model.layers.4.mlp.gate_proj",
227
+ "language_model.model.layers.15.self_attn.o_proj",
228
+ "language_model.model.layers.1.mlp.down_proj",
229
+ "language_model.model.layers.8.mlp.gate_proj",
230
+ "language_model.model.layers.14.self_attn.o_proj",
231
+ "language_model.model.layers.21.self_attn.k_proj",
232
+ "language_model.model.layers.9.mlp.down_proj",
233
+ "language_model.model.layers.10.self_attn.v_proj",
234
+ "language_model.model.layers.0.self_attn.o_proj",
235
+ "language_model.model.layers.24.self_attn.q_proj",
236
+ "language_model.model.layers.29.self_attn.q_proj",
237
+ "language_model.model.layers.8.self_attn.v_proj",
238
+ "language_model.model.layers.2.self_attn.v_proj",
239
+ "language_model.model.layers.6.mlp.down_proj",
240
+ "language_model.model.layers.13.mlp.gate_proj",
241
+ "language_model.model.layers.20.mlp.down_proj",
242
+ "language_model.model.layers.21.mlp.up_proj",
243
+ "language_model.model.layers.23.self_attn.o_proj",
244
+ "language_model.model.layers.21.self_attn.v_proj",
245
+ "language_model.model.layers.24.mlp.down_proj",
246
+ "language_model.model.layers.16.mlp.gate_proj",
247
+ "language_model.model.layers.6.self_attn.o_proj",
248
+ "language_model.model.layers.5.mlp.up_proj",
249
+ "language_model.model.layers.28.mlp.down_proj",
250
+ "language_model.model.layers.25.self_attn.k_proj",
251
+ "language_model.model.layers.2.mlp.down_proj",
252
+ "language_model.model.layers.17.mlp.up_proj",
253
+ "language_model.model.layers.19.self_attn.o_proj"
254
+ ],
255
+ "task_type": null,
256
+ "use_dora": false,
257
+ "use_rslora": false
258
+ }
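The `target_modules` list above covers every attention projection (q/k/v/o) and MLP projection (gate/up/down) across all 32 Llama-2 decoder layers, plus the vision-language projector (`projector.fc1`–`fc3`) and `language_model.lm_head`, so the LoRA adapter touches essentially every linear layer in the model. Below is a minimal sketch of attaching this adapter with PEFT; the base checkpoint id and the merge step are assumptions for illustration, not taken from this repository's training scripts.

```python
# Sketch: attach the LoRA adapter in `orc_model/` to a base OpenVLA model with
# PEFT. The base checkpoint id and the merge step are illustrative assumptions.
import torch
from transformers import AutoModelForVision2Seq
from peft import PeftModel

base = AutoModelForVision2Seq.from_pretrained(
    "openvla/openvla-7b", torch_dtype=torch.bfloat16, trust_remote_code=True
)
# PeftModel reads adapter_config.json (rank/alpha/target_modules) and
# adapter_model.safetensors from the adapter directory.
vla = PeftModel.from_pretrained(base, "orc_model")
vla = vla.merge_and_unload()  # optionally fold the LoRA deltas into the base weights
```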
orc_model/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5bc9ade7a25cee73354c4e6f2973c1b2e548b04094280428f3f58d1fb2a9eb4f
3
+ size 328592296
orc_model/added_tokens.json ADDED
@@ -0,0 +1,3 @@
1
+ {
2
+ "<PAD>": 32000
3
+ }
orc_model/dataset_statistics.json ADDED
@@ -0,0 +1,127 @@
1
+ {
2
+ "real_data": {
3
+ "action": {
4
+ "mean": [
5
+ -0.00021713033493142575,
6
+ 3.951489816245157e-06,
7
+ -6.244335236260667e-05,
8
+ 0.024315927177667618,
9
+ -0.024200621992349625,
10
+ 0.0001426006929250434,
11
+ -0.1762954145669937
12
+ ],
13
+ "std": [
14
+ 0.005905783269554377,
15
+ 0.010097788646817207,
16
+ 0.005760197062045336,
17
+ 0.9473578929901123,
18
+ 0.9439426064491272,
19
+ 0.028618143871426582,
20
+ 0.9843310117721558
21
+ ],
22
+ "max": [
23
+ 0.0263775996863842,
24
+ 0.02954130433499813,
25
+ 0.02553696744143963,
26
+ 4.978769779205322,
27
+ 5.3642754554748535,
28
+ 0.4048313796520233,
29
+ 1.0
30
+ ],
31
+ "min": [
32
+ -0.029638897627592087,
33
+ -0.029703686013817787,
34
+ -0.024002285674214363,
35
+ -4.92792272567749,
36
+ -5.274268627166748,
37
+ -0.44714170694351196,
38
+ -1.0
39
+ ],
40
+ "q01": [
41
+ -0.018638468496501446,
42
+ -0.0258210021071136,
43
+ -0.012519038049504161,
44
+ -4.446948285102844,
45
+ -4.433915729522705,
46
+ -0.08356364756822586,
47
+ -1.0
48
+ ],
49
+ "q99": [
50
+ 0.017138871438801287,
51
+ 0.025166765898466083,
52
+ 0.02048220705240963,
53
+ 4.4373928689956665,
54
+ 4.42347291469574,
55
+ 0.08423277527093823,
56
+ 1.0
57
+ ],
58
+ "mask": [
59
+ true,
60
+ true,
61
+ true,
62
+ true,
63
+ true,
64
+ true,
65
+ false
66
+ ]
67
+ },
68
+ "proprio": {
69
+ "mean": [
70
+ 0.0,
71
+ 0.0,
72
+ 0.0,
73
+ 0.0,
74
+ 0.0,
75
+ 0.0,
76
+ 0.0
77
+ ],
78
+ "std": [
79
+ 0.0,
80
+ 0.0,
81
+ 0.0,
82
+ 0.0,
83
+ 0.0,
84
+ 0.0,
85
+ 0.0
86
+ ],
87
+ "max": [
88
+ 0.0,
89
+ 0.0,
90
+ 0.0,
91
+ 0.0,
92
+ 0.0,
93
+ 0.0,
94
+ 0.0
95
+ ],
96
+ "min": [
97
+ 0.0,
98
+ 0.0,
99
+ 0.0,
100
+ 0.0,
101
+ 0.0,
102
+ 0.0,
103
+ 0.0
104
+ ],
105
+ "q01": [
106
+ 0.0,
107
+ 0.0,
108
+ 0.0,
109
+ 0.0,
110
+ 0.0,
111
+ 0.0,
112
+ 0.0
113
+ ],
114
+ "q99": [
115
+ 0.0,
116
+ 0.0,
117
+ 0.0,
118
+ 0.0,
119
+ 0.0,
120
+ 0.0,
121
+ 0.0
122
+ ]
123
+ },
124
+ "num_transitions": 20148,
125
+ "num_trajectories": 150
126
+ }
127
+ }
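The `q01`/`q99` entries above are 1st/99th-percentile action bounds, and the last `mask` element is `false` because the gripper dimension is excluded from normalization. Below is a minimal sketch of mapping a model output in [-1, 1] back to a real action delta, assuming OpenVLA's quantile-based normalization scheme; the `unnormalize` helper is illustrative, not code from this repository.

```python
import json
import numpy as np

# Sketch: un-normalize a predicted action with the statistics stored in
# dataset_statistics.json. This mirrors OpenVLA's quantile normalization
# (actions scaled to [-1, 1] between q01 and q99); the helper name is ours.
def unnormalize(action: np.ndarray, stats: dict) -> np.ndarray:
    q01 = np.asarray(stats["q01"])
    q99 = np.asarray(stats["q99"])
    mask = np.asarray(stats["mask"])
    raw = 0.5 * (action + 1.0) * (q99 - q01) + q01
    # Masked-out dimensions (here: the gripper) pass through unchanged.
    return np.where(mask, raw, action)

with open("orc_model/dataset_statistics.json") as f:
    stats = json.load(f)["real_data"]["action"]

print(unnormalize(np.zeros(7), stats))  # midpoint of the q01-q99 range per masked dim
```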
orc_model/preprocessor_config.json ADDED
@@ -0,0 +1,114 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoImageProcessor": "processing_prismatic.PrismaticImageProcessor",
4
+ "AutoProcessor": "processing_prismatic.PrismaticProcessor"
5
+ },
6
+ "image_processor_type": "PrismaticImageProcessor",
7
+ "image_resize_strategy": "resize-naive",
8
+ "input_sizes": [
9
+ [
10
+ 3,
11
+ 224,
12
+ 224
13
+ ],
14
+ [
15
+ 3,
16
+ 224,
17
+ 224
18
+ ]
19
+ ],
20
+ "interpolations": [
21
+ "bicubic",
22
+ "bicubic"
23
+ ],
24
+ "means": [
25
+ [
26
+ 0.485,
27
+ 0.456,
28
+ 0.406
29
+ ],
30
+ [
31
+ 0.5,
32
+ 0.5,
33
+ 0.5
34
+ ]
35
+ ],
36
+ "processor_class": "PrismaticProcessor",
37
+ "stds": [
38
+ [
39
+ 0.229,
40
+ 0.224,
41
+ 0.225
42
+ ],
43
+ [
44
+ 0.5,
45
+ 0.5,
46
+ 0.5
47
+ ]
48
+ ],
49
+ "tvf_crop_params": [
50
+ {
51
+ "output_size": [
52
+ 224,
53
+ 224
54
+ ]
55
+ },
56
+ {
57
+ "output_size": [
58
+ 224,
59
+ 224
60
+ ]
61
+ }
62
+ ],
63
+ "tvf_do_letterbox": false,
64
+ "tvf_letterbox_fill": null,
65
+ "tvf_normalize_params": [
66
+ {
67
+ "inplace": false,
68
+ "mean": [
69
+ 0.484375,
70
+ 0.455078125,
71
+ 0.40625
72
+ ],
73
+ "std": [
74
+ 0.228515625,
75
+ 0.2236328125,
76
+ 0.224609375
77
+ ]
78
+ },
79
+ {
80
+ "inplace": false,
81
+ "mean": [
82
+ 0.5,
83
+ 0.5,
84
+ 0.5
85
+ ],
86
+ "std": [
87
+ 0.5,
88
+ 0.5,
89
+ 0.5
90
+ ]
91
+ }
92
+ ],
93
+ "tvf_resize_params": [
94
+ {
95
+ "antialias": true,
96
+ "interpolation": 3,
97
+ "max_size": null,
98
+ "size": [
99
+ 224,
100
+ 224
101
+ ]
102
+ },
103
+ {
104
+ "antialias": true,
105
+ "interpolation": 3,
106
+ "max_size": null,
107
+ "size": [
108
+ 224,
109
+ 224
110
+ ]
111
+ }
112
+ ],
113
+ "use_fused_vision_backbone": true
114
+ }
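The two parallel `means`/`stds` and resize entries, together with `use_fused_vision_backbone: true`, indicate that each frame is resized to 224×224 with bicubic interpolation (`interpolation: 3`) and normalized twice: once with (slightly quantized) ImageNet statistics for the DINOv2 tower and once with 0.5/0.5 statistics for the SigLIP tower. The sketch below reproduces those two pipelines with torchvision; that `PrismaticImageProcessor` stacks the two results channel-wise is an assumption here.

```python
import torch
from PIL import Image
from torchvision.transforms import InterpolationMode
from torchvision.transforms import functional as TVF

# Sketch of the two parallel pixel pipelines implied by preprocessor_config.json:
# one normalization per vision tower (DINOv2-style ImageNet stats, SigLIP-style
# 0.5 stats). How PrismaticImageProcessor combines the results is an assumption.
def preprocess(img: Image.Image) -> torch.Tensor:
    img = TVF.resize(img, [224, 224], interpolation=InterpolationMode.BICUBIC)
    x = TVF.to_tensor(img)
    dino = TVF.normalize(x, mean=[0.484375, 0.455078125, 0.40625],
                         std=[0.228515625, 0.2236328125, 0.224609375])
    siglip = TVF.normalize(x, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    return torch.cat([dino, siglip], dim=0)  # (6, 224, 224) fused input
```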
orc_model/pretrained_slots-w-filters_s16.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4fb915260b6c164b68b2abb642558980f5eaa6b8178613d089f88067b4234a5b
3
+ size 146434388
orc_model/relate_object_bboxes_w_mask_actionable_s16h1.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e3a25bf18137f9021d5244628d54464dabc08e44d278ceb8cc26f26e415d9107
3
+ size 1002856432
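Both `.safetensors` entries above are Git LFS pointer stubs for auxiliary weights; judging only by the filenames, they appear to hold the slot-attention and object-relation modules that give `oc_model`/`orc_model` their names. Below is a sketch for inspecting such a file with the `safetensors` library once the real blob has been pulled:

```python
from safetensors import safe_open

# Sketch: list tensor names/shapes in an auxiliary checkpoint without loading
# everything into memory. Requires the real file (git lfs pull), not the
# three-line pointer stub shown in this diff.
with safe_open("orc_model/pretrained_slots-w-filters_s16.safetensors",
               framework="pt", device="cpu") as f:
    for name in f.keys():
        print(name, tuple(f.get_slice(name).get_shape()))
```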
orc_model/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<PAD>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<unk>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
orc_model/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
orc_model/tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
3
+ size 499723
orc_model/tokenizer_config.json ADDED
@@ -0,0 +1,53 @@
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "added_tokens_decoder": {
5
+ "0": {
6
+ "content": "<unk>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "1": {
14
+ "content": "<s>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "2": {
22
+ "content": "</s>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "32000": {
30
+ "content": "<PAD>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ }
37
+ },
38
+ "auto_map": {
39
+ "AutoProcessor": "processing_prismatic.PrismaticProcessor"
40
+ },
41
+ "bos_token": "<s>",
42
+ "clean_up_tokenization_spaces": false,
43
+ "eos_token": "</s>",
44
+ "legacy": false,
45
+ "model_max_length": 2048,
46
+ "pad_token": "<PAD>",
47
+ "padding_side": "right",
48
+ "processor_class": "PrismaticProcessor",
49
+ "sp_model_kwargs": {},
50
+ "tokenizer_class": "LlamaTokenizer",
51
+ "unk_token": "<unk>",
52
+ "use_default_system_prompt": false
53
+ }
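The tokenizer is a stock Llama tokenizer (`add_bos_token: true`, right padding, `model_max_length: 2048`) extended with a single `<PAD>` token at id 32000. A quick sanity check, assuming the real `tokenizer.model` has been fetched via LFS:

```python
from transformers import AutoTokenizer

# Sketch: verify the extended Llama vocabulary declared in tokenizer_config.json
# and added_tokens.json.
tok = AutoTokenizer.from_pretrained("orc_model")
assert tok.pad_token == "<PAD>" and tok.pad_token_id == 32000
print(len(tok))  # 32001: the 32000 base Llama tokens plus <PAD>
```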
output_hf_model_openx/.gitattributes ADDED
@@ -0,0 +1,35 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
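This `.gitattributes` routes all binary formats through Git LFS, which is why the `.safetensors` and `tokenizer.model` entries in this commit appear as three-line pointer stubs (`version` / `oid` / `size`). The sketch below parses such a stub and verifies a downloaded blob against it; the pointer format follows the LFS spec referenced in the stubs, and the file paths are examples:

```python
import hashlib
from pathlib import Path

# Sketch: parse a git-lfs pointer stub (version/oid/size lines, per
# https://git-lfs.github.com/spec/v1) and check a fetched blob against it.
def parse_pointer(path: str) -> dict:
    fields = dict(line.split(" ", 1)
                  for line in Path(path).read_text().splitlines() if line)
    return {"oid": fields["oid"].removeprefix("sha256:"),
            "size": int(fields["size"])}

def verify(blob_path: str, pointer: dict) -> bool:
    data = Path(blob_path).read_bytes()
    return (len(data) == pointer["size"]
            and hashlib.sha256(data).hexdigest() == pointer["oid"])
```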
output_hf_model_openx/README.md ADDED
@@ -0,0 +1,99 @@
1
+ ---
2
+ library_name: transformers
3
+ tags:
4
+ - robotics
5
+ - vla
6
+ - image-text-to-text
7
+ - multimodal
8
+ - pretraining
9
+ license: mit
10
+ language:
11
+ - en
12
+ pipeline_tag: image-text-to-text
13
+ ---
14
+
15
+ # OpenVLA 7B
16
+
17
+ OpenVLA 7B (`openvla-7b`) is an open vision-language-action model trained on 970K robot manipulation episodes from the [Open X-Embodiment](https://robotics-transformer-x.github.io/) dataset.
18
+ The model takes language instructions and camera images as input and generates robot actions. It supports controlling multiple robots out-of-the-box, and can be quickly adapted for new robot domains via (parameter-efficient) fine-tuning.
19
+
20
+ All OpenVLA checkpoints, as well as our [training codebase](https://github.com/openvla/openvla), are released under the MIT License.
21
+
22
+ For full details, please read [our paper](https://arxiv.org/abs/2406.09246) and see [our project page](https://openvla.github.io/).
23
+
24
+ ## Model Summary
25
+
26
+ - **Developed by:** The OpenVLA team, consisting of researchers from Stanford, UC Berkeley, Google DeepMind, and the Toyota Research Institute.
27
+ - **Model type:** Vision-language-action (language, image => robot actions)
28
+ - **Language(s) (NLP):** en
29
+ - **License:** MIT
30
+ - **Finetuned from:** [`prism-dinosiglip-224px`](https://github.com/TRI-ML/prismatic-vlms), a VLM trained from:
31
+ + **Vision Backbone**: DINOv2 ViT-L/14 and SigLIP ViT-So400M/14
32
+ + **Language Model**: Llama-2
33
+ - **Pretraining Dataset:** [Open X-Embodiment](https://robotics-transformer-x.github.io/) -- specific component datasets can be found [here](https://github.com/openvla/openvla).
34
+ - **Repository:** [https://github.com/openvla/openvla](https://github.com/openvla/openvla)
35
+ - **Paper:** [OpenVLA: An Open-Source Vision-Language-Action Model](https://arxiv.org/abs/2406.09246)
36
+ - **Project Page & Videos:** [https://openvla.github.io/](https://openvla.github.io/)
37
+
38
+ ## Uses
39
+
40
+ OpenVLA models take a language instruction and a camera image of a robot workspace as input, and predict (normalized) robot actions consisting of 7-DoF end-effector deltas
41
+ of the form (x, y, z, roll, pitch, yaw, gripper). To execute on an actual robot platform, actions need to be *un-normalized* subject to statistics computed on a per-robot,
42
+ per-dataset basis. See [our repository](https://github.com/openvla/openvla) for more information.
43
+
44
+ OpenVLA models can be used zero-shot to control robots for specific combinations of embodiments and domains seen in the Open-X pretraining mixture (e.g., for
45
+ [BridgeV2 environments with a Widow-X robot](https://rail-berkeley.github.io/bridgedata/)). They can also be efficiently *fine-tuned* for new tasks and robot setups
46
+ given minimal demonstration data; [see here](https://github.com/openvla/openvla/blob/main/scripts/finetune.py).
47
+
48
+ **Out-of-Scope:** OpenVLA models do not zero-shot generalize to new (unseen) robot embodiments or setups that are not represented in the pretraining mix; in these cases,
49
+ we suggest collecting a dataset of demonstrations on the desired setup and fine-tuning OpenVLA models instead.
50
+
51
+ ## Getting Started
52
+
53
+ OpenVLA 7B can be used out-of-the-box to control multiple robots for domains represented in the pretraining mixture. For example,
54
+ here is how to load `openvla-7b` for zero-shot instruction following in [BridgeV2 environments](https://rail-berkeley.github.io/bridgedata/) with a Widow-X robot:
55
+
56
+ ```python
57
+ # Install minimal dependencies (`torch`, `transformers`, `timm`, `tokenizers`, ...)
58
+ # > pip install -r https://raw.githubusercontent.com/openvla/openvla/main/requirements-min.txt
59
+ from transformers import AutoModelForVision2Seq, AutoProcessor
60
+ from PIL import Image
61
+
62
+ import torch
63
+
64
+ # Load Processor & VLA
65
+ processor = AutoProcessor.from_pretrained("openvla/openvla-7b", trust_remote_code=True)
66
+ vla = AutoModelForVision2Seq.from_pretrained(
67
+ "openvla/openvla-7b",
68
+ attn_implementation="flash_attention_2", # [Optional] Requires `flash_attn`
69
+ torch_dtype=torch.bfloat16,
70
+ low_cpu_mem_usage=True,
71
+ trust_remote_code=True
72
+ ).to("cuda:0")
73
+
74
+ # Grab image input & format prompt
75
+ image: Image.Image = get_from_camera(...)
76
+ prompt = "In: What action should the robot take to {<INSTRUCTION>}?\nOut:"
77
+
78
+ # Predict Action (7-DoF; un-normalize for BridgeV2)
79
+ inputs = processor(prompt, image).to("cuda:0", dtype=torch.bfloat16)
80
+ action = vla.predict_action(**inputs, unnorm_key="bridge_orig", do_sample=False)
81
+
82
+ # Execute...
83
+ robot.act(action, ...)
84
+ ```
85
+
86
+ For more examples, including scripts for fine-tuning OpenVLA models on your own robot demonstration datasets, see [our training repository](https://github.com/openvla/openvla).
87
+
88
+ ## Citation
89
+
90
+ **BibTeX:**
91
+
92
+ ```bibtex
93
+ @article{kim24openvla,
94
+ title={OpenVLA: An Open-Source Vision-Language-Action Model},
95
+ author={{Moo Jin} Kim and Karl Pertsch and Siddharth Karamcheti and Ted Xiao and Ashwin Balakrishna and Suraj Nair and Rafael Rafailov and Ethan Foster and Grace Lam and Pannag Sanketi and Quan Vuong and Thomas Kollar and Benjamin Burchfiel and Russ Tedrake and Dorsa Sadigh and Sergey Levine and Percy Liang and Chelsea Finn},
96
+ journal = {arXiv preprint arXiv:2406.09246},
97
+ year={2024}
98
+ }
99
+ ```
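The `unnorm_key="bridge_orig"` argument in the snippet above selects which dataset's statistics from `norm_stats` (see `config.json` below) are used to un-normalize predicted actions. Below is a sketch for listing the valid keys; that `norm_stats` is exposed as a plain dict on the config object is an assumption based on `configuration_prismatic.py` being bundled with the checkpoint:

```python
from transformers import AutoConfig

# Sketch: enumerate valid `unnorm_key` choices from the checkpoint's config.
# Assumes norm_stats is stored as a plain dict on the config (see config.json).
cfg = AutoConfig.from_pretrained("openvla/openvla-7b", trust_remote_code=True)
print(sorted(cfg.norm_stats.keys()))  # e.g. includes "bridge_orig", "bc_z", ...
```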
output_hf_model_openx/added_tokens.json ADDED
@@ -0,0 +1,3 @@
1
+ {
2
+ "<PAD>": 32000
3
+ }
output_hf_model_openx/config.json ADDED
@@ -0,0 +1,3168 @@
1
+ {
2
+ "arch_specifier": "no-align+fused-gelu-mlp",
3
+ "architectures": [
4
+ "OpenVLAForActionPrediction"
5
+ ],
6
+ "auto_map": {
7
+ "AutoConfig": "configuration_prismatic.OpenVLAConfig",
8
+ "AutoModelForVision2Seq": "modeling_prismatic.OpenVLAForActionPrediction"
9
+ },
10
+ "hf_llm_id": "meta-llama/Llama-2-7b-hf",
11
+ "image_resize_strategy": "resize-naive",
12
+ "image_sizes": [
13
+ 224,
14
+ 224
15
+ ],
16
+ "llm_backbone_id": "llama2-7b-pure",
17
+ "llm_max_length": 2048,
18
+ "model_type": "openvla",
19
+ "n_action_bins": 256,
20
+ "norm_stats": {
21
+ "austin_buds_dataset_converted_externally_to_rlds": {
22
+ "action": {
23
+ "mask": [
24
+ true,
25
+ true,
26
+ true,
27
+ true,
28
+ true,
29
+ true,
30
+ false
31
+ ],
32
+ "max": [
33
+ 1.0,
34
+ 1.0,
35
+ 1.0,
36
+ 0.0,
37
+ 0.0,
38
+ 0.0,
39
+ 1.0
40
+ ],
41
+ "mean": [
42
+ -0.07678354531526566,
43
+ 0.0036849044263362885,
44
+ 0.05644911900162697,
45
+ 0.0,
46
+ 0.0,
47
+ 0.0,
48
+ 0.3510494828224182
49
+ ],
50
+ "min": [
51
+ -1.0,
52
+ -1.0,
53
+ -1.0,
54
+ 0.0,
55
+ 0.0,
56
+ 0.0,
57
+ 0.0
58
+ ],
59
+ "q01": [
60
+ -1.0,
61
+ -0.9599999785423279,
62
+ -0.8714285492897034,
63
+ 0.0,
64
+ 0.0,
65
+ 0.0,
66
+ 0.0
67
+ ],
68
+ "q99": [
69
+ 1.0,
70
+ 0.8600000143051147,
71
+ 1.0,
72
+ 0.0,
73
+ 0.0,
74
+ 0.0,
75
+ 1.0
76
+ ],
77
+ "std": [
78
+ 0.6367740631103516,
79
+ 0.37889179587364197,
80
+ 0.47796326875686646,
81
+ 0.0,
82
+ 0.0,
83
+ 0.0,
84
+ 0.47721168398857117
85
+ ]
86
+ },
87
+ "num_trajectories": 50,
88
+ "num_transitions": 34112,
89
+ "proprio": {
90
+ "max": [
91
+ 0.0,
92
+ 0.0,
93
+ 0.0,
94
+ 0.0,
95
+ 0.0,
96
+ 0.0,
97
+ 0.0
98
+ ],
99
+ "mean": [
100
+ 0.0,
101
+ 0.0,
102
+ 0.0,
103
+ 0.0,
104
+ 0.0,
105
+ 0.0,
106
+ 0.0
107
+ ],
108
+ "min": [
109
+ 0.0,
110
+ 0.0,
111
+ 0.0,
112
+ 0.0,
113
+ 0.0,
114
+ 0.0,
115
+ 0.0
116
+ ],
117
+ "q01": [
118
+ 0.0,
119
+ 0.0,
120
+ 0.0,
121
+ 0.0,
122
+ 0.0,
123
+ 0.0,
124
+ 0.0
125
+ ],
126
+ "q99": [
127
+ 0.0,
128
+ 0.0,
129
+ 0.0,
130
+ 0.0,
131
+ 0.0,
132
+ 0.0,
133
+ 0.0
134
+ ],
135
+ "std": [
136
+ 0.0,
137
+ 0.0,
138
+ 0.0,
139
+ 0.0,
140
+ 0.0,
141
+ 0.0,
142
+ 0.0
143
+ ]
144
+ }
145
+ },
146
+ "austin_sailor_dataset_converted_externally_to_rlds": {
147
+ "action": {
148
+ "mask": [
149
+ true,
150
+ true,
151
+ true,
152
+ true,
153
+ true,
154
+ true,
155
+ false
156
+ ],
157
+ "max": [
158
+ 1.0,
159
+ 1.0,
160
+ 1.0,
161
+ 0.0,
162
+ 0.0,
163
+ 0.375,
164
+ 1.0
165
+ ],
166
+ "mean": [
167
+ 0.011825348250567913,
168
+ 0.006461074110120535,
169
+ 0.06023626774549484,
170
+ 0.0,
171
+ 0.0,
172
+ 0.0016465914668515325,
173
+ 0.5260950326919556
174
+ ],
175
+ "min": [
176
+ -1.0,
177
+ -1.0,
178
+ -1.0,
179
+ 0.0,
180
+ 0.0,
181
+ -0.375,
182
+ 0.0
183
+ ],
184
+ "q01": [
185
+ -1.0,
186
+ -0.9828571677207947,
187
+ -0.6000000238418579,
188
+ 0.0,
189
+ 0.0,
190
+ -0.17249999940395355,
191
+ 0.0
192
+ ],
193
+ "q99": [
194
+ 1.0,
195
+ 0.9457142949104309,
196
+ 1.0,
197
+ 0.0,
198
+ 0.0,
199
+ 0.17892856895923615,
200
+ 1.0
201
+ ],
202
+ "std": [
203
+ 0.46348899602890015,
204
+ 0.41240179538726807,
205
+ 0.411862850189209,
206
+ 0.0,
207
+ 0.0,
208
+ 0.0578610822558403,
209
+ 0.49894046783447266
210
+ ]
211
+ },
212
+ "num_trajectories": 240,
213
+ "num_transitions": 353094,
214
+ "proprio": {
215
+ "max": [
216
+ 0.0,
217
+ 0.0,
218
+ 0.0,
219
+ 0.0,
220
+ 0.0,
221
+ 0.0,
222
+ 0.0
223
+ ],
224
+ "mean": [
225
+ 0.0,
226
+ 0.0,
227
+ 0.0,
228
+ 0.0,
229
+ 0.0,
230
+ 0.0,
231
+ 0.0
232
+ ],
233
+ "min": [
234
+ 0.0,
235
+ 0.0,
236
+ 0.0,
237
+ 0.0,
238
+ 0.0,
239
+ 0.0,
240
+ 0.0
241
+ ],
242
+ "q01": [
243
+ 0.0,
244
+ 0.0,
245
+ 0.0,
246
+ 0.0,
247
+ 0.0,
248
+ 0.0,
249
+ 0.0
250
+ ],
251
+ "q99": [
252
+ 0.0,
253
+ 0.0,
254
+ 0.0,
255
+ 0.0,
256
+ 0.0,
257
+ 0.0,
258
+ 0.0
259
+ ],
260
+ "std": [
261
+ 0.0,
262
+ 0.0,
263
+ 0.0,
264
+ 0.0,
265
+ 0.0,
266
+ 0.0,
267
+ 0.0
268
+ ]
269
+ }
270
+ },
271
+ "austin_sirius_dataset_converted_externally_to_rlds": {
272
+ "action": {
273
+ "mask": [
274
+ true,
275
+ true,
276
+ true,
277
+ true,
278
+ true,
279
+ true,
280
+ false
281
+ ],
282
+ "max": [
283
+ 1.0002285242080688,
284
+ 0.960608720779419,
285
+ 1.105179786682129,
286
+ 0.0,
287
+ 0.0,
288
+ 0.341785728931427,
289
+ 1.0
290
+ ],
291
+ "mean": [
292
+ 0.07747682929039001,
293
+ 0.03195561468601227,
294
+ 0.04244732856750488,
295
+ 0.0,
296
+ 0.0,
297
+ -0.01603456400334835,
298
+ 0.43260177969932556
299
+ ],
300
+ "min": [
301
+ -1.0183025598526,
302
+ -0.9800000190734863,
303
+ -0.9774575233459473,
304
+ 0.0,
305
+ 0.0,
306
+ -0.34607142210006714,
307
+ 0.0
308
+ ],
309
+ "q01": [
310
+ -0.780905865430832,
311
+ -0.5667179036140442,
312
+ -0.5254343223571777,
313
+ 0.0,
314
+ 0.0,
315
+ -0.28495091378688814,
316
+ 0.0
317
+ ],
318
+ "q99": [
319
+ 0.9569637751579284,
320
+ 0.6971374487876891,
321
+ 0.8124888157844541,
322
+ 0.0,
323
+ 0.0,
324
+ 0.1971428543329239,
325
+ 1.0
326
+ ],
327
+ "std": [
328
+ 0.3906329572200775,
329
+ 0.2998155355453491,
330
+ 0.2782271206378937,
331
+ 0.0,
332
+ 0.0,
333
+ 0.08120622485876083,
334
+ 0.49528297781944275
335
+ ]
336
+ },
337
+ "num_trajectories": 559,
338
+ "num_transitions": 279939,
339
+ "proprio": {
340
+ "max": [
341
+ 0.0,
342
+ 0.0,
343
+ 0.0,
344
+ 0.0,
345
+ 0.0,
346
+ 0.0,
347
+ 0.0
348
+ ],
349
+ "mean": [
350
+ 0.0,
351
+ 0.0,
352
+ 0.0,
353
+ 0.0,
354
+ 0.0,
355
+ 0.0,
356
+ 0.0
357
+ ],
358
+ "min": [
359
+ 0.0,
360
+ 0.0,
361
+ 0.0,
362
+ 0.0,
363
+ 0.0,
364
+ 0.0,
365
+ 0.0
366
+ ],
367
+ "q01": [
368
+ 0.0,
369
+ 0.0,
370
+ 0.0,
371
+ 0.0,
372
+ 0.0,
373
+ 0.0,
374
+ 0.0
375
+ ],
376
+ "q99": [
377
+ 0.0,
378
+ 0.0,
379
+ 0.0,
380
+ 0.0,
381
+ 0.0,
382
+ 0.0,
383
+ 0.0
384
+ ],
385
+ "std": [
386
+ 0.0,
387
+ 0.0,
388
+ 0.0,
389
+ 0.0,
390
+ 0.0,
391
+ 0.0,
392
+ 0.0
393
+ ]
394
+ }
395
+ },
396
+ "bc_z": {
397
+ "action": {
398
+ "mask": [
399
+ true,
400
+ true,
401
+ true,
402
+ true,
403
+ true,
404
+ true,
405
+ false
406
+ ],
407
+ "max": [
408
+ 0.2165454924106598,
409
+ 0.1251407265663147,
410
+ 0.10772687941789627,
411
+ 0.33544227480888367,
412
+ 0.28117990493774414,
413
+ 0.40614867210388184,
414
+ 1.0
415
+ ],
416
+ "mean": [
417
+ -0.009958467446267605,
418
+ 0.0008958321413956583,
419
+ 0.004995597992092371,
420
+ 0.00029755113064311445,
421
+ -0.008735382929444313,
422
+ -0.030693737789988518,
423
+ 0.8344562649726868
424
+ ],
425
+ "min": [
426
+ -0.1677047461271286,
427
+ -0.14630407094955444,
428
+ -0.10066790133714676,
429
+ -0.29421567916870117,
430
+ -0.32101404666900635,
431
+ -0.4635624885559082,
432
+ 0.0
433
+ ],
434
+ "q01": [
435
+ -0.09220654994249344,
436
+ -0.06456145539879798,
437
+ -0.049121275544166565,
438
+ -0.11594625547528267,
439
+ -0.14152548640966414,
440
+ -0.2251061636209488,
441
+ 0.0
442
+ ],
443
+ "q99": [
444
+ 0.07628866866230968,
445
+ 0.058019736707210584,
446
+ 0.052540797740221024,
447
+ 0.11740604028105736,
448
+ 0.11703975558280955,
449
+ 0.16729306846857078,
450
+ 1.0
451
+ ],
452
+ "std": [
453
+ 0.03053455986082554,
454
+ 0.0231423731893301,
455
+ 0.020641816779971123,
456
+ 0.04155943542718887,
457
+ 0.046427831053733826,
458
+ 0.0769818127155304,
459
+ 0.3610210120677948
460
+ ]
461
+ },
462
+ "num_trajectories": 43264,
463
+ "num_transitions": 6015535,
464
+ "proprio": {
465
+ "max": [
466
+ 0.0,
467
+ 0.0,
468
+ 0.0,
469
+ 0.0,
470
+ 0.0,
471
+ 0.0,
472
+ 0.0
473
+ ],
474
+ "mean": [
475
+ 0.0,
476
+ 0.0,
477
+ 0.0,
478
+ 0.0,
479
+ 0.0,
480
+ 0.0,
481
+ 0.0
482
+ ],
483
+ "min": [
484
+ 0.0,
485
+ 0.0,
486
+ 0.0,
487
+ 0.0,
488
+ 0.0,
489
+ 0.0,
490
+ 0.0
491
+ ],
492
+ "q01": [
493
+ 0.0,
494
+ 0.0,
495
+ 0.0,
496
+ 0.0,
497
+ 0.0,
498
+ 0.0,
499
+ 0.0
500
+ ],
501
+ "q99": [
502
+ 0.0,
503
+ 0.0,
504
+ 0.0,
505
+ 0.0,
506
+ 0.0,
507
+ 0.0,
508
+ 0.0
509
+ ],
510
+ "std": [
511
+ 0.0,
512
+ 0.0,
513
+ 0.0,
514
+ 0.0,
515
+ 0.0,
516
+ 0.0,
517
+ 0.0
518
+ ]
519
+ }
520
+ },
521
+ "berkeley_autolab_ur5": {
522
+ "action": {
523
+ "mask": [
524
+ true,
525
+ true,
526
+ true,
527
+ true,
528
+ true,
529
+ true,
530
+ false
531
+ ],
532
+ "max": [
533
+ 0.019999999552965164,
534
+ 0.019999999552965164,
535
+ 0.019999999552965164,
536
+ 0.06666667014360428,
537
+ 0.06666667014360428,
538
+ 0.06666667014360428,
539
+ 1.0
540
+ ],
541
+ "mean": [
542
+ 0.0005683620693162084,
543
+ 0.001217700308188796,
544
+ -0.0005296372692100704,
545
+ 0.00021029810886830091,
546
+ 6.0695128922816366e-05,
547
+ 0.001204986940138042,
548
+ 0.6298308372497559
549
+ ],
550
+ "min": [
551
+ -0.019999999552965164,
552
+ -0.019999999552965164,
553
+ -0.019999999552965164,
554
+ -0.06666667014360428,
555
+ -0.06666667014360428,
556
+ -0.06666667014360428,
557
+ 0.0
558
+ ],
559
+ "q01": [
560
+ -0.019999999552965164,
561
+ -0.019999999552965164,
562
+ -0.019999999552965164,
563
+ -0.02628571353852749,
564
+ -0.06666667014360428,
565
+ -0.03847619146108627,
566
+ 0.0
567
+ ],
568
+ "q99": [
569
+ 0.019999999552965164,
570
+ 0.019999999552965164,
571
+ 0.019999999552965164,
572
+ 0.031809523701667786,
573
+ 0.06666667014360428,
574
+ 0.036571428179740906,
575
+ 1.0
576
+ ],
577
+ "std": [
578
+ 0.0115329809486866,
579
+ 0.007990492507815361,
580
+ 0.009577835910022259,
581
+ 0.009432995691895485,
582
+ 0.016427582129836082,
583
+ 0.011053967289626598,
584
+ 0.48267969489097595
585
+ ]
586
+ },
587
+ "num_trajectories": 1000,
588
+ "num_transitions": 97939,
589
+ "proprio": {
590
+ "max": [
591
+ 0.0,
592
+ 0.0,
593
+ 0.0,
594
+ 0.0,
595
+ 0.0,
596
+ 0.0,
597
+ 0.0
598
+ ],
599
+ "mean": [
600
+ 0.0,
601
+ 0.0,
602
+ 0.0,
603
+ 0.0,
604
+ 0.0,
605
+ 0.0,
606
+ 0.0
607
+ ],
608
+ "min": [
609
+ 0.0,
610
+ 0.0,
611
+ 0.0,
612
+ 0.0,
613
+ 0.0,
614
+ 0.0,
615
+ 0.0
616
+ ],
617
+ "q01": [
618
+ 0.0,
619
+ 0.0,
620
+ 0.0,
621
+ 0.0,
622
+ 0.0,
623
+ 0.0,
624
+ 0.0
625
+ ],
626
+ "q99": [
627
+ 0.0,
628
+ 0.0,
629
+ 0.0,
630
+ 0.0,
631
+ 0.0,
632
+ 0.0,
633
+ 0.0
634
+ ],
635
+ "std": [
636
+ 0.0,
637
+ 0.0,
638
+ 0.0,
639
+ 0.0,
640
+ 0.0,
641
+ 0.0,
642
+ 0.0
643
+ ]
644
+ }
645
+ },
646
+ "berkeley_cable_routing": {
647
+ "action": {
648
+ "mask": [
649
+ true,
650
+ true,
651
+ true,
652
+ true,
653
+ true,
654
+ true,
655
+ false
656
+ ],
657
+ "max": [
658
+ 0.9633283019065857,
659
+ 1.0,
660
+ 1.0,
661
+ 0.0,
662
+ 0.0,
663
+ 1.0,
664
+ 0.0
665
+ ],
666
+ "mean": [
667
+ -0.07139874249696732,
668
+ 0.023609008640050888,
669
+ 0.10241943597793579,
670
+ 0.0,
671
+ 0.0,
672
+ 0.049671024084091187,
673
+ 0.0
674
+ ],
675
+ "min": [
676
+ -0.9809081554412842,
677
+ -0.9554349184036255,
678
+ -0.9994775056838989,
679
+ 0.0,
680
+ 0.0,
681
+ -1.0,
682
+ 0.0
683
+ ],
684
+ "q01": [
685
+ -0.5534318816661835,
686
+ -0.4797285574674606,
687
+ -0.5314934802055359,
688
+ 0.0,
689
+ 0.0,
690
+ -0.8855219376087189,
691
+ 0.0
692
+ ],
693
+ "q99": [
694
+ 0.42652835428714786,
695
+ 0.5000944086909298,
696
+ 0.639823433756829,
697
+ 0.0,
698
+ 0.0,
699
+ 0.984243879914284,
700
+ 0.0
701
+ ],
702
+ "std": [
703
+ 0.1815500408411026,
704
+ 0.1810990273952484,
705
+ 0.21220779418945312,
706
+ 0.0,
707
+ 0.0,
708
+ 0.3475511968135834,
709
+ 0.0
710
+ ]
711
+ },
712
+ "num_trajectories": 1647,
713
+ "num_transitions": 42328,
714
+ "proprio": {
715
+ "max": [
716
+ 0.0,
717
+ 0.0,
718
+ 0.0,
719
+ 0.0,
720
+ 0.0,
721
+ 0.0,
722
+ 0.0
723
+ ],
724
+ "mean": [
725
+ 0.0,
726
+ 0.0,
727
+ 0.0,
728
+ 0.0,
729
+ 0.0,
730
+ 0.0,
731
+ 0.0
732
+ ],
733
+ "min": [
734
+ 0.0,
735
+ 0.0,
736
+ 0.0,
737
+ 0.0,
738
+ 0.0,
739
+ 0.0,
740
+ 0.0
741
+ ],
742
+ "q01": [
743
+ 0.0,
744
+ 0.0,
745
+ 0.0,
746
+ 0.0,
747
+ 0.0,
748
+ 0.0,
749
+ 0.0
750
+ ],
751
+ "q99": [
752
+ 0.0,
753
+ 0.0,
754
+ 0.0,
755
+ 0.0,
756
+ 0.0,
757
+ 0.0,
758
+ 0.0
759
+ ],
760
+ "std": [
761
+ 0.0,
762
+ 0.0,
763
+ 0.0,
764
+ 0.0,
765
+ 0.0,
766
+ 0.0,
767
+ 0.0
768
+ ]
769
+ }
770
+ },
771
+ "berkeley_fanuc_manipulation": {
772
+ "action": {
773
+ "mask": [
774
+ true,
775
+ true,
776
+ true,
777
+ true,
778
+ true,
779
+ true,
780
+ false
781
+ ],
782
+ "max": [
783
+ 0.009999999776482582,
784
+ 0.009999999776482582,
785
+ 0.009999999776482582,
786
+ 0.03490658476948738,
787
+ 0.03490658476948738,
788
+ 0.03490658476948738,
789
+ 1.0
790
+ ],
791
+ "mean": [
792
+ 0.0007744057802483439,
793
+ -0.00031240080716088414,
794
+ -0.0015001941937953234,
795
+ -0.0007515158504247665,
796
+ -0.00015832878125365824,
797
+ 0.00014327642566058785,
798
+ 0.699295699596405
799
+ ],
800
+ "min": [
801
+ -0.009999999776482582,
802
+ -0.009999999776482582,
803
+ -0.009999999776482582,
804
+ -0.03490658476948738,
805
+ -0.03490658476948738,
806
+ -0.03490658476948738,
807
+ 0.0
808
+ ],
809
+ "q01": [
810
+ -0.009999999776482582,
811
+ -0.009999999776482582,
812
+ -0.009999999776482582,
813
+ -0.03490658476948738,
814
+ 0.0,
815
+ -0.03490658476948738,
816
+ 0.0
817
+ ],
818
+ "q99": [
819
+ 0.009999999776482582,
820
+ 0.009999999776482582,
821
+ 0.009999999776482582,
822
+ 0.03490658476948738,
823
+ 0.0,
824
+ 0.03490658476948738,
825
+ 1.0
826
+ ],
827
+ "std": [
828
+ 0.0034070091787725687,
829
+ 0.0049921851605176926,
830
+ 0.005344334989786148,
831
+ 0.00759894959628582,
832
+ 0.004081866703927517,
833
+ 0.008568956516683102,
834
+ 0.4586937427520752
835
+ ]
836
+ },
837
+ "num_trajectories": 415,
838
+ "num_transitions": 62613,
839
+ "proprio": {
840
+ "max": [
841
+ 0.0,
842
+ 0.0,
843
+ 0.0,
844
+ 0.0,
845
+ 0.0,
846
+ 0.0,
847
+ 0.0
848
+ ],
849
+ "mean": [
850
+ 0.0,
851
+ 0.0,
852
+ 0.0,
853
+ 0.0,
854
+ 0.0,
855
+ 0.0,
856
+ 0.0
857
+ ],
858
+ "min": [
859
+ 0.0,
860
+ 0.0,
861
+ 0.0,
862
+ 0.0,
863
+ 0.0,
864
+ 0.0,
865
+ 0.0
866
+ ],
867
+ "q01": [
868
+ 0.0,
869
+ 0.0,
870
+ 0.0,
871
+ 0.0,
872
+ 0.0,
873
+ 0.0,
874
+ 0.0
875
+ ],
876
+ "q99": [
877
+ 0.0,
878
+ 0.0,
879
+ 0.0,
880
+ 0.0,
881
+ 0.0,
882
+ 0.0,
883
+ 0.0
884
+ ],
885
+ "std": [
886
+ 0.0,
887
+ 0.0,
888
+ 0.0,
889
+ 0.0,
890
+ 0.0,
891
+ 0.0,
892
+ 0.0
893
+ ]
894
+ }
895
+ },
896
+ "bridge_orig": {
897
+ "action": {
898
+ "mask": [
899
+ true,
900
+ true,
901
+ true,
902
+ true,
903
+ true,
904
+ true,
905
+ false
906
+ ],
907
+ "max": [
908
+ 0.41691166162490845,
909
+ 0.25864794850349426,
910
+ 0.21218234300613403,
911
+ 3.122201919555664,
912
+ 1.8618112802505493,
913
+ 6.280478477478027,
914
+ 1.0
915
+ ],
916
+ "mean": [
917
+ 0.0002334194869035855,
918
+ 0.00013004911306779832,
919
+ -0.00012762474943883717,
920
+ -0.0001556558854645118,
921
+ -0.0004039328487124294,
922
+ 0.00023557482927571982,
923
+ 0.5764579176902771
924
+ ],
925
+ "min": [
926
+ -0.4007510244846344,
927
+ -0.13874775171279907,
928
+ -0.22553899884223938,
929
+ -3.2010786533355713,
930
+ -1.8618112802505493,
931
+ -6.279075622558594,
932
+ 0.0
933
+ ],
934
+ "q01": [
935
+ -0.02872725307941437,
936
+ -0.04170349963009357,
937
+ -0.026093858778476715,
938
+ -0.08092105075716972,
939
+ -0.09288699507713317,
940
+ -0.20718276381492615,
941
+ 0.0
942
+ ],
943
+ "q99": [
944
+ 0.028309678435325586,
945
+ 0.040855254605412394,
946
+ 0.040161586627364146,
947
+ 0.08192047759890528,
948
+ 0.07792850524187081,
949
+ 0.20382574498653397,
950
+ 1.0
951
+ ],
952
+ "std": [
953
+ 0.009765930473804474,
954
+ 0.013689135201275349,
955
+ 0.012667362578213215,
956
+ 0.028534092009067535,
957
+ 0.030637972056865692,
958
+ 0.07691419124603271,
959
+ 0.4973701536655426
960
+ ]
961
+ },
962
+ "num_trajectories": 60064,
963
+ "num_transitions": 2135463,
964
+ "proprio": {
965
+ "max": [
966
+ 0.0,
967
+ 0.0,
968
+ 0.0,
969
+ 0.0,
970
+ 0.0,
971
+ 0.0,
972
+ 0.0
973
+ ],
974
+ "mean": [
975
+ 0.0,
976
+ 0.0,
977
+ 0.0,
978
+ 0.0,
979
+ 0.0,
980
+ 0.0,
981
+ 0.0
982
+ ],
983
+ "min": [
984
+ 0.0,
985
+ 0.0,
986
+ 0.0,
987
+ 0.0,
988
+ 0.0,
989
+ 0.0,
990
+ 0.0
991
+ ],
992
+ "q01": [
993
+ 0.0,
994
+ 0.0,
995
+ 0.0,
996
+ 0.0,
997
+ 0.0,
998
+ 0.0,
999
+ 0.0
1000
+ ],
1001
+ "q99": [
1002
+ 0.0,
1003
+ 0.0,
1004
+ 0.0,
1005
+ 0.0,
1006
+ 0.0,
1007
+ 0.0,
1008
+ 0.0
1009
+ ],
1010
+ "std": [
1011
+ 0.0,
1012
+ 0.0,
1013
+ 0.0,
1014
+ 0.0,
1015
+ 0.0,
1016
+ 0.0,
1017
+ 0.0
1018
+ ]
1019
+ }
1020
+ },
1021
+ "cmu_stretch": {
1022
+ "action": {
1023
+ "mask": [
1024
+ true,
1025
+ true,
1026
+ true,
1027
+ true,
1028
+ true,
1029
+ true,
1030
+ false
1031
+ ],
1032
+ "max": [
1033
+ 0.02338407188653946,
1034
+ 0.0,
1035
+ 0.023404927924275398,
1036
+ 0.0,
1037
+ 0.0,
1038
+ 0.0,
1039
+ 1.0
1040
+ ],
1041
+ "mean": [
1042
+ 0.00036304505192674696,
1043
+ 0.0,
1044
+ 0.0016466958913952112,
1045
+ 0.0,
1046
+ 0.0,
1047
+ 0.0,
1048
+ 0.3987048268318176
1049
+ ],
1050
+ "min": [
1051
+ -0.019353797659277916,
1052
+ 0.0,
1053
+ -0.02019215188920498,
1054
+ 0.0,
1055
+ 0.0,
1056
+ 0.0,
1057
+ 0.0
1058
+ ],
1059
+ "q01": [
1060
+ -0.011175686959177256,
1061
+ 0.0,
1062
+ -0.0032206363626755773,
1063
+ 0.0,
1064
+ 0.0,
1065
+ 0.0,
1066
+ 0.0
1067
+ ],
1068
+ "q99": [
1069
+ 0.014501785952597848,
1070
+ 0.0,
1071
+ 0.015056106168776728,
1072
+ 0.0,
1073
+ 0.0,
1074
+ 0.0,
1075
+ 1.0
1076
+ ],
1077
+ "std": [
1078
+ 0.004081828519701958,
1079
+ 0.0,
1080
+ 0.0037743328139185905,
1081
+ 0.0,
1082
+ 0.0,
1083
+ 0.0,
1084
+ 0.48963725566864014
1085
+ ]
1086
+ },
1087
+ "num_trajectories": 135,
1088
+ "num_transitions": 25016,
1089
+ "proprio": {
1090
+ "max": [
1091
+ 0.0,
1092
+ 0.0,
1093
+ 0.0,
1094
+ 0.0,
1095
+ 0.0,
1096
+ 0.0,
1097
+ 0.0
1098
+ ],
1099
+ "mean": [
1100
+ 0.0,
1101
+ 0.0,
1102
+ 0.0,
1103
+ 0.0,
1104
+ 0.0,
1105
+ 0.0,
1106
+ 0.0
1107
+ ],
1108
+ "min": [
1109
+ 0.0,
1110
+ 0.0,
1111
+ 0.0,
1112
+ 0.0,
1113
+ 0.0,
1114
+ 0.0,
1115
+ 0.0
1116
+ ],
1117
+ "q01": [
1118
+ 0.0,
1119
+ 0.0,
1120
+ 0.0,
1121
+ 0.0,
1122
+ 0.0,
1123
+ 0.0,
1124
+ 0.0
1125
+ ],
1126
+ "q99": [
1127
+ 0.0,
1128
+ 0.0,
1129
+ 0.0,
1130
+ 0.0,
1131
+ 0.0,
1132
+ 0.0,
1133
+ 0.0
1134
+ ],
1135
+ "std": [
1136
+ 0.0,
1137
+ 0.0,
1138
+ 0.0,
1139
+ 0.0,
1140
+ 0.0,
1141
+ 0.0,
1142
+ 0.0
1143
+ ]
1144
+ }
1145
+ },
1146
+ "dlr_edan_shared_control_converted_externally_to_rlds": {
1147
+ "action": {
1148
+ "mask": [
1149
+ true,
1150
+ true,
1151
+ true,
1152
+ true,
1153
+ true,
1154
+ true,
1155
+ false
1156
+ ],
1157
+ "max": [
1158
+ 0.18991442024707794,
1159
+ 0.0739002525806427,
1160
+ 0.18064819276332855,
1161
+ 0.0866486132144928,
1162
+ 0.13464981317520142,
1163
+ 0.16910280287265778,
1164
+ 1.0
1165
+ ],
1166
+ "mean": [
1167
+ 0.006647810339927673,
1168
+ -0.0007657372043468058,
1169
+ 0.006522852927446365,
1170
+ 0.0011679717572405934,
1171
+ -0.006395625416189432,
1172
+ -0.011902998201549053,
1173
+ 0.6985887289047241
1174
+ ],
1175
+ "min": [
1176
+ -0.10054297000169754,
1177
+ -0.08427435159683228,
1178
+ -0.13533438742160797,
1179
+ -0.17556548118591309,
1180
+ -0.18485672771930695,
1181
+ -0.2680685818195343,
1182
+ 0.0
1183
+ ],
1184
+ "q01": [
1185
+ -0.02987122368067503,
1186
+ -0.06013262912631035,
1187
+ -0.08286409199237824,
1188
+ -0.05924444157630205,
1189
+ -0.15986866518855095,
1190
+ -0.15636983573436739,
1191
+ 0.0
1192
+ ],
1193
+ "q99": [
1194
+ 0.08832092039287087,
1195
+ 0.042126184627413736,
1196
+ 0.11311905644834042,
1197
+ 0.0643695573508739,
1198
+ 0.03941855944693088,
1199
+ 0.156646853685379,
1200
+ 1.0
1201
+ ],
1202
+ "std": [
1203
+ 0.021393608301877975,
1204
+ 0.01814231649041176,
1205
+ 0.03374375030398369,
1206
+ 0.01743541844189167,
1207
+ 0.03394376486539841,
1208
+ 0.04641875624656677,
1209
+ 0.4588589072227478
1210
+ ]
1211
+ },
1212
+ "num_trajectories": 104,
1213
+ "num_transitions": 8928,
1214
+ "proprio": {
1215
+ "max": [
1216
+ 0.0,
1217
+ 0.0,
1218
+ 0.0,
1219
+ 0.0,
1220
+ 0.0,
1221
+ 0.0,
1222
+ 0.0
1223
+ ],
1224
+ "mean": [
1225
+ 0.0,
1226
+ 0.0,
1227
+ 0.0,
1228
+ 0.0,
1229
+ 0.0,
1230
+ 0.0,
1231
+ 0.0
1232
+ ],
1233
+ "min": [
1234
+ 0.0,
1235
+ 0.0,
1236
+ 0.0,
1237
+ 0.0,
1238
+ 0.0,
1239
+ 0.0,
1240
+ 0.0
1241
+ ],
1242
+ "q01": [
1243
+ 0.0,
1244
+ 0.0,
1245
+ 0.0,
1246
+ 0.0,
1247
+ 0.0,
1248
+ 0.0,
1249
+ 0.0
1250
+ ],
1251
+ "q99": [
1252
+ 0.0,
1253
+ 0.0,
1254
+ 0.0,
1255
+ 0.0,
1256
+ 0.0,
1257
+ 0.0,
1258
+ 0.0
1259
+ ],
1260
+ "std": [
1261
+ 0.0,
1262
+ 0.0,
1263
+ 0.0,
1264
+ 0.0,
1265
+ 0.0,
1266
+ 0.0,
1267
+ 0.0
1268
+ ]
1269
+ }
1270
+ },
1271
+ "dobbe": {
1272
+ "action": {
1273
+ "mask": [
1274
+ true,
1275
+ true,
1276
+ true,
1277
+ true,
1278
+ true,
1279
+ true,
1280
+ false
1281
+ ],
1282
+ "max": [
1283
+ 38.590423583984375,
1284
+ 17.932697296142578,
1285
+ 4.843764305114746,
1286
+ 1.4372116327285767,
1287
+ 0.4340403974056244,
1288
+ 1.2057193517684937,
1289
+ 0.9998947381973267
1290
+ ],
1291
+ "mean": [
1292
+ -0.0001120665911003016,
1293
+ 0.0011229600058868527,
1294
+ -0.00010194431524723768,
1295
+ -7.371398532995954e-05,
1296
+ -0.00067531579406932,
1297
+ -5.6643435527803376e-05,
1298
+ 0.6318281888961792
1299
+ ],
1300
+ "min": [
1301
+ -5.700923442840576,
1302
+ -21.605947494506836,
1303
+ -123.72489929199219,
1304
+ -1.7229845523834229,
1305
+ -0.4998578727245331,
1306
+ -0.8867913484573364,
1307
+ 1.4196479014572105e-06
1308
+ ],
1309
+ "q01": [
1310
+ -0.01119564864784479,
1311
+ -0.014266146533191203,
1312
+ -0.0071747214533388615,
1313
+ -0.009444301575422287,
1314
+ -0.03990109823644161,
1315
+ -0.017422311007976532,
1316
+ 4.003279136668425e-05
1317
+ ],
1318
+ "q99": [
1319
+ 0.01015154086053368,
1320
+ 0.017181577533483497,
1321
+ 0.007216989761218411,
1322
+ 0.010380979906767595,
1323
+ 0.03556173853576176,
1324
+ 0.018032474815845446,
1325
+ 0.9982578039169312
1326
+ ],
1327
+ "std": [
1328
+ 0.04264938458800316,
1329
+ 0.04428559169173241,
1330
+ 0.12224084138870239,
1331
+ 0.005388413090258837,
1332
+ 0.011246449314057827,
1333
+ 0.006287882570177317,
1334
+ 0.39732322096824646
1335
+ ]
1336
+ },
1337
+ "num_trajectories": 5208,
1338
+ "num_transitions": 1139911,
1339
+ "proprio": {
1340
+ "max": [
1341
+ 0.0,
1342
+ 0.0,
1343
+ 0.0,
1344
+ 0.0,
1345
+ 0.0,
1346
+ 0.0,
1347
+ 0.0
1348
+ ],
1349
+ "mean": [
1350
+ 0.0,
1351
+ 0.0,
1352
+ 0.0,
1353
+ 0.0,
1354
+ 0.0,
1355
+ 0.0,
1356
+ 0.0
1357
+ ],
1358
+ "min": [
1359
+ 0.0,
1360
+ 0.0,
1361
+ 0.0,
1362
+ 0.0,
1363
+ 0.0,
1364
+ 0.0,
1365
+ 0.0
1366
+ ],
1367
+ "q01": [
1368
+ 0.0,
1369
+ 0.0,
1370
+ 0.0,
1371
+ 0.0,
1372
+ 0.0,
1373
+ 0.0,
1374
+ 0.0
1375
+ ],
1376
+ "q99": [
1377
+ 0.0,
1378
+ 0.0,
1379
+ 0.0,
1380
+ 0.0,
1381
+ 0.0,
1382
+ 0.0,
1383
+ 0.0
1384
+ ],
1385
+ "std": [
1386
+ 0.0,
1387
+ 0.0,
1388
+ 0.0,
1389
+ 0.0,
1390
+ 0.0,
1391
+ 0.0,
1392
+ 0.0
1393
+ ]
1394
+ }
1395
+ },
1396
+ "fmb_dataset": {
1397
+ "action": {
1398
+ "mask": [
1399
+ true,
1400
+ true,
1401
+ true,
1402
+ true,
1403
+ true,
1404
+ true,
1405
+ false
1406
+ ],
1407
+ "max": [
1408
+ 1.399999976158142,
1409
+ 1.0,
1410
+ 1.399999976158142,
1411
+ 1.0,
1412
+ 1.0,
1413
+ 1.0,
1414
+ 1.0
1415
+ ],
1416
+ "mean": [
1417
+ 0.059029702097177505,
1418
+ -0.06476633995771408,
1419
+ -0.09787475317716599,
1420
+ 0.004325388930737972,
1421
+ 0.00028963794466108084,
1422
+ -0.04457257315516472,
1423
+ 0.7336440086364746
1424
+ ],
1425
+ "min": [
1426
+ -1.399999976158142,
1427
+ -1.399999976158142,
1428
+ -1.0,
1429
+ -1.0,
1430
+ -1.0,
1431
+ -1.0,
1432
+ 0.0
1433
+ ],
1434
+ "q01": [
1435
+ -0.8257142901420593,
1436
+ -1.399999976158142,
1437
+ -1.0,
1438
+ -1.0,
1439
+ -0.3028571307659149,
1440
+ -1.0,
1441
+ 0.0
1442
+ ],
1443
+ "q99": [
1444
+ 1.0,
1445
+ 0.5257142782211304,
1446
+ 1.0,
1447
+ 1.0,
1448
+ 0.3400000035762787,
1449
+ 1.0,
1450
+ 1.0
1451
+ ],
1452
+ "std": [
1453
+ 0.28809213638305664,
1454
+ 0.2820415794849396,
1455
+ 0.4626740515232086,
1456
+ 0.3266514539718628,
1457
+ 0.10842999070882797,
1458
+ 0.3440099358558655,
1459
+ 0.4435282051563263
1460
+ ]
1461
+ },
1462
+ "num_trajectories": 8612,
1463
+ "num_transitions": 1137459,
1464
+ "proprio": {
1465
+ "max": [
1466
+ 0.0,
1467
+ 0.0,
1468
+ 0.0,
1469
+ 0.0,
1470
+ 0.0,
1471
+ 0.0,
1472
+ 0.0
1473
+ ],
1474
+ "mean": [
1475
+ 0.0,
1476
+ 0.0,
1477
+ 0.0,
1478
+ 0.0,
1479
+ 0.0,
1480
+ 0.0,
1481
+ 0.0
1482
+ ],
1483
+ "min": [
1484
+ 0.0,
1485
+ 0.0,
1486
+ 0.0,
1487
+ 0.0,
1488
+ 0.0,
1489
+ 0.0,
1490
+ 0.0
1491
+ ],
1492
+ "q01": [
1493
+ 0.0,
1494
+ 0.0,
1495
+ 0.0,
1496
+ 0.0,
1497
+ 0.0,
1498
+ 0.0,
1499
+ 0.0
1500
+ ],
1501
+ "q99": [
1502
+ 0.0,
1503
+ 0.0,
1504
+ 0.0,
1505
+ 0.0,
1506
+ 0.0,
1507
+ 0.0,
1508
+ 0.0
1509
+ ],
1510
+ "std": [
1511
+ 0.0,
1512
+ 0.0,
1513
+ 0.0,
1514
+ 0.0,
1515
+ 0.0,
1516
+ 0.0,
1517
+ 0.0
1518
+ ]
1519
+ }
1520
+ },
1521
+ "fractal20220817_data": {
1522
+ "action": {
1523
+ "mask": [
1524
+ true,
1525
+ true,
1526
+ true,
1527
+ true,
1528
+ true,
1529
+ true,
1530
+ false
1531
+ ],
1532
+ "max": [
1533
+ 2.9984593391418457,
1534
+ 22.09052848815918,
1535
+ 2.7507524490356445,
1536
+ 1.570636510848999,
1537
+ 1.5321086645126343,
1538
+ 1.5691522359848022,
1539
+ 1.0
1540
+ ],
1541
+ "mean": [
1542
+ 0.006987582892179489,
1543
+ 0.006265917327255011,
1544
+ -0.01262515690177679,
1545
+ 0.04333311319351196,
1546
+ -0.005756212864071131,
1547
+ 0.0009130256366916001,
1548
+ 0.5354204773902893
1549
+ ],
1550
+ "min": [
1551
+ -2.0204520225524902,
1552
+ -5.497899532318115,
1553
+ -2.031663417816162,
1554
+ -1.569917917251587,
1555
+ -1.569892168045044,
1556
+ -1.570419430732727,
1557
+ 0.0
1558
+ ],
1559
+ "q01": [
1560
+ -0.22453527510166169,
1561
+ -0.14820013284683228,
1562
+ -0.231589707583189,
1563
+ -0.3517994859814644,
1564
+ -0.4193011274933815,
1565
+ -0.43643461108207704,
1566
+ 0.0
1567
+ ],
1568
+ "q99": [
1569
+ 0.17824687153100965,
1570
+ 0.14938379630446405,
1571
+ 0.21842354819178575,
1572
+ 0.5892666035890578,
1573
+ 0.35272657424211445,
1574
+ 0.44796681255102094,
1575
+ 1.0
1576
+ ],
1577
+ "std": [
1578
+ 0.0692116990685463,
1579
+ 0.05970962345600128,
1580
+ 0.07353084534406662,
1581
+ 0.15610496699810028,
1582
+ 0.13164450228214264,
1583
+ 0.14593800902366638,
1584
+ 0.497110515832901
1585
+ ]
1586
+ },
1587
+ "num_trajectories": 87212,
1588
+ "num_transitions": 3786400,
1589
+ "proprio": {
1590
+ "max": [
1591
+ 0.0,
1592
+ 0.0,
1593
+ 0.0,
1594
+ 0.0,
1595
+ 0.0,
1596
+ 0.0,
1597
+ 0.0
1598
+ ],
1599
+ "mean": [
1600
+ 0.0,
1601
+ 0.0,
1602
+ 0.0,
1603
+ 0.0,
1604
+ 0.0,
1605
+ 0.0,
1606
+ 0.0
1607
+ ],
1608
+ "min": [
1609
+ 0.0,
1610
+ 0.0,
1611
+ 0.0,
1612
+ 0.0,
1613
+ 0.0,
1614
+ 0.0,
1615
+ 0.0
1616
+ ],
1617
+ "q01": [
1618
+ 0.0,
1619
+ 0.0,
1620
+ 0.0,
1621
+ 0.0,
1622
+ 0.0,
1623
+ 0.0,
1624
+ 0.0
1625
+ ],
1626
+ "q99": [
1627
+ 0.0,
1628
+ 0.0,
1629
+ 0.0,
1630
+ 0.0,
1631
+ 0.0,
1632
+ 0.0,
1633
+ 0.0
1634
+ ],
1635
+ "std": [
1636
+ 0.0,
1637
+ 0.0,
1638
+ 0.0,
1639
+ 0.0,
1640
+ 0.0,
1641
+ 0.0,
1642
+ 0.0
1643
+ ]
1644
+ }
1645
+ },
1646
+ "furniture_bench_dataset_converted_externally_to_rlds": {
1647
+ "action": {
1648
+ "mask": [
1649
+ true,
1650
+ true,
1651
+ true,
1652
+ true,
1653
+ true,
1654
+ true,
1655
+ false
1656
+ ],
1657
+ "max": [
1658
+ 0.10000000149011612,
1659
+ 0.10000000149011612,
1660
+ 0.10000000149011612,
1661
+ 0.8651833534240723,
1662
+ 1.0909736156463623,
1663
+ 2.863185405731201,
1664
+ 1.0
1665
+ ],
1666
+ "mean": [
1667
+ 0.00014610752987209707,
1668
+ 0.0010830952087417245,
1669
+ 0.0006224989192560315,
1670
+ -0.003303206292912364,
1671
+ -0.0026880695950239897,
1672
+ 0.018242603167891502,
1673
+ 0.48854944109916687
1674
+ ],
1675
+ "min": [
1676
+ -0.10495579987764359,
1677
+ -0.10939455777406693,
1678
+ -0.10000000149011612,
1679
+ -0.971906840801239,
1680
+ -1.0475432872772217,
1681
+ -3.06000018119812,
1682
+ 0.0
1683
+ ],
1684
+ "q01": [
1685
+ -0.053988199681043625,
1686
+ -0.05049169331789017,
1687
+ -0.032499241530895236,
1688
+ -0.1953887003660202,
1689
+ -0.41674559473991396,
1690
+ -0.8886768388748169,
1691
+ 0.0
1692
+ ],
1693
+ "q99": [
1694
+ 0.05414841488003723,
1695
+ 0.04965164884924884,
1696
+ 0.060055799782276154,
1697
+ 0.18231668293476103,
1698
+ 0.39867786407470646,
1699
+ 0.8772023963928218,
1700
+ 1.0
1701
+ ],
1702
+ "std": [
1703
+ 0.01610708422958851,
1704
+ 0.014891477301716805,
1705
+ 0.014014219865202904,
1706
+ 0.058274295181035995,
1707
+ 0.11417088657617569,
1708
+ 0.33479776978492737,
1709
+ 0.49991825222969055
1710
+ ]
1711
+ },
1712
+ "num_trajectories": 5100,
1713
+ "num_transitions": 3948057,
1714
+ "proprio": {
1715
+ "max": [
1716
+ 0.0,
1717
+ 0.0,
1718
+ 0.0,
1719
+ 0.0,
1720
+ 0.0,
1721
+ 0.0,
1722
+ 0.0
1723
+ ],
1724
+ "mean": [
1725
+ 0.0,
1726
+ 0.0,
1727
+ 0.0,
1728
+ 0.0,
1729
+ 0.0,
1730
+ 0.0,
1731
+ 0.0
1732
+ ],
1733
+ "min": [
1734
+ 0.0,
1735
+ 0.0,
1736
+ 0.0,
1737
+ 0.0,
1738
+ 0.0,
1739
+ 0.0,
1740
+ 0.0
1741
+ ],
1742
+ "q01": [
1743
+ 0.0,
1744
+ 0.0,
1745
+ 0.0,
1746
+ 0.0,
1747
+ 0.0,
1748
+ 0.0,
1749
+ 0.0
1750
+ ],
1751
+ "q99": [
1752
+ 0.0,
1753
+ 0.0,
1754
+ 0.0,
1755
+ 0.0,
1756
+ 0.0,
1757
+ 0.0,
1758
+ 0.0
1759
+ ],
1760
+ "std": [
1761
+ 0.0,
1762
+ 0.0,
1763
+ 0.0,
1764
+ 0.0,
1765
+ 0.0,
1766
+ 0.0,
1767
+ 0.0
1768
+ ]
1769
+ }
1770
+ },
1771
+ "iamlab_cmu_pickup_insert_converted_externally_to_rlds": {
1772
+ "action": {
1773
+ "mask": [
1774
+ true,
1775
+ true,
1776
+ true,
1777
+ true,
1778
+ true,
1779
+ true,
1780
+ false
1781
+ ],
1782
+ "max": [
1783
+ 0.6634981632232666,
1784
+ 0.23428471386432648,
1785
+ 0.4308285415172577,
1786
+ 3.1415927410125732,
1787
+ 0.13647015392780304,
1788
+ 3.141592502593994,
1789
+ 1.0
1790
+ ],
1791
+ "mean": [
1792
+ 0.5274372696876526,
1793
+ 0.02858201041817665,
1794
+ 0.18712575733661652,
1795
+ 1.2339589595794678,
1796
+ 0.03226623684167862,
1797
+ -1.4199490547180176,
1798
+ 0.5550631880760193
1799
+ ],
1800
+ "min": [
1801
+ 0.3071657121181488,
1802
+ -0.29754969477653503,
1803
+ 0.06578229367733002,
1804
+ -3.1415927410125732,
1805
+ -0.04584203287959099,
1806
+ -3.141592502593994,
1807
+ 0.0
1808
+ ],
1809
+ "q01": [
1810
+ 0.3148897051811218,
1811
+ -0.20317550599575043,
1812
+ 0.06785467118024827,
1813
+ -3.140952730178833,
1814
+ -0.029743434861302376,
1815
+ -3.141091251373291,
1816
+ 0.0
1817
+ ],
1818
+ "q99": [
1819
+ 0.6472805738449097,
1820
+ 0.20846802592277527,
1821
+ 0.36855655312538155,
1822
+ 3.1409926891326903,
1823
+ 0.11424950212240226,
1824
+ 3.1410969257354737,
1825
+ 1.0
1826
+ ],
1827
+ "std": [
1828
+ 0.08108345419168472,
1829
+ 0.1116757020354271,
1830
+ 0.07747554779052734,
1831
+ 2.8737246990203857,
1832
+ 0.02774704433977604,
1833
+ 2.7678682804107666,
1834
+ 0.49695101380348206
1835
+ ]
1836
+ },
1837
+ "num_trajectories": 631,
1838
+ "num_transitions": 146241,
1839
+ "proprio": {
1840
+ "max": [
1841
+ 0.0,
1842
+ 0.0,
1843
+ 0.0,
1844
+ 0.0,
1845
+ 0.0,
1846
+ 0.0,
1847
+ 0.0
1848
+ ],
1849
+ "mean": [
1850
+ 0.0,
1851
+ 0.0,
1852
+ 0.0,
1853
+ 0.0,
1854
+ 0.0,
1855
+ 0.0,
1856
+ 0.0
1857
+ ],
1858
+ "min": [
1859
+ 0.0,
1860
+ 0.0,
1861
+ 0.0,
1862
+ 0.0,
1863
+ 0.0,
1864
+ 0.0,
1865
+ 0.0
1866
+ ],
1867
+ "q01": [
1868
+ 0.0,
1869
+ 0.0,
1870
+ 0.0,
1871
+ 0.0,
1872
+ 0.0,
1873
+ 0.0,
1874
+ 0.0
1875
+ ],
1876
+ "q99": [
1877
+ 0.0,
1878
+ 0.0,
1879
+ 0.0,
1880
+ 0.0,
1881
+ 0.0,
1882
+ 0.0,
1883
+ 0.0
1884
+ ],
1885
+ "std": [
1886
+ 0.0,
1887
+ 0.0,
1888
+ 0.0,
1889
+ 0.0,
1890
+ 0.0,
1891
+ 0.0,
1892
+ 0.0
1893
+ ]
1894
+ }
1895
+ },
1896
+ "jaco_play": {
1897
+ "action": {
1898
+ "mask": [
1899
+ true,
1900
+ true,
1901
+ true,
1902
+ true,
1903
+ true,
1904
+ true,
1905
+ false
1906
+ ],
1907
+ "max": [
1908
+ 0.20000000298023224,
1909
+ 0.20000000298023224,
1910
+ 0.20000000298023224,
1911
+ 0.0,
1912
+ 0.0,
1913
+ 0.0,
1914
+ 1.0
1915
+ ],
1916
+ "mean": [
1917
+ 0.0009658430935814977,
1918
+ -0.00580078037455678,
1919
+ -0.00395062193274498,
1920
+ 0.0,
1921
+ 0.0,
1922
+ 0.0,
1923
+ 0.34934908151626587
1924
+ ],
1925
+ "min": [
1926
+ -0.20000000298023224,
1927
+ -0.20000000298023224,
1928
+ -0.20000000298023224,
1929
+ 0.0,
1930
+ 0.0,
1931
+ 0.0,
1932
+ 0.0
1933
+ ],
1934
+ "q01": [
1935
+ -0.20000000298023224,
1936
+ -0.20000000298023224,
1937
+ -0.20000000298023224,
1938
+ 0.0,
1939
+ 0.0,
1940
+ 0.0,
1941
+ 0.0
1942
+ ],
1943
+ "q99": [
1944
+ 0.20000000298023224,
1945
+ 0.20000000298023224,
1946
+ 0.20000000298023224,
1947
+ 0.0,
1948
+ 0.0,
1949
+ 0.0,
1950
+ 1.0
1951
+ ],
1952
+ "std": [
1953
+ 0.12235074490308762,
1954
+ 0.09678777307271957,
1955
+ 0.11155334860086441,
1956
+ 0.0,
1957
+ 0.0,
1958
+ 0.0,
1959
+ 0.4768252968788147
1960
+ ]
1961
+ },
1962
+ "num_trajectories": 1085,
1963
+ "num_transitions": 77965,
1964
+ "proprio": {
1965
+ "max": [
1966
+ 0.0,
1967
+ 0.0,
1968
+ 0.0,
1969
+ 0.0,
1970
+ 0.0,
1971
+ 0.0,
1972
+ 0.0
1973
+ ],
1974
+ "mean": [
1975
+ 0.0,
1976
+ 0.0,
1977
+ 0.0,
1978
+ 0.0,
1979
+ 0.0,
1980
+ 0.0,
1981
+ 0.0
1982
+ ],
1983
+ "min": [
1984
+ 0.0,
1985
+ 0.0,
1986
+ 0.0,
1987
+ 0.0,
1988
+ 0.0,
1989
+ 0.0,
1990
+ 0.0
1991
+ ],
1992
+ "q01": [
1993
+ 0.0,
1994
+ 0.0,
1995
+ 0.0,
1996
+ 0.0,
1997
+ 0.0,
1998
+ 0.0,
1999
+ 0.0
2000
+ ],
2001
+ "q99": [
2002
+ 0.0,
2003
+ 0.0,
2004
+ 0.0,
2005
+ 0.0,
2006
+ 0.0,
2007
+ 0.0,
2008
+ 0.0
2009
+ ],
2010
+ "std": [
2011
+ 0.0,
2012
+ 0.0,
2013
+ 0.0,
2014
+ 0.0,
2015
+ 0.0,
2016
+ 0.0,
2017
+ 0.0
2018
+ ]
2019
+ }
2020
+ },
2021
+ "kuka": {
2022
+ "action": {
2023
+ "mask": [
2024
+ true,
2025
+ true,
2026
+ true,
2027
+ true,
2028
+ true,
2029
+ true,
2030
+ false
2031
+ ],
2032
+ "max": [
2033
+ 0.1697135865688324,
2034
+ 0.2777623236179352,
2035
+ 0.43710532784461975,
2036
+ 0.0,
2037
+ 0.0,
2038
+ 1.9684287309646606,
2039
+ 1.0
2040
+ ],
2041
+ "mean": [
2042
+ -0.0004668905457947403,
2043
+ 0.00040138536132872105,
2044
+ -0.001280792523175478,
2045
+ 0.0,
2046
+ 0.0,
2047
+ -0.03722453489899635,
2048
+ 0.4131543040275574
2049
+ ],
2050
+ "min": [
2051
+ -0.159867063164711,
2052
+ -0.2892282009124756,
2053
+ -0.2795473635196686,
2054
+ 0.0,
2055
+ 0.0,
2056
+ -1.9875637292861938,
2057
+ 0.0
2058
+ ],
2059
+ "q01": [
2060
+ -0.06619441494345665,
2061
+ -0.08713878810405731,
2062
+ -0.15083016991615295,
2063
+ 0.0,
2064
+ 0.0,
2065
+ -0.5415697038173676,
2066
+ 0.0
2067
+ ],
2068
+ "q99": [
2069
+ 0.06601839080452929,
2070
+ 0.08732476785779003,
2071
+ 0.18168179214000715,
2072
+ 0.0,
2073
+ 0.0,
2074
+ 0.2923380345106127,
2075
+ 1.0
2076
+ ],
2077
+ "std": [
2078
+ 0.02083250693976879,
2079
+ 0.02915887162089348,
2080
+ 0.06422865390777588,
2081
+ 0.0,
2082
+ 0.0,
2083
+ 0.14224295318126678,
2084
+ 0.49086448550224304
2085
+ ]
2086
+ },
2087
+ "num_trajectories": 209880,
2088
+ "num_transitions": 2455879,
2089
+ "proprio": {
2090
+ "max": [
2091
+ 0.0,
2092
+ 0.0,
2093
+ 0.0,
2094
+ 0.0,
2095
+ 0.0,
2096
+ 0.0,
2097
+ 0.0
2098
+ ],
2099
+ "mean": [
2100
+ 0.0,
2101
+ 0.0,
2102
+ 0.0,
2103
+ 0.0,
2104
+ 0.0,
2105
+ 0.0,
2106
+ 0.0
2107
+ ],
2108
+ "min": [
2109
+ 0.0,
2110
+ 0.0,
2111
+ 0.0,
2112
+ 0.0,
2113
+ 0.0,
2114
+ 0.0,
2115
+ 0.0
2116
+ ],
2117
+ "q01": [
2118
+ 0.0,
2119
+ 0.0,
2120
+ 0.0,
2121
+ 0.0,
2122
+ 0.0,
2123
+ 0.0,
2124
+ 0.0
2125
+ ],
2126
+ "q99": [
2127
+ 0.0,
2128
+ 0.0,
2129
+ 0.0,
2130
+ 0.0,
2131
+ 0.0,
2132
+ 0.0,
2133
+ 0.0
2134
+ ],
2135
+ "std": [
2136
+ 0.0,
2137
+ 0.0,
2138
+ 0.0,
2139
+ 0.0,
2140
+ 0.0,
2141
+ 0.0,
2142
+ 0.0
2143
+ ]
2144
+ }
2145
+ },
2146
+ "nyu_franka_play_dataset_converted_externally_to_rlds": {
2147
+ "action": {
2148
+ "mask": [
2149
+ true,
2150
+ true,
2151
+ true,
2152
+ true,
2153
+ true,
2154
+ true,
2155
+ false
2156
+ ],
2157
+ "max": [
2158
+ 0.06424188613891602,
2159
+ 0.07027634978294373,
2160
+ 0.06129661202430725,
2161
+ 6.281067848205566,
2162
+ 0.1967729926109314,
2163
+ 0.26377415657043457,
2164
+ 1.0
2165
+ ],
2166
+ "mean": [
2167
+ 0.001021989737637341,
2168
+ -0.00012002651783404872,
2169
+ 0.00032894269679673016,
2170
+ 0.0015034361276775599,
2171
+ -0.002198522910475731,
2172
+ -0.001663230243138969,
2173
+ 0.7230083346366882
2174
+ ],
2175
+ "min": [
2176
+ -0.05952230095863342,
2177
+ -0.07232445478439331,
2178
+ -0.06730806827545166,
2179
+ -6.278434753417969,
2180
+ -0.21479034423828125,
2181
+ -0.3627619743347168,
2182
+ 0.0
2183
+ ],
2184
+ "q01": [
2185
+ -0.03199600875377655,
2186
+ -0.032861671447753905,
2187
+ -0.03368805110454559,
2188
+ -0.12080862045288086,
2189
+ -0.12175218224525451,
2190
+ -0.11370223641395569,
2191
+ 0.0
2192
+ ],
2193
+ "q99": [
2194
+ 0.03101520001888276,
2195
+ 0.0373908892273903,
2196
+ 0.03646374464035038,
2197
+ 0.11764093399047852,
2198
+ 0.1258920183777809,
2199
+ 0.09366151213645942,
2200
+ 1.0
2201
+ ],
2202
+ "std": [
2203
+ 0.01327415369451046,
2204
+ 0.013215910643339157,
2205
+ 0.012822109274566174,
2206
+ 0.2732451558113098,
2207
+ 0.057022541761398315,
2208
+ 0.039172880351543427,
2209
+ 0.44752755761146545
2210
+ ]
2211
+ },
2212
+ "num_trajectories": 456,
2213
+ "num_transitions": 44875,
2214
+ "proprio": {
2215
+ "max": [
2216
+ 0.0,
2217
+ 0.0,
2218
+ 0.0,
2219
+ 0.0,
2220
+ 0.0,
2221
+ 0.0,
2222
+ 0.0
2223
+ ],
2224
+ "mean": [
2225
+ 0.0,
2226
+ 0.0,
2227
+ 0.0,
2228
+ 0.0,
2229
+ 0.0,
2230
+ 0.0,
2231
+ 0.0
2232
+ ],
2233
+ "min": [
2234
+ 0.0,
2235
+ 0.0,
2236
+ 0.0,
2237
+ 0.0,
2238
+ 0.0,
2239
+ 0.0,
2240
+ 0.0
2241
+ ],
2242
+ "q01": [
2243
+ 0.0,
2244
+ 0.0,
2245
+ 0.0,
2246
+ 0.0,
2247
+ 0.0,
2248
+ 0.0,
2249
+ 0.0
2250
+ ],
2251
+ "q99": [
2252
+ 0.0,
2253
+ 0.0,
2254
+ 0.0,
2255
+ 0.0,
2256
+ 0.0,
2257
+ 0.0,
2258
+ 0.0
2259
+ ],
2260
+ "std": [
2261
+ 0.0,
2262
+ 0.0,
2263
+ 0.0,
2264
+ 0.0,
2265
+ 0.0,
2266
+ 0.0,
2267
+ 0.0
2268
+ ]
2269
+ }
2270
+ },
2271
+ "roboturk": {
2272
+ "action": {
2273
+ "mask": [
2274
+ true,
2275
+ true,
2276
+ true,
2277
+ true,
2278
+ true,
2279
+ true,
2280
+ false
2281
+ ],
2282
+ "max": [
2283
+ 0.39124172925949097,
2284
+ 0.4601028263568878,
2285
+ 0.4870833456516266,
2286
+ 1.816888689994812,
2287
+ 1.8240282535552979,
2288
+ 1.4824820756912231,
2289
+ 1.0
2290
+ ],
2291
+ "mean": [
2292
+ 0.0014448732836171985,
2293
+ -0.0015945249469950795,
2294
+ -0.0011753785656765103,
2295
+ 0.0023012510500848293,
2296
+ -0.0009382463176734746,
2297
+ -0.00011485807772260159,
2298
+ 0.5746025443077087
2299
+ ],
2300
+ "min": [
2301
+ -0.6546999216079712,
2302
+ -0.6365841031074524,
2303
+ -0.4217723608016968,
2304
+ -1.6695482730865479,
2305
+ -1.8023357391357422,
2306
+ -1.4630827903747559,
2307
+ 0.0
2308
+ ],
2309
+ "q01": [
2310
+ -0.1342635464668274,
2311
+ -0.19996687173843383,
2312
+ -0.1482972100377083,
2313
+ -0.20720748245716095,
2314
+ -0.09676413893699647,
2315
+ -0.18075634717941286,
2316
+ 0.0
2317
+ ],
2318
+ "q99": [
2319
+ 0.14956976801157001,
2320
+ 0.1805950567126275,
2321
+ 0.18841815620660796,
2322
+ 0.21615413755178453,
2323
+ 0.09457383215427405,
2324
+ 0.18543301910162005,
2325
+ 1.0
2326
+ ],
2327
+ "std": [
2328
+ 0.04935386776924133,
2329
+ 0.0635455846786499,
2330
+ 0.061164740473032,
2331
+ 0.09553450345993042,
2332
+ 0.08420111238956451,
2333
+ 0.06517903506755829,
2334
+ 0.49452081322669983
2335
+ ]
2336
+ },
2337
+ "num_trajectories": 1995,
2338
+ "num_transitions": 187507,
2339
+ "proprio": {
2340
+ "max": [
2341
+ 0.0,
2342
+ 0.0,
2343
+ 0.0,
2344
+ 0.0,
2345
+ 0.0,
2346
+ 0.0,
2347
+ 0.0
2348
+ ],
2349
+ "mean": [
2350
+ 0.0,
2351
+ 0.0,
2352
+ 0.0,
2353
+ 0.0,
2354
+ 0.0,
2355
+ 0.0,
2356
+ 0.0
2357
+ ],
2358
+ "min": [
2359
+ 0.0,
2360
+ 0.0,
2361
+ 0.0,
2362
+ 0.0,
2363
+ 0.0,
2364
+ 0.0,
2365
+ 0.0
2366
+ ],
2367
+ "q01": [
2368
+ 0.0,
2369
+ 0.0,
2370
+ 0.0,
2371
+ 0.0,
2372
+ 0.0,
2373
+ 0.0,
2374
+ 0.0
2375
+ ],
2376
+ "q99": [
2377
+ 0.0,
2378
+ 0.0,
2379
+ 0.0,
2380
+ 0.0,
2381
+ 0.0,
2382
+ 0.0,
2383
+ 0.0
2384
+ ],
2385
+ "std": [
2386
+ 0.0,
2387
+ 0.0,
2388
+ 0.0,
2389
+ 0.0,
2390
+ 0.0,
2391
+ 0.0,
2392
+ 0.0
2393
+ ]
2394
+ }
2395
+ },
2396
+ "stanford_hydra_dataset_converted_externally_to_rlds": {
2397
+ "action": {
2398
+ "mask": [
2399
+ true,
2400
+ true,
2401
+ true,
2402
+ true,
2403
+ true,
2404
+ true,
2405
+ false
2406
+ ],
2407
+ "max": [
2408
+ 0.02499854564666748,
2409
+ 0.02499903365969658,
2410
+ 0.024999922141432762,
2411
+ 0.24974457919597626,
2412
+ 0.24997030198574066,
2413
+ 0.24999946355819702,
2414
+ 1.0
2415
+ ],
2416
+ "mean": [
2417
+ 0.0007790001109242439,
2418
+ 0.00013707754260394722,
2419
+ -0.0002548607881180942,
2420
+ 0.0012903271708637476,
2421
+ -0.004751681815832853,
2422
+ 0.002692886395379901,
2423
+ 0.48855218291282654
2424
+ ],
2425
+ "min": [
2426
+ -0.024999044835567474,
2427
+ -0.024999700486660004,
2428
+ -0.02499929815530777,
2429
+ -0.24993225932121277,
2430
+ -0.2499666064977646,
2431
+ -0.2499932497739792,
2432
+ 0.0
2433
+ ],
2434
+ "q01": [
2435
+ -0.019992006458342076,
2436
+ -0.02415412735193968,
2437
+ -0.022941758055239916,
2438
+ -0.11085530579090118,
2439
+ -0.12024572037160397,
2440
+ -0.13314770206809043,
2441
+ 0.0
2442
+ ],
2443
+ "q99": [
2444
+ 0.022886231057345868,
2445
+ 0.022358838934451335,
2446
+ 0.02410089675337076,
2447
+ 0.12370114490389822,
2448
+ 0.11323311634361738,
2449
+ 0.18474749639630164,
2450
+ 1.0
2451
+ ],
2452
+ "std": [
2453
+ 0.008022161200642586,
2454
+ 0.009131459519267082,
2455
+ 0.009574338793754578,
2456
+ 0.04122216999530792,
2457
+ 0.0384303517639637,
2458
+ 0.04606688767671585,
2459
+ 0.49976691603660583
2460
+ ]
2461
+ },
2462
+ "num_trajectories": 570,
2463
+ "num_transitions": 358234,
2464
+ "proprio": {
2465
+ "max": [
2466
+ 0.0,
2467
+ 0.0,
2468
+ 0.0,
2469
+ 0.0,
2470
+ 0.0,
2471
+ 0.0,
2472
+ 0.0
2473
+ ],
2474
+ "mean": [
2475
+ 0.0,
2476
+ 0.0,
2477
+ 0.0,
2478
+ 0.0,
2479
+ 0.0,
2480
+ 0.0,
2481
+ 0.0
2482
+ ],
2483
+ "min": [
2484
+ 0.0,
2485
+ 0.0,
2486
+ 0.0,
2487
+ 0.0,
2488
+ 0.0,
2489
+ 0.0,
2490
+ 0.0
2491
+ ],
2492
+ "q01": [
2493
+ 0.0,
2494
+ 0.0,
2495
+ 0.0,
2496
+ 0.0,
2497
+ 0.0,
2498
+ 0.0,
2499
+ 0.0
2500
+ ],
2501
+ "q99": [
2502
+ 0.0,
2503
+ 0.0,
2504
+ 0.0,
2505
+ 0.0,
2506
+ 0.0,
2507
+ 0.0,
2508
+ 0.0
2509
+ ],
2510
+ "std": [
2511
+ 0.0,
2512
+ 0.0,
2513
+ 0.0,
2514
+ 0.0,
2515
+ 0.0,
2516
+ 0.0,
2517
+ 0.0
2518
+ ]
2519
+ }
2520
+ },
2521
+ "taco_play": {
2522
+ "action": {
2523
+ "mask": [
2524
+ true,
2525
+ true,
2526
+ true,
2527
+ true,
2528
+ true,
2529
+ true,
2530
+ false
2531
+ ],
2532
+ "max": [
2533
+ 1.4915844202041626,
2534
+ 2.1842432022094727,
2535
+ 2.6836395263671875,
2536
+ 5.035226821899414,
2537
+ 2.665864944458008,
2538
+ 4.250768661499023,
2539
+ 1.0
2540
+ ],
2541
+ "mean": [
2542
+ -0.003845922416076064,
2543
+ 0.009671456180512905,
2544
+ 0.012780580669641495,
2545
+ -0.005403771996498108,
2546
+ -0.009606587700545788,
2547
+ -0.002480733208358288,
2548
+ 0.4263913035392761
2549
+ ],
2550
+ "min": [
2551
+ -4.242457866668701,
2552
+ -3.192805051803589,
2553
+ -1.3371467590332031,
2554
+ -4.202683448791504,
2555
+ -2.6722638607025146,
2556
+ -3.3467135429382324,
2557
+ 0.0
2558
+ ],
2559
+ "q01": [
2560
+ -0.7106140398979186,
2561
+ -1.056944659948349,
2562
+ -0.5878450274467468,
2563
+ -0.7682853937149048,
2564
+ -0.7180147767066956,
2565
+ -1.5527938604354858,
2566
+ 0.0
2567
+ ],
2568
+ "q99": [
2569
+ 0.6482916426658629,
2570
+ 1.0051310062408447,
2571
+ 0.9480248689651489,
2572
+ 0.6926478147506714,
2573
+ 0.6351067513227462,
2574
+ 1.628010264635086,
2575
+ 1.0
2576
+ ],
2577
+ "std": [
2578
+ 0.23254038393497467,
2579
+ 0.36298269033432007,
2580
+ 0.28692901134490967,
2581
+ 0.2617705166339874,
2582
+ 0.2438892275094986,
2583
+ 0.5216503143310547,
2584
+ 0.4946896731853485
2585
+ ]
2586
+ },
2587
+ "num_trajectories": 3603,
2588
+ "num_transitions": 237798,
2589
+ "proprio": {
2590
+ "max": [
2591
+ 0.0,
2592
+ 0.0,
2593
+ 0.0,
2594
+ 0.0,
2595
+ 0.0,
2596
+ 0.0,
2597
+ 0.0
2598
+ ],
2599
+ "mean": [
2600
+ 0.0,
2601
+ 0.0,
2602
+ 0.0,
2603
+ 0.0,
2604
+ 0.0,
2605
+ 0.0,
2606
+ 0.0
2607
+ ],
2608
+ "min": [
2609
+ 0.0,
2610
+ 0.0,
2611
+ 0.0,
2612
+ 0.0,
2613
+ 0.0,
2614
+ 0.0,
2615
+ 0.0
2616
+ ],
2617
+ "q01": [
2618
+ 0.0,
2619
+ 0.0,
2620
+ 0.0,
2621
+ 0.0,
2622
+ 0.0,
2623
+ 0.0,
2624
+ 0.0
2625
+ ],
2626
+ "q99": [
2627
+ 0.0,
2628
+ 0.0,
2629
+ 0.0,
2630
+ 0.0,
2631
+ 0.0,
2632
+ 0.0,
2633
+ 0.0
2634
+ ],
2635
+ "std": [
2636
+ 0.0,
2637
+ 0.0,
2638
+ 0.0,
2639
+ 0.0,
2640
+ 0.0,
2641
+ 0.0,
2642
+ 0.0
2643
+ ]
2644
+ }
2645
+ },
2646
+ "toto": {
2647
+ "action": {
2648
+ "mask": [
2649
+ true,
2650
+ true,
2651
+ true,
2652
+ true,
2653
+ true,
2654
+ true,
2655
+ false
2656
+ ],
2657
+ "max": [
2658
+ 0.6839867234230042,
2659
+ 0.4454185664653778,
2660
+ 0.7984078526496887,
2661
+ 2.120781660079956,
2662
+ 1.371164321899414,
2663
+ 1.4118704795837402,
2664
+ 0.0
2665
+ ],
2666
+ "mean": [
2667
+ 0.38542115688323975,
2668
+ 0.007769413758069277,
2669
+ 0.3632740378379822,
2670
+ -0.6652036905288696,
2671
+ 0.1890396922826767,
2672
+ 0.03298724442720413,
2673
+ 0.0
2674
+ ],
2675
+ "min": [
2676
+ 0.09922284632921219,
2677
+ -0.5180193781852722,
2678
+ 0.13791072368621826,
2679
+ -2.635117530822754,
2680
+ -1.0734480619430542,
2681
+ -1.9282547235488892,
2682
+ 0.0
2683
+ ],
2684
+ "q01": [
2685
+ 0.1756722891330719,
2686
+ -0.3077590811252594,
2687
+ 0.235383919775486,
2688
+ -2.0908505964279174,
2689
+ -0.6191593289375306,
2690
+ -0.7488683319091797,
2691
+ 0.0
2692
+ ],
2693
+ "q99": [
2694
+ 0.6136963081359863,
2695
+ 0.33704194784164443,
2696
+ 0.6681221985816956,
2697
+ 0.7422861719131538,
2698
+ 0.7955395007133507,
2699
+ 0.740464625358582,
2700
+ 0.0
2701
+ ],
2702
+ "std": [
2703
+ 0.12211652100086212,
2704
+ 0.19378550350666046,
2705
+ 0.10178236663341522,
2706
+ 0.5725259184837341,
2707
+ 0.29884573817253113,
2708
+ 0.3259911835193634,
2709
+ 0.0
2710
+ ]
2711
+ },
2712
+ "num_trajectories": 1003,
2713
+ "num_transitions": 325699,
2714
+ "proprio": {
2715
+ "max": [
2716
+ 0.0,
2717
+ 0.0,
2718
+ 0.0,
2719
+ 0.0,
2720
+ 0.0,
2721
+ 0.0,
2722
+ 0.0
2723
+ ],
2724
+ "mean": [
2725
+ 0.0,
2726
+ 0.0,
2727
+ 0.0,
2728
+ 0.0,
2729
+ 0.0,
2730
+ 0.0,
2731
+ 0.0
2732
+ ],
2733
+ "min": [
2734
+ 0.0,
2735
+ 0.0,
2736
+ 0.0,
2737
+ 0.0,
2738
+ 0.0,
2739
+ 0.0,
2740
+ 0.0
2741
+ ],
2742
+ "q01": [
2743
+ 0.0,
2744
+ 0.0,
2745
+ 0.0,
2746
+ 0.0,
2747
+ 0.0,
2748
+ 0.0,
2749
+ 0.0
2750
+ ],
2751
+ "q99": [
2752
+ 0.0,
2753
+ 0.0,
2754
+ 0.0,
2755
+ 0.0,
2756
+ 0.0,
2757
+ 0.0,
2758
+ 0.0
2759
+ ],
2760
+ "std": [
2761
+ 0.0,
2762
+ 0.0,
2763
+ 0.0,
2764
+ 0.0,
2765
+ 0.0,
2766
+ 0.0,
2767
+ 0.0
2768
+ ]
2769
+ }
2770
+ },
2771
+ "ucsd_kitchen_dataset_converted_externally_to_rlds": {
2772
+ "action": {
2773
+ "mask": [
2774
+ true,
2775
+ true,
2776
+ true,
2777
+ true,
2778
+ true,
2779
+ true,
2780
+ false
2781
+ ],
2782
+ "max": [
2783
+ 678.0,
2784
+ 400.0,
2785
+ 507.0,
2786
+ 180.00001525878906,
2787
+ 6.000013828277588,
2788
+ 116.99998474121094,
2789
+ 1.0
2790
+ ],
2791
+ "mean": [
2792
+ 410.37567138671875,
2793
+ 116.9518814086914,
2794
+ 192.35032653808594,
2795
+ -121.22441864013672,
2796
+ -33.84893035888672,
2797
+ 50.016136169433594,
2798
+ 0.741813600063324
2799
+ ],
2800
+ "min": [
2801
+ 172.0,
2802
+ -166.0,
2803
+ -99.99999237060547,
2804
+ -180.00001525878906,
2805
+ -89.0,
2806
+ -96.00010681152344,
2807
+ 0.0
2808
+ ],
2809
+ "q01": [
2810
+ 200.00001052856445,
2811
+ -102.31004211425781,
2812
+ -94.99993370056153,
2813
+ -180.00001525878906,
2814
+ -88.00001525878906,
2815
+ -38.999977111816406,
2816
+ 0.0
2817
+ ],
2818
+ "q99": [
2819
+ 637.0,
2820
+ 368.30999999999995,
2821
+ 493.0,
2822
+ 180.00001525878906,
2823
+ 0.999983012676239,
2824
+ 105.00001525878906,
2825
+ 1.0
2826
+ ],
2827
+ "std": [
2828
+ 122.81494903564453,
2829
+ 108.8009033203125,
2830
+ 130.303466796875,
2831
+ 116.28205108642578,
2832
+ 27.621843338012695,
2833
+ 41.02094650268555,
2834
+ 0.43763357400894165
2835
+ ]
2836
+ },
2837
+ "num_trajectories": 150,
2838
+ "num_transitions": 3970,
2839
+ "proprio": {
2840
+ "max": [
2841
+ 0.0,
2842
+ 0.0,
2843
+ 0.0,
2844
+ 0.0,
2845
+ 0.0,
2846
+ 0.0,
2847
+ 0.0
2848
+ ],
2849
+ "mean": [
2850
+ 0.0,
2851
+ 0.0,
2852
+ 0.0,
2853
+ 0.0,
2854
+ 0.0,
2855
+ 0.0,
2856
+ 0.0
2857
+ ],
2858
+ "min": [
2859
+ 0.0,
2860
+ 0.0,
2861
+ 0.0,
2862
+ 0.0,
2863
+ 0.0,
2864
+ 0.0,
2865
+ 0.0
2866
+ ],
2867
+ "q01": [
2868
+ 0.0,
2869
+ 0.0,
2870
+ 0.0,
2871
+ 0.0,
2872
+ 0.0,
2873
+ 0.0,
2874
+ 0.0
2875
+ ],
2876
+ "q99": [
2877
+ 0.0,
2878
+ 0.0,
2879
+ 0.0,
2880
+ 0.0,
2881
+ 0.0,
2882
+ 0.0,
2883
+ 0.0
2884
+ ],
2885
+ "std": [
2886
+ 0.0,
2887
+ 0.0,
2888
+ 0.0,
2889
+ 0.0,
2890
+ 0.0,
2891
+ 0.0,
2892
+ 0.0
2893
+ ]
2894
+ }
2895
+ },
2896
+ "utaustin_mutex": {
2897
+ "action": {
2898
+ "mask": [
2899
+ true,
2900
+ true,
2901
+ true,
2902
+ true,
2903
+ true,
2904
+ true,
2905
+ false
2906
+ ],
2907
+ "max": [
2908
+ 1.0,
2909
+ 1.0,
2910
+ 1.0,
2911
+ 0.375,
2912
+ 0.375,
2913
+ 0.375,
2914
+ 1.0
2915
+ ],
2916
+ "mean": [
2917
+ 0.06176406890153885,
2918
+ -0.005005486309528351,
2919
+ 0.10216785222291946,
2920
+ -0.03314131125807762,
2921
+ 0.013895004987716675,
2922
+ -0.011317633092403412,
2923
+ 0.5038976669311523
2924
+ ],
2925
+ "min": [
2926
+ -1.0,
2927
+ -1.0,
2928
+ -1.0,
2929
+ -0.375,
2930
+ -0.375,
2931
+ -0.375,
2932
+ 0.0
2933
+ ],
2934
+ "q01": [
2935
+ -0.4285714328289032,
2936
+ -0.9800000190734863,
2937
+ -0.5571428537368774,
2938
+ -0.375,
2939
+ -0.15642857551574707,
2940
+ -0.335357129573822,
2941
+ 0.0
2942
+ ],
2943
+ "q99": [
2944
+ 0.5914285778999329,
2945
+ 0.9714285731315613,
2946
+ 1.0,
2947
+ 0.3278571367263794,
2948
+ 0.207857146859169,
2949
+ 0.25607141852378845,
2950
+ 1.0
2951
+ ],
2952
+ "std": [
2953
+ 0.1875014752149582,
2954
+ 0.4468473494052887,
2955
+ 0.3792876601219177,
2956
+ 0.14097853004932404,
2957
+ 0.06453701853752136,
2958
+ 0.11765272170305252,
2959
+ 0.501045286655426
2960
+ ]
2961
+ },
2962
+ "num_trajectories": 1500,
2963
+ "num_transitions": 361883,
2964
+ "proprio": {
2965
+ "max": [
2966
+ 0.0,
2967
+ 0.0,
2968
+ 0.0,
2969
+ 0.0,
2970
+ 0.0,
2971
+ 0.0,
2972
+ 0.0
2973
+ ],
2974
+ "mean": [
2975
+ 0.0,
2976
+ 0.0,
2977
+ 0.0,
2978
+ 0.0,
2979
+ 0.0,
2980
+ 0.0,
2981
+ 0.0
2982
+ ],
2983
+ "min": [
2984
+ 0.0,
2985
+ 0.0,
2986
+ 0.0,
2987
+ 0.0,
2988
+ 0.0,
2989
+ 0.0,
2990
+ 0.0
2991
+ ],
2992
+ "q01": [
2993
+ 0.0,
2994
+ 0.0,
2995
+ 0.0,
2996
+ 0.0,
2997
+ 0.0,
2998
+ 0.0,
2999
+ 0.0
3000
+ ],
3001
+ "q99": [
3002
+ 0.0,
3003
+ 0.0,
3004
+ 0.0,
3005
+ 0.0,
3006
+ 0.0,
3007
+ 0.0,
3008
+ 0.0
3009
+ ],
3010
+ "std": [
3011
+ 0.0,
3012
+ 0.0,
3013
+ 0.0,
3014
+ 0.0,
3015
+ 0.0,
3016
+ 0.0,
3017
+ 0.0
3018
+ ]
3019
+ }
3020
+ },
3021
+ "viola": {
3022
+ "action": {
3023
+ "mask": [
3024
+ true,
3025
+ true,
3026
+ true,
3027
+ true,
3028
+ true,
3029
+ true,
3030
+ false
3031
+ ],
3032
+ "max": [
3033
+ 1.0,
3034
+ 1.0,
3035
+ 1.0,
3036
+ 0.375,
3037
+ 0.36321428418159485,
3038
+ 0.375,
3039
+ 1.0
3040
+ ],
3041
+ "mean": [
3042
+ 0.04761844128370285,
3043
+ -0.029204415157437325,
3044
+ 0.05586736649274826,
3045
+ -0.002618510741740465,
3046
+ 0.006867344491183758,
3047
+ -0.01682133786380291,
3048
+ 0.7323777675628662
3049
+ ],
3050
+ "min": [
3051
+ -1.0,
3052
+ -1.0,
3053
+ -1.0,
3054
+ -0.375,
3055
+ -0.375,
3056
+ -0.375,
3057
+ 0.0
3058
+ ],
3059
+ "q01": [
3060
+ -0.9628571271896362,
3061
+ -1.0,
3062
+ -1.0,
3063
+ -0.26249998807907104,
3064
+ -0.21321429312229156,
3065
+ -0.3385714292526245,
3066
+ 0.0
3067
+ ],
3068
+ "q99": [
3069
+ 0.9114285707473755,
3070
+ 0.868571400642395,
3071
+ 1.0,
3072
+ 0.2817857265472412,
3073
+ 0.2239285707473755,
3074
+ 0.3557142913341522,
3075
+ 1.0
3076
+ ],
3077
+ "std": [
3078
+ 0.39157867431640625,
3079
+ 0.4076525568962097,
3080
+ 0.40077948570251465,
3081
+ 0.10023996233940125,
3082
+ 0.0844319611787796,
3083
+ 0.10375042259693146,
3084
+ 0.44260647892951965
3085
+ ]
3086
+ },
3087
+ "num_trajectories": 150,
3088
+ "num_transitions": 76324,
3089
+ "proprio": {
3090
+ "max": [
3091
+ 0.0,
3092
+ 0.0,
3093
+ 0.0,
3094
+ 0.0,
3095
+ 0.0,
3096
+ 0.0,
3097
+ 0.0
3098
+ ],
3099
+ "mean": [
3100
+ 0.0,
3101
+ 0.0,
3102
+ 0.0,
3103
+ 0.0,
3104
+ 0.0,
3105
+ 0.0,
3106
+ 0.0
3107
+ ],
3108
+ "min": [
3109
+ 0.0,
3110
+ 0.0,
3111
+ 0.0,
3112
+ 0.0,
3113
+ 0.0,
3114
+ 0.0,
3115
+ 0.0
3116
+ ],
3117
+ "q01": [
3118
+ 0.0,
3119
+ 0.0,
3120
+ 0.0,
3121
+ 0.0,
3122
+ 0.0,
3123
+ 0.0,
3124
+ 0.0
3125
+ ],
3126
+ "q99": [
3127
+ 0.0,
3128
+ 0.0,
3129
+ 0.0,
3130
+ 0.0,
3131
+ 0.0,
3132
+ 0.0,
3133
+ 0.0
3134
+ ],
3135
+ "std": [
3136
+ 0.0,
3137
+ 0.0,
3138
+ 0.0,
3139
+ 0.0,
3140
+ 0.0,
3141
+ 0.0,
3142
+ 0.0
3143
+ ]
3144
+ }
3145
+ }
3146
+   },
+   "output_projector_states": false,
+   "pad_to_multiple_of": 64,
+   "pad_token_id": 32000,
+   "text_config": {
+     "model_type": "llama",
+     "pad_token_id": 32000,
+     "torch_dtype": "bfloat16",
+     "vocab_size": 32064
+   },
+   "timm_model_ids": [
+     "vit_large_patch14_reg4_dinov2.lvd142m",
+     "vit_so400m_patch14_siglip_224"
+   ],
+   "timm_override_act_layers": [null, null],
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.40.1",
+   "use_fused_vision_backbone": true,
+   "vision_backbone_id": "dinosiglip-vit-so-224px"
+ }
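The per-dataset `q01`/`q99` entries above are 1st/99th-percentile action bounds used for normalization, and `mask` marks which dimensions are normalized at all (the last, gripper dimension is left absolute). A minimal sketch of the usual un-normalization step at inference time, assuming the common OpenVLA-style convention of mapping predictions in [-1, 1] back through the quantile range; the file path, dataset key, and placeholder prediction are illustrative only:

import json

import numpy as np

# Load the action statistics for one dataset from the config dumped above.
with open("output_hf_model_openx/config.json") as f:
    stats = json.load(f)["norm_stats"]["fractal20220817_data"]["action"]

q01, q99 = np.array(stats["q01"]), np.array(stats["q99"])
mask = np.array(stats["mask"])

normalized = np.zeros(7)  # placeholder for a model prediction in [-1, 1]
# Where mask is true, rescale [-1, 1] -> [q01, q99]; elsewhere pass through.
action = np.where(mask, 0.5 * (normalized + 1) * (q99 - q01) + q01, normalized)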
output_hf_model_openx/configuration_prismatic.py ADDED
@@ -0,0 +1,140 @@
+ """
+ configuration_prismatic.py
+
+ HuggingFace-style configuration definition for Prismatic VLMs, inheriting from `transformers.PretrainedConfig`.
+ Default configuration specifies `siglip-224px+7b`.
+ """
+
+ from typing import Any, Dict, List, Optional
+
+ from transformers import PretrainedConfig
+ from transformers.models.auto import CONFIG_MAPPING
+
+ # === Utilities for Mapping Prismatic names to HF names ===
+ # fmt: off
+ VISION_BACKBONE_TO_RESOLUTION: Dict[str, List[int]] = {
+     "clip-vit-l": [224], "siglip-vit-so400m": [224], "dinov2-vit-l": [224], "in1k-vit-l": [224],
+
+     "clip-vit-l-336px": [336],
+     "siglip-vit-so400m-384px": [384],
+
+     "dinoclip-vit-l-336px": [336, 336],
+     "dinosiglip-vit-so-224px": [224, 224],
+     "dinosiglip-vit-so-384px": [384, 384],
+ }
+ VISION_BACKBONE_TO_TIMM_ID: Dict[str, List[str]] = {
+     "clip-vit-l": ["vit_large_patch14_clip_224.openai"],
+     "clip-vit-l-336px": ["vit_large_patch14_clip_336.openai"],
+
+     "dinov2-vit-l": ["vit_large_patch14_reg4_dinov2.lvd142m"],
+     "in1k-vit-l": ["vit_large_patch16_224.augreg_in21k_ft_in1k"],
+
+     "siglip-vit-so400m": ["vit_so400m_patch14_siglip_224"],
+     "siglip-vit-so400m-384px": ["vit_so400m_patch14_siglip_384"],
+
+     "dinoclip-vit-l-336px": ["vit_large_patch14_reg4_dinov2.lvd142m", "vit_large_patch14_clip_336.openai"],
+     "dinosiglip-vit-so-224px": ["vit_large_patch14_reg4_dinov2.lvd142m", "vit_so400m_patch14_siglip_224"],
+     "dinosiglip-vit-so-384px": ["vit_large_patch14_reg4_dinov2.lvd142m", "vit_so400m_patch14_siglip_384"],
+ }
+ TIMM_OVERRIDE_ACT_LAYER: Dict[str, List[Optional[str]]] = {
+     "clip-vit-l": ["quick_gelu"], "clip-vit-l-336px": ["quick_gelu"],
+     "dinov2-vit-l": [None], "in1k-vit-l": [None],
+     "siglip-vit-so400m": [None], "siglip-vit-so400m-384px": [None],
+     "dinoclip-vit-l-336px": [None, "quick_gelu"],
+     "dinosiglip-vit-so-224px": [None, None], "dinosiglip-vit-so-384px": [None, None]
+ }
+
+ LLM_BACKBONE_TO_HF_PATH = {
+     "llama2-7b-pure": "meta-llama/Llama-2-7b-hf", "llama2-13b-pure": "meta-llama/Llama-2-13b-hf",
+     "llama2-7b-chat": "meta-llama/Llama-2-7b-chat-hf", "llama2-13b-chat": "meta-llama/Llama-2-13b-chat-hf",
+
+     "vicuna-v15-7b": "lmsys/vicuna-7b-v1.5", "vicuna-v15-13b": "lmsys/vicuna-13b-v1.5",
+
+     "mistral-v0.1-7b-pure": "mistralai/Mistral-7B-v0.1",
+     "mistral-v0.1-7b-instruct": "mistralai/Mistral-7B-Instruct-v0.1",
+
+     "phi-2-3b": "microsoft/phi-2",
+ }
+ LLM_BACKBONE_TO_HF_METACLASS = {
+     "llama2-7b-pure": "llama", "llama2-13b-pure": "llama", "llama2-7b-chat": "llama", "llama2-13b-chat": "llama",
+     "vicuna-v15-7b": "llama", "vicuna-v15-13b": "llama",
+
+     "mistral-v0.1-7b-pure": "mistral", "mistral-v0.1-7b-instruct": "mistral",
+
+     "phi-2-3b": "phi",
+ }
+
+ VALID_VISION_BACKBONES = set(VISION_BACKBONE_TO_RESOLUTION.keys())
+ VALID_LLM_BACKBONES = set(LLM_BACKBONE_TO_HF_PATH)
+ # fmt: on
+
+
+ class PrismaticConfig(PretrainedConfig):
+     model_type: str = "prismatic"
+     is_composition: bool = False
+
+     def __init__(
+         self,
+         vision_backbone_id: str = "siglip-vit-so400m",
+         llm_backbone_id: str = "vicuna-v15-7b",
+         arch_specifier: str = "no-align+gelu-mlp",
+         use_fused_vision_backbone: Optional[bool] = None,
+         image_resize_strategy: str = "letterbox",
+         text_config: Optional[Dict[str, Any]] = None,
+         llm_max_length: int = 2048,
+         pad_token_id: int = 32000,
+         pad_to_multiple_of: int = 64,
+         output_projector_states: bool = False,
+         **kwargs: str,
+     ) -> None:
+         if vision_backbone_id not in VALID_VISION_BACKBONES:
+             raise ValueError(f"Vision backbone `{vision_backbone_id}` not in {VALID_VISION_BACKBONES = }")
+
+         if llm_backbone_id not in VALID_LLM_BACKBONES:
+             raise ValueError(f"LLM backbone `{llm_backbone_id}` not in {VALID_LLM_BACKBONES = }")
+
+         # Set Prismatic Configuration Fields
+         self.vision_backbone_id = vision_backbone_id
+         self.llm_backbone_id = llm_backbone_id
+         self.arch_specifier = arch_specifier
+         self.output_projector_states = output_projector_states
+
+         # [Contract] All vision backbone parameters are lists =>> supports fused backbones with different preprocessing
+         self.use_fused_vision_backbone = (
+             use_fused_vision_backbone
+             if use_fused_vision_backbone is not None
+             else any(self.vision_backbone_id.startswith(v) for v in ["dinoclip", "dinosiglip"])
+         )
+
+         self.timm_model_ids = VISION_BACKBONE_TO_TIMM_ID[self.vision_backbone_id]
+         self.timm_override_act_layers = TIMM_OVERRIDE_ACT_LAYER[self.vision_backbone_id]
+         self.image_sizes = VISION_BACKBONE_TO_RESOLUTION[self.vision_backbone_id]
+         self.image_resize_strategy = image_resize_strategy
+
+         self.hf_llm_id = LLM_BACKBONE_TO_HF_PATH[self.llm_backbone_id]
+         self.llm_max_length = llm_max_length
+         self.pad_token_id, self.pad_to_multiple_of = pad_token_id, pad_to_multiple_of
+
+         # [IMPORTANT] HF Utilities actually look for a `text_config` field... we need to use that specific naming!
+         self.text_config = (
+             CONFIG_MAPPING[LLM_BACKBONE_TO_HF_METACLASS[self.llm_backbone_id]](**text_config)
+             if text_config is not None
+             else CONFIG_MAPPING[LLM_BACKBONE_TO_HF_METACLASS[self.llm_backbone_id]]()
+         )
+
+         # Dispatch **kwargs to super() =>> note that `pad_token_id` collides, so we pass it in here as well...
+         super().__init__(pad_token_id=pad_token_id, **kwargs)
+
+
+ class OpenVLAConfig(PrismaticConfig):
+     model_type: str = "openvla"
+
+     def __init__(
+         self,
+         norm_stats: Optional[Dict[str, Dict[str, Dict[str, Dict[str, List[float]]]]]] = None,
+         n_action_bins: int = 256,
+         **kwargs: str,
+     ) -> None:
+         self.norm_stats, self.n_action_bins = norm_stats, n_action_bins
+
+         super().__init__(**kwargs)
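A minimal usage sketch for the two config classes above (not part of the upload); registering with `AutoConfig` is optional when the checkpoint's `config.json` carries an `auto_map`, and the chosen backbone IDs are examples rather than a recommendation:

from transformers import AutoConfig

AutoConfig.register("openvla", OpenVLAConfig)

config = OpenVLAConfig(
    vision_backbone_id="dinosiglip-vit-so-224px",
    llm_backbone_id="llama2-7b-pure",
    n_action_bins=256,
)
# Derived fields are filled in from the lookup tables above:
print(config.timm_model_ids)  # ['vit_large_patch14_reg4_dinov2.lvd142m', 'vit_so400m_patch14_siglip_224']
print(config.hf_llm_id)       # 'meta-llama/Llama-2-7b-hf'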
output_hf_model_openx/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "pad_token_id": 32000,
+   "transformers_version": "4.40.1"
+ }
output_hf_model_openx/model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10d8636256018712c5e5c823d12e22b5797f99bb721bd123bf6bf2379892be85
+ size 6948961960
output_hf_model_openx/model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2050b14f21d48904d269f48d5a980fecea87cd7b36641d9b0f015e72d1fe216a
+ size 6971232040
output_hf_model_openx/model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea65305a1577f36f721965bf84c8caec0a948ce7ce84d754701637376c531fef
+ size 1162406824
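The three `.safetensors` shards above are stored as Git LFS pointers (an oid and a byte size), and the index file that follows maps each parameter name to its shard. A small sketch, assuming the shards have already been fetched (e.g. via `git lfs pull`), that verifies one shard against its pointer and looks up a tensor's shard; paths are illustrative:

import hashlib
import json

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file through SHA-256 so large shards never sit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

# Compare against the oid recorded in the LFS pointer above.
assert sha256_of("output_hf_model_openx/model-00001-of-00003.safetensors") == (
    "10d8636256018712c5e5c823d12e22b5797f99bb721bd123bf6bf2379892be85"
)

# The index tells loaders which shard holds each weight.
with open("output_hf_model_openx/model.safetensors.index.json") as f:
    index = json.load(f)
print(index["weight_map"]["language_model.lm_head.weight"])  # model-00003-of-00003.safetensors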
output_hf_model_openx/model.safetensors.index.json ADDED
@@ -0,0 +1,989 @@
+ {
+   "metadata": {
+     "total_size": 15082474368
+   },
+   "weight_map": {
+     "language_model.lm_head.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.10.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.10.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.10.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.11.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.11.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.11.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.11.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.11.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.11.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.11.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.11.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.12.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.12.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.12.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.12.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.12.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.22.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.22.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.22.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.22.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.22.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.23.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.23.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.23.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.23.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.23.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.23.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.24.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.24.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.24.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.24.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.24.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.24.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.24.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.24.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.24.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.25.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.25.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.25.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.25.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.25.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.25.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.25.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.25.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.25.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.26.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.26.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.26.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.26.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.26.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.26.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.26.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.26.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.26.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.27.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.27.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.27.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.27.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.27.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.27.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.27.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.27.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.27.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.28.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.28.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.28.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.28.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.28.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.28.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.28.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.28.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.28.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.29.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.29.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.29.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.29.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.29.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.29.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "language_model.model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
258
+ "language_model.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
259
+ "language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
260
+ "language_model.model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
261
+ "language_model.model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
262
+ "language_model.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
263
+ "language_model.model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
264
+ "language_model.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
265
+ "language_model.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
266
+ "language_model.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
267
+ "language_model.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
268
+ "language_model.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
269
+ "language_model.model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
270
+ "language_model.model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
271
+ "language_model.model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
272
+ "language_model.model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
273
+ "language_model.model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
274
+ "language_model.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
275
+ "language_model.model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
276
+ "language_model.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
277
+ "language_model.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
278
+ "language_model.model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
279
+ "language_model.model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
280
+ "language_model.model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
281
+ "language_model.model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
282
+ "language_model.model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
283
+ "language_model.model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
284
+ "language_model.model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
285
+ "language_model.model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
286
+ "language_model.model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
287
+ "language_model.model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
288
+ "language_model.model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
289
+ "language_model.model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
290
+ "language_model.model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
291
+ "language_model.model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
292
+ "language_model.model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
293
+ "language_model.model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
294
+ "language_model.model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
295
+ "language_model.model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
296
+ "language_model.model.norm.weight": "model-00003-of-00003.safetensors",
297
+ "projector.fc1.bias": "model-00001-of-00003.safetensors",
298
+ "projector.fc1.weight": "model-00001-of-00003.safetensors",
299
+ "projector.fc2.bias": "model-00001-of-00003.safetensors",
300
+ "projector.fc2.weight": "model-00001-of-00003.safetensors",
301
+ "projector.fc3.bias": "model-00001-of-00003.safetensors",
302
+ "projector.fc3.weight": "model-00001-of-00003.safetensors",
303
+ "vision_backbone.featurizer.blocks.0.attn.proj.bias": "model-00001-of-00003.safetensors",
304
+ "vision_backbone.featurizer.blocks.0.attn.proj.weight": "model-00001-of-00003.safetensors",
305
+ "vision_backbone.featurizer.blocks.0.attn.qkv.bias": "model-00001-of-00003.safetensors",
306
+ "vision_backbone.featurizer.blocks.0.attn.qkv.weight": "model-00001-of-00003.safetensors",
307
+ "vision_backbone.featurizer.blocks.0.ls1.scale_factor": "model-00001-of-00003.safetensors",
308
+ "vision_backbone.featurizer.blocks.0.ls2.scale_factor": "model-00001-of-00003.safetensors",
309
+ "vision_backbone.featurizer.blocks.0.mlp.fc1.bias": "model-00001-of-00003.safetensors",
310
+ "vision_backbone.featurizer.blocks.0.mlp.fc1.weight": "model-00001-of-00003.safetensors",
311
+ "vision_backbone.featurizer.blocks.0.mlp.fc2.bias": "model-00001-of-00003.safetensors",
312
+ "vision_backbone.featurizer.blocks.0.mlp.fc2.weight": "model-00001-of-00003.safetensors",
313
+ "vision_backbone.featurizer.blocks.0.norm1.bias": "model-00001-of-00003.safetensors",
314
+ "vision_backbone.featurizer.blocks.0.norm1.weight": "model-00001-of-00003.safetensors",
315
+ "vision_backbone.featurizer.blocks.0.norm2.bias": "model-00001-of-00003.safetensors",
316
+ "vision_backbone.featurizer.blocks.0.norm2.weight": "model-00001-of-00003.safetensors",
317
+ "vision_backbone.featurizer.blocks.1.attn.proj.bias": "model-00001-of-00003.safetensors",
318
+ "vision_backbone.featurizer.blocks.1.attn.proj.weight": "model-00001-of-00003.safetensors",
319
+ "vision_backbone.featurizer.blocks.1.attn.qkv.bias": "model-00001-of-00003.safetensors",
320
+ "vision_backbone.featurizer.blocks.1.attn.qkv.weight": "model-00001-of-00003.safetensors",
321
+ "vision_backbone.featurizer.blocks.1.ls1.scale_factor": "model-00001-of-00003.safetensors",
322
+ "vision_backbone.featurizer.blocks.1.ls2.scale_factor": "model-00001-of-00003.safetensors",
323
+ "vision_backbone.featurizer.blocks.1.mlp.fc1.bias": "model-00001-of-00003.safetensors",
324
+ "vision_backbone.featurizer.blocks.1.mlp.fc1.weight": "model-00001-of-00003.safetensors",
325
+ "vision_backbone.featurizer.blocks.1.mlp.fc2.bias": "model-00001-of-00003.safetensors",
326
+ "vision_backbone.featurizer.blocks.1.mlp.fc2.weight": "model-00001-of-00003.safetensors",
327
+ "vision_backbone.featurizer.blocks.1.norm1.bias": "model-00001-of-00003.safetensors",
328
+ "vision_backbone.featurizer.blocks.1.norm1.weight": "model-00001-of-00003.safetensors",
329
+ "vision_backbone.featurizer.blocks.1.norm2.bias": "model-00001-of-00003.safetensors",
330
+ "vision_backbone.featurizer.blocks.1.norm2.weight": "model-00001-of-00003.safetensors",
331
+ "vision_backbone.featurizer.blocks.10.attn.proj.bias": "model-00001-of-00003.safetensors",
332
+ "vision_backbone.featurizer.blocks.10.attn.proj.weight": "model-00001-of-00003.safetensors",
333
+ "vision_backbone.featurizer.blocks.10.attn.qkv.bias": "model-00001-of-00003.safetensors",
334
+ "vision_backbone.featurizer.blocks.10.attn.qkv.weight": "model-00001-of-00003.safetensors",
335
+ "vision_backbone.featurizer.blocks.10.ls1.scale_factor": "model-00001-of-00003.safetensors",
336
+ "vision_backbone.featurizer.blocks.10.ls2.scale_factor": "model-00001-of-00003.safetensors",
337
+ "vision_backbone.featurizer.blocks.10.mlp.fc1.bias": "model-00001-of-00003.safetensors",
338
+ "vision_backbone.featurizer.blocks.10.mlp.fc1.weight": "model-00001-of-00003.safetensors",
339
+ "vision_backbone.featurizer.blocks.10.mlp.fc2.bias": "model-00001-of-00003.safetensors",
340
+ "vision_backbone.featurizer.blocks.10.mlp.fc2.weight": "model-00001-of-00003.safetensors",
341
+ "vision_backbone.featurizer.blocks.10.norm1.bias": "model-00001-of-00003.safetensors",
342
+ "vision_backbone.featurizer.blocks.10.norm1.weight": "model-00001-of-00003.safetensors",
343
+ "vision_backbone.featurizer.blocks.10.norm2.bias": "model-00001-of-00003.safetensors",
344
+ "vision_backbone.featurizer.blocks.10.norm2.weight": "model-00001-of-00003.safetensors",
345
+ "vision_backbone.featurizer.blocks.11.attn.proj.bias": "model-00001-of-00003.safetensors",
346
+ "vision_backbone.featurizer.blocks.11.attn.proj.weight": "model-00001-of-00003.safetensors",
347
+ "vision_backbone.featurizer.blocks.11.attn.qkv.bias": "model-00001-of-00003.safetensors",
348
+ "vision_backbone.featurizer.blocks.11.attn.qkv.weight": "model-00001-of-00003.safetensors",
349
+ "vision_backbone.featurizer.blocks.11.ls1.scale_factor": "model-00001-of-00003.safetensors",
350
+ "vision_backbone.featurizer.blocks.11.ls2.scale_factor": "model-00001-of-00003.safetensors",
351
+ "vision_backbone.featurizer.blocks.11.mlp.fc1.bias": "model-00001-of-00003.safetensors",
352
+ "vision_backbone.featurizer.blocks.11.mlp.fc1.weight": "model-00001-of-00003.safetensors",
353
+ "vision_backbone.featurizer.blocks.11.mlp.fc2.bias": "model-00001-of-00003.safetensors",
354
+ "vision_backbone.featurizer.blocks.11.mlp.fc2.weight": "model-00001-of-00003.safetensors",
355
+ "vision_backbone.featurizer.blocks.11.norm1.bias": "model-00001-of-00003.safetensors",
356
+ "vision_backbone.featurizer.blocks.11.norm1.weight": "model-00001-of-00003.safetensors",
357
+ "vision_backbone.featurizer.blocks.11.norm2.bias": "model-00001-of-00003.safetensors",
358
+ "vision_backbone.featurizer.blocks.11.norm2.weight": "model-00001-of-00003.safetensors",
359
+ "vision_backbone.featurizer.blocks.12.attn.proj.bias": "model-00001-of-00003.safetensors",
360
+ "vision_backbone.featurizer.blocks.12.attn.proj.weight": "model-00001-of-00003.safetensors",
361
+ "vision_backbone.featurizer.blocks.12.attn.qkv.bias": "model-00001-of-00003.safetensors",
362
+ "vision_backbone.featurizer.blocks.12.attn.qkv.weight": "model-00001-of-00003.safetensors",
363
+ "vision_backbone.featurizer.blocks.12.ls1.scale_factor": "model-00001-of-00003.safetensors",
364
+ "vision_backbone.featurizer.blocks.12.ls2.scale_factor": "model-00001-of-00003.safetensors",
365
+ "vision_backbone.featurizer.blocks.12.mlp.fc1.bias": "model-00001-of-00003.safetensors",
366
+ "vision_backbone.featurizer.blocks.12.mlp.fc1.weight": "model-00001-of-00003.safetensors",
367
+ "vision_backbone.featurizer.blocks.12.mlp.fc2.bias": "model-00001-of-00003.safetensors",
368
+ "vision_backbone.featurizer.blocks.12.mlp.fc2.weight": "model-00001-of-00003.safetensors",
369
+ "vision_backbone.featurizer.blocks.12.norm1.bias": "model-00001-of-00003.safetensors",
370
+ "vision_backbone.featurizer.blocks.12.norm1.weight": "model-00001-of-00003.safetensors",
371
+ "vision_backbone.featurizer.blocks.12.norm2.bias": "model-00001-of-00003.safetensors",
372
+ "vision_backbone.featurizer.blocks.12.norm2.weight": "model-00001-of-00003.safetensors",
373
+ "vision_backbone.featurizer.blocks.13.attn.proj.bias": "model-00001-of-00003.safetensors",
374
+ "vision_backbone.featurizer.blocks.13.attn.proj.weight": "model-00001-of-00003.safetensors",
375
+ "vision_backbone.featurizer.blocks.13.attn.qkv.bias": "model-00001-of-00003.safetensors",
376
+ "vision_backbone.featurizer.blocks.13.attn.qkv.weight": "model-00001-of-00003.safetensors",
377
+ "vision_backbone.featurizer.blocks.13.ls1.scale_factor": "model-00001-of-00003.safetensors",
378
+ "vision_backbone.featurizer.blocks.13.ls2.scale_factor": "model-00001-of-00003.safetensors",
379
+ "vision_backbone.featurizer.blocks.13.mlp.fc1.bias": "model-00001-of-00003.safetensors",
380
+ "vision_backbone.featurizer.blocks.13.mlp.fc1.weight": "model-00001-of-00003.safetensors",
381
+ "vision_backbone.featurizer.blocks.13.mlp.fc2.bias": "model-00001-of-00003.safetensors",
382
+ "vision_backbone.featurizer.blocks.13.mlp.fc2.weight": "model-00001-of-00003.safetensors",
383
+ "vision_backbone.featurizer.blocks.13.norm1.bias": "model-00001-of-00003.safetensors",
384
+ "vision_backbone.featurizer.blocks.13.norm1.weight": "model-00001-of-00003.safetensors",
385
+ "vision_backbone.featurizer.blocks.13.norm2.bias": "model-00001-of-00003.safetensors",
386
+ "vision_backbone.featurizer.blocks.13.norm2.weight": "model-00001-of-00003.safetensors",
387
+ "vision_backbone.featurizer.blocks.14.attn.proj.bias": "model-00001-of-00003.safetensors",
388
+ "vision_backbone.featurizer.blocks.14.attn.proj.weight": "model-00001-of-00003.safetensors",
389
+ "vision_backbone.featurizer.blocks.14.attn.qkv.bias": "model-00001-of-00003.safetensors",
390
+ "vision_backbone.featurizer.blocks.14.attn.qkv.weight": "model-00001-of-00003.safetensors",
391
+ "vision_backbone.featurizer.blocks.14.ls1.scale_factor": "model-00001-of-00003.safetensors",
392
+ "vision_backbone.featurizer.blocks.14.ls2.scale_factor": "model-00001-of-00003.safetensors",
393
+ "vision_backbone.featurizer.blocks.14.mlp.fc1.bias": "model-00001-of-00003.safetensors",
394
+ "vision_backbone.featurizer.blocks.14.mlp.fc1.weight": "model-00001-of-00003.safetensors",
395
+ "vision_backbone.featurizer.blocks.14.mlp.fc2.bias": "model-00001-of-00003.safetensors",
396
+ "vision_backbone.featurizer.blocks.14.mlp.fc2.weight": "model-00001-of-00003.safetensors",
397
+ "vision_backbone.featurizer.blocks.14.norm1.bias": "model-00001-of-00003.safetensors",
398
+ "vision_backbone.featurizer.blocks.14.norm1.weight": "model-00001-of-00003.safetensors",
399
+ "vision_backbone.featurizer.blocks.14.norm2.bias": "model-00001-of-00003.safetensors",
400
+ "vision_backbone.featurizer.blocks.14.norm2.weight": "model-00001-of-00003.safetensors",
401
+ "vision_backbone.featurizer.blocks.15.attn.proj.bias": "model-00001-of-00003.safetensors",
402
+ "vision_backbone.featurizer.blocks.15.attn.proj.weight": "model-00001-of-00003.safetensors",
403
+ "vision_backbone.featurizer.blocks.15.attn.qkv.bias": "model-00001-of-00003.safetensors",
404
+ "vision_backbone.featurizer.blocks.15.attn.qkv.weight": "model-00001-of-00003.safetensors",
405
+ "vision_backbone.featurizer.blocks.15.ls1.scale_factor": "model-00001-of-00003.safetensors",
406
+ "vision_backbone.featurizer.blocks.15.ls2.scale_factor": "model-00001-of-00003.safetensors",
407
+ "vision_backbone.featurizer.blocks.15.mlp.fc1.bias": "model-00001-of-00003.safetensors",
408
+ "vision_backbone.featurizer.blocks.15.mlp.fc1.weight": "model-00001-of-00003.safetensors",
409
+ "vision_backbone.featurizer.blocks.15.mlp.fc2.bias": "model-00001-of-00003.safetensors",
410
+ "vision_backbone.featurizer.blocks.15.mlp.fc2.weight": "model-00001-of-00003.safetensors",
411
+ "vision_backbone.featurizer.blocks.15.norm1.bias": "model-00001-of-00003.safetensors",
412
+ "vision_backbone.featurizer.blocks.15.norm1.weight": "model-00001-of-00003.safetensors",
413
+ "vision_backbone.featurizer.blocks.15.norm2.bias": "model-00001-of-00003.safetensors",
414
+ "vision_backbone.featurizer.blocks.15.norm2.weight": "model-00001-of-00003.safetensors",
415
+ "vision_backbone.featurizer.blocks.16.attn.proj.bias": "model-00001-of-00003.safetensors",
416
+ "vision_backbone.featurizer.blocks.16.attn.proj.weight": "model-00001-of-00003.safetensors",
417
+ "vision_backbone.featurizer.blocks.16.attn.qkv.bias": "model-00001-of-00003.safetensors",
418
+ "vision_backbone.featurizer.blocks.16.attn.qkv.weight": "model-00001-of-00003.safetensors",
419
+ "vision_backbone.featurizer.blocks.16.ls1.scale_factor": "model-00001-of-00003.safetensors",
420
+ "vision_backbone.featurizer.blocks.16.ls2.scale_factor": "model-00001-of-00003.safetensors",
421
+ "vision_backbone.featurizer.blocks.16.mlp.fc1.bias": "model-00001-of-00003.safetensors",
422
+ "vision_backbone.featurizer.blocks.16.mlp.fc1.weight": "model-00001-of-00003.safetensors",
423
+ "vision_backbone.featurizer.blocks.16.mlp.fc2.bias": "model-00001-of-00003.safetensors",
424
+ "vision_backbone.featurizer.blocks.16.mlp.fc2.weight": "model-00001-of-00003.safetensors",
425
+ "vision_backbone.featurizer.blocks.16.norm1.bias": "model-00001-of-00003.safetensors",
426
+ "vision_backbone.featurizer.blocks.16.norm1.weight": "model-00001-of-00003.safetensors",
427
+ "vision_backbone.featurizer.blocks.16.norm2.bias": "model-00001-of-00003.safetensors",
428
+ "vision_backbone.featurizer.blocks.16.norm2.weight": "model-00001-of-00003.safetensors",
429
+ "vision_backbone.featurizer.blocks.17.attn.proj.bias": "model-00001-of-00003.safetensors",
430
+ "vision_backbone.featurizer.blocks.17.attn.proj.weight": "model-00001-of-00003.safetensors",
431
+ "vision_backbone.featurizer.blocks.17.attn.qkv.bias": "model-00001-of-00003.safetensors",
432
+ "vision_backbone.featurizer.blocks.17.attn.qkv.weight": "model-00001-of-00003.safetensors",
433
+ "vision_backbone.featurizer.blocks.17.ls1.scale_factor": "model-00001-of-00003.safetensors",
434
+ "vision_backbone.featurizer.blocks.17.ls2.scale_factor": "model-00001-of-00003.safetensors",
435
+ "vision_backbone.featurizer.blocks.17.mlp.fc1.bias": "model-00001-of-00003.safetensors",
436
+ "vision_backbone.featurizer.blocks.17.mlp.fc1.weight": "model-00001-of-00003.safetensors",
437
+ "vision_backbone.featurizer.blocks.17.mlp.fc2.bias": "model-00001-of-00003.safetensors",
438
+ "vision_backbone.featurizer.blocks.17.mlp.fc2.weight": "model-00001-of-00003.safetensors",
439
+ "vision_backbone.featurizer.blocks.17.norm1.bias": "model-00001-of-00003.safetensors",
440
+ "vision_backbone.featurizer.blocks.17.norm1.weight": "model-00001-of-00003.safetensors",
441
+ "vision_backbone.featurizer.blocks.17.norm2.bias": "model-00001-of-00003.safetensors",
442
+ "vision_backbone.featurizer.blocks.17.norm2.weight": "model-00001-of-00003.safetensors",
443
+ "vision_backbone.featurizer.blocks.18.attn.proj.bias": "model-00001-of-00003.safetensors",
444
+ "vision_backbone.featurizer.blocks.18.attn.proj.weight": "model-00001-of-00003.safetensors",
445
+ "vision_backbone.featurizer.blocks.18.attn.qkv.bias": "model-00001-of-00003.safetensors",
446
+ "vision_backbone.featurizer.blocks.18.attn.qkv.weight": "model-00001-of-00003.safetensors",
447
+ "vision_backbone.featurizer.blocks.18.ls1.scale_factor": "model-00001-of-00003.safetensors",
448
+ "vision_backbone.featurizer.blocks.18.ls2.scale_factor": "model-00001-of-00003.safetensors",
449
+ "vision_backbone.featurizer.blocks.18.mlp.fc1.bias": "model-00001-of-00003.safetensors",
450
+ "vision_backbone.featurizer.blocks.18.mlp.fc1.weight": "model-00001-of-00003.safetensors",
451
+ "vision_backbone.featurizer.blocks.18.mlp.fc2.bias": "model-00001-of-00003.safetensors",
452
+ "vision_backbone.featurizer.blocks.18.mlp.fc2.weight": "model-00001-of-00003.safetensors",
453
+ "vision_backbone.featurizer.blocks.18.norm1.bias": "model-00001-of-00003.safetensors",
454
+ "vision_backbone.featurizer.blocks.18.norm1.weight": "model-00001-of-00003.safetensors",
455
+ "vision_backbone.featurizer.blocks.18.norm2.bias": "model-00001-of-00003.safetensors",
456
+ "vision_backbone.featurizer.blocks.18.norm2.weight": "model-00001-of-00003.safetensors",
457
+ "vision_backbone.featurizer.blocks.19.attn.proj.bias": "model-00001-of-00003.safetensors",
458
+ "vision_backbone.featurizer.blocks.19.attn.proj.weight": "model-00001-of-00003.safetensors",
459
+ "vision_backbone.featurizer.blocks.19.attn.qkv.bias": "model-00001-of-00003.safetensors",
460
+ "vision_backbone.featurizer.blocks.19.attn.qkv.weight": "model-00001-of-00003.safetensors",
461
+ "vision_backbone.featurizer.blocks.19.ls1.scale_factor": "model-00001-of-00003.safetensors",
462
+ "vision_backbone.featurizer.blocks.19.ls2.scale_factor": "model-00001-of-00003.safetensors",
463
+ "vision_backbone.featurizer.blocks.19.mlp.fc1.bias": "model-00001-of-00003.safetensors",
464
+ "vision_backbone.featurizer.blocks.19.mlp.fc1.weight": "model-00001-of-00003.safetensors",
465
+ "vision_backbone.featurizer.blocks.19.mlp.fc2.bias": "model-00001-of-00003.safetensors",
466
+ "vision_backbone.featurizer.blocks.19.mlp.fc2.weight": "model-00001-of-00003.safetensors",
467
+ "vision_backbone.featurizer.blocks.19.norm1.bias": "model-00001-of-00003.safetensors",
468
+ "vision_backbone.featurizer.blocks.19.norm1.weight": "model-00001-of-00003.safetensors",
469
+ "vision_backbone.featurizer.blocks.19.norm2.bias": "model-00001-of-00003.safetensors",
470
+ "vision_backbone.featurizer.blocks.19.norm2.weight": "model-00001-of-00003.safetensors",
471
+ "vision_backbone.featurizer.blocks.2.attn.proj.bias": "model-00001-of-00003.safetensors",
472
+ "vision_backbone.featurizer.blocks.2.attn.proj.weight": "model-00001-of-00003.safetensors",
473
+ "vision_backbone.featurizer.blocks.2.attn.qkv.bias": "model-00001-of-00003.safetensors",
474
+ "vision_backbone.featurizer.blocks.2.attn.qkv.weight": "model-00001-of-00003.safetensors",
475
+ "vision_backbone.featurizer.blocks.2.ls1.scale_factor": "model-00001-of-00003.safetensors",
476
+ "vision_backbone.featurizer.blocks.2.ls2.scale_factor": "model-00001-of-00003.safetensors",
477
+ "vision_backbone.featurizer.blocks.2.mlp.fc1.bias": "model-00001-of-00003.safetensors",
478
+ "vision_backbone.featurizer.blocks.2.mlp.fc1.weight": "model-00001-of-00003.safetensors",
479
+ "vision_backbone.featurizer.blocks.2.mlp.fc2.bias": "model-00001-of-00003.safetensors",
480
+ "vision_backbone.featurizer.blocks.2.mlp.fc2.weight": "model-00001-of-00003.safetensors",
481
+ "vision_backbone.featurizer.blocks.2.norm1.bias": "model-00001-of-00003.safetensors",
482
+ "vision_backbone.featurizer.blocks.2.norm1.weight": "model-00001-of-00003.safetensors",
483
+ "vision_backbone.featurizer.blocks.2.norm2.bias": "model-00001-of-00003.safetensors",
484
+ "vision_backbone.featurizer.blocks.2.norm2.weight": "model-00001-of-00003.safetensors",
485
+ "vision_backbone.featurizer.blocks.20.attn.proj.bias": "model-00001-of-00003.safetensors",
486
+ "vision_backbone.featurizer.blocks.20.attn.proj.weight": "model-00001-of-00003.safetensors",
487
+ "vision_backbone.featurizer.blocks.20.attn.qkv.bias": "model-00001-of-00003.safetensors",
488
+ "vision_backbone.featurizer.blocks.20.attn.qkv.weight": "model-00001-of-00003.safetensors",
489
+ "vision_backbone.featurizer.blocks.20.ls1.scale_factor": "model-00001-of-00003.safetensors",
490
+ "vision_backbone.featurizer.blocks.20.ls2.scale_factor": "model-00001-of-00003.safetensors",
491
+ "vision_backbone.featurizer.blocks.20.mlp.fc1.bias": "model-00001-of-00003.safetensors",
492
+ "vision_backbone.featurizer.blocks.20.mlp.fc1.weight": "model-00001-of-00003.safetensors",
493
+ "vision_backbone.featurizer.blocks.20.mlp.fc2.bias": "model-00001-of-00003.safetensors",
494
+ "vision_backbone.featurizer.blocks.20.mlp.fc2.weight": "model-00001-of-00003.safetensors",
495
+ "vision_backbone.featurizer.blocks.20.norm1.bias": "model-00001-of-00003.safetensors",
496
+ "vision_backbone.featurizer.blocks.20.norm1.weight": "model-00001-of-00003.safetensors",
497
+ "vision_backbone.featurizer.blocks.20.norm2.bias": "model-00001-of-00003.safetensors",
498
+ "vision_backbone.featurizer.blocks.20.norm2.weight": "model-00001-of-00003.safetensors",
499
+ "vision_backbone.featurizer.blocks.21.attn.proj.bias": "model-00001-of-00003.safetensors",
500
+ "vision_backbone.featurizer.blocks.21.attn.proj.weight": "model-00001-of-00003.safetensors",
501
+ "vision_backbone.featurizer.blocks.21.attn.qkv.bias": "model-00001-of-00003.safetensors",
502
+ "vision_backbone.featurizer.blocks.21.attn.qkv.weight": "model-00001-of-00003.safetensors",
503
+ "vision_backbone.featurizer.blocks.21.ls1.scale_factor": "model-00001-of-00003.safetensors",
504
+ "vision_backbone.featurizer.blocks.21.ls2.scale_factor": "model-00001-of-00003.safetensors",
505
+ "vision_backbone.featurizer.blocks.21.mlp.fc1.bias": "model-00001-of-00003.safetensors",
506
+ "vision_backbone.featurizer.blocks.21.mlp.fc1.weight": "model-00001-of-00003.safetensors",
507
+ "vision_backbone.featurizer.blocks.21.mlp.fc2.bias": "model-00001-of-00003.safetensors",
508
+ "vision_backbone.featurizer.blocks.21.mlp.fc2.weight": "model-00001-of-00003.safetensors",
509
+ "vision_backbone.featurizer.blocks.21.norm1.bias": "model-00001-of-00003.safetensors",
510
+ "vision_backbone.featurizer.blocks.21.norm1.weight": "model-00001-of-00003.safetensors",
511
+ "vision_backbone.featurizer.blocks.21.norm2.bias": "model-00001-of-00003.safetensors",
512
+ "vision_backbone.featurizer.blocks.21.norm2.weight": "model-00001-of-00003.safetensors",
513
+ "vision_backbone.featurizer.blocks.22.attn.proj.bias": "model-00001-of-00003.safetensors",
514
+ "vision_backbone.featurizer.blocks.22.attn.proj.weight": "model-00001-of-00003.safetensors",
515
+ "vision_backbone.featurizer.blocks.22.attn.qkv.bias": "model-00001-of-00003.safetensors",
516
+ "vision_backbone.featurizer.blocks.22.attn.qkv.weight": "model-00001-of-00003.safetensors",
517
+ "vision_backbone.featurizer.blocks.22.ls1.scale_factor": "model-00001-of-00003.safetensors",
518
+ "vision_backbone.featurizer.blocks.22.ls2.scale_factor": "model-00001-of-00003.safetensors",
519
+ "vision_backbone.featurizer.blocks.22.mlp.fc1.bias": "model-00001-of-00003.safetensors",
520
+ "vision_backbone.featurizer.blocks.22.mlp.fc1.weight": "model-00001-of-00003.safetensors",
521
+ "vision_backbone.featurizer.blocks.22.mlp.fc2.bias": "model-00001-of-00003.safetensors",
522
+ "vision_backbone.featurizer.blocks.22.mlp.fc2.weight": "model-00001-of-00003.safetensors",
523
+ "vision_backbone.featurizer.blocks.22.norm1.bias": "model-00001-of-00003.safetensors",
524
+ "vision_backbone.featurizer.blocks.22.norm1.weight": "model-00001-of-00003.safetensors",
525
+ "vision_backbone.featurizer.blocks.22.norm2.bias": "model-00001-of-00003.safetensors",
526
+ "vision_backbone.featurizer.blocks.22.norm2.weight": "model-00001-of-00003.safetensors",
527
+ "vision_backbone.featurizer.blocks.23.attn.proj.bias": "model-00001-of-00003.safetensors",
528
+ "vision_backbone.featurizer.blocks.23.attn.proj.weight": "model-00001-of-00003.safetensors",
529
+ "vision_backbone.featurizer.blocks.23.attn.qkv.bias": "model-00001-of-00003.safetensors",
530
+ "vision_backbone.featurizer.blocks.23.attn.qkv.weight": "model-00001-of-00003.safetensors",
531
+ "vision_backbone.featurizer.blocks.23.ls1.scale_factor": "model-00001-of-00003.safetensors",
532
+ "vision_backbone.featurizer.blocks.23.ls2.scale_factor": "model-00001-of-00003.safetensors",
533
+ "vision_backbone.featurizer.blocks.23.mlp.fc1.bias": "model-00001-of-00003.safetensors",
534
+ "vision_backbone.featurizer.blocks.23.mlp.fc1.weight": "model-00001-of-00003.safetensors",
535
+ "vision_backbone.featurizer.blocks.23.mlp.fc2.bias": "model-00001-of-00003.safetensors",
536
+ "vision_backbone.featurizer.blocks.23.mlp.fc2.weight": "model-00001-of-00003.safetensors",
537
+ "vision_backbone.featurizer.blocks.23.norm1.bias": "model-00001-of-00003.safetensors",
538
+ "vision_backbone.featurizer.blocks.23.norm1.weight": "model-00001-of-00003.safetensors",
539
+ "vision_backbone.featurizer.blocks.23.norm2.bias": "model-00001-of-00003.safetensors",
540
+ "vision_backbone.featurizer.blocks.23.norm2.weight": "model-00001-of-00003.safetensors",
541
+ "vision_backbone.featurizer.blocks.3.attn.proj.bias": "model-00001-of-00003.safetensors",
542
+ "vision_backbone.featurizer.blocks.3.attn.proj.weight": "model-00001-of-00003.safetensors",
543
+ "vision_backbone.featurizer.blocks.3.attn.qkv.bias": "model-00001-of-00003.safetensors",
544
+ "vision_backbone.featurizer.blocks.3.attn.qkv.weight": "model-00001-of-00003.safetensors",
545
+ "vision_backbone.featurizer.blocks.3.ls1.scale_factor": "model-00001-of-00003.safetensors",
546
+ "vision_backbone.featurizer.blocks.3.ls2.scale_factor": "model-00001-of-00003.safetensors",
547
+ "vision_backbone.featurizer.blocks.3.mlp.fc1.bias": "model-00001-of-00003.safetensors",
548
+ "vision_backbone.featurizer.blocks.3.mlp.fc1.weight": "model-00001-of-00003.safetensors",
549
+ "vision_backbone.featurizer.blocks.3.mlp.fc2.bias": "model-00001-of-00003.safetensors",
550
+ "vision_backbone.featurizer.blocks.3.mlp.fc2.weight": "model-00001-of-00003.safetensors",
551
+ "vision_backbone.featurizer.blocks.3.norm1.bias": "model-00001-of-00003.safetensors",
552
+ "vision_backbone.featurizer.blocks.3.norm1.weight": "model-00001-of-00003.safetensors",
553
+ "vision_backbone.featurizer.blocks.3.norm2.bias": "model-00001-of-00003.safetensors",
554
+ "vision_backbone.featurizer.blocks.3.norm2.weight": "model-00001-of-00003.safetensors",
555
+ "vision_backbone.featurizer.blocks.4.attn.proj.bias": "model-00001-of-00003.safetensors",
556
+ "vision_backbone.featurizer.blocks.4.attn.proj.weight": "model-00001-of-00003.safetensors",
557
+ "vision_backbone.featurizer.blocks.4.attn.qkv.bias": "model-00001-of-00003.safetensors",
558
+ "vision_backbone.featurizer.blocks.4.attn.qkv.weight": "model-00001-of-00003.safetensors",
559
+ "vision_backbone.featurizer.blocks.4.ls1.scale_factor": "model-00001-of-00003.safetensors",
560
+ "vision_backbone.featurizer.blocks.4.ls2.scale_factor": "model-00001-of-00003.safetensors",
561
+ "vision_backbone.featurizer.blocks.4.mlp.fc1.bias": "model-00001-of-00003.safetensors",
562
+ "vision_backbone.featurizer.blocks.4.mlp.fc1.weight": "model-00001-of-00003.safetensors",
563
+ "vision_backbone.featurizer.blocks.4.mlp.fc2.bias": "model-00001-of-00003.safetensors",
564
+ "vision_backbone.featurizer.blocks.4.mlp.fc2.weight": "model-00001-of-00003.safetensors",
565
+ "vision_backbone.featurizer.blocks.4.norm1.bias": "model-00001-of-00003.safetensors",
566
+ "vision_backbone.featurizer.blocks.4.norm1.weight": "model-00001-of-00003.safetensors",
567
+ "vision_backbone.featurizer.blocks.4.norm2.bias": "model-00001-of-00003.safetensors",
568
+ "vision_backbone.featurizer.blocks.4.norm2.weight": "model-00001-of-00003.safetensors",
569
+ "vision_backbone.featurizer.blocks.5.attn.proj.bias": "model-00001-of-00003.safetensors",
570
+ "vision_backbone.featurizer.blocks.5.attn.proj.weight": "model-00001-of-00003.safetensors",
571
+ "vision_backbone.featurizer.blocks.5.attn.qkv.bias": "model-00001-of-00003.safetensors",
572
+ "vision_backbone.featurizer.blocks.5.attn.qkv.weight": "model-00001-of-00003.safetensors",
573
+ "vision_backbone.featurizer.blocks.5.ls1.scale_factor": "model-00001-of-00003.safetensors",
574
+ "vision_backbone.featurizer.blocks.5.ls2.scale_factor": "model-00001-of-00003.safetensors",
575
+ "vision_backbone.featurizer.blocks.5.mlp.fc1.bias": "model-00001-of-00003.safetensors",
576
+ "vision_backbone.featurizer.blocks.5.mlp.fc1.weight": "model-00001-of-00003.safetensors",
577
+ "vision_backbone.featurizer.blocks.5.mlp.fc2.bias": "model-00001-of-00003.safetensors",
578
+ "vision_backbone.featurizer.blocks.5.mlp.fc2.weight": "model-00001-of-00003.safetensors",
579
+ "vision_backbone.featurizer.blocks.5.norm1.bias": "model-00001-of-00003.safetensors",
580
+ "vision_backbone.featurizer.blocks.5.norm1.weight": "model-00001-of-00003.safetensors",
581
+ "vision_backbone.featurizer.blocks.5.norm2.bias": "model-00001-of-00003.safetensors",
582
+ "vision_backbone.featurizer.blocks.5.norm2.weight": "model-00001-of-00003.safetensors",
583
+ "vision_backbone.featurizer.blocks.6.attn.proj.bias": "model-00001-of-00003.safetensors",
584
+ "vision_backbone.featurizer.blocks.6.attn.proj.weight": "model-00001-of-00003.safetensors",
585
+ "vision_backbone.featurizer.blocks.6.attn.qkv.bias": "model-00001-of-00003.safetensors",
586
+ "vision_backbone.featurizer.blocks.6.attn.qkv.weight": "model-00001-of-00003.safetensors",
587
+ "vision_backbone.featurizer.blocks.6.ls1.scale_factor": "model-00001-of-00003.safetensors",
588
+ "vision_backbone.featurizer.blocks.6.ls2.scale_factor": "model-00001-of-00003.safetensors",
589
+ "vision_backbone.featurizer.blocks.6.mlp.fc1.bias": "model-00001-of-00003.safetensors",
590
+ "vision_backbone.featurizer.blocks.6.mlp.fc1.weight": "model-00001-of-00003.safetensors",
591
+ "vision_backbone.featurizer.blocks.6.mlp.fc2.bias": "model-00001-of-00003.safetensors",
592
+ "vision_backbone.featurizer.blocks.6.mlp.fc2.weight": "model-00001-of-00003.safetensors",
593
+ "vision_backbone.featurizer.blocks.6.norm1.bias": "model-00001-of-00003.safetensors",
594
+ "vision_backbone.featurizer.blocks.6.norm1.weight": "model-00001-of-00003.safetensors",
595
+ "vision_backbone.featurizer.blocks.6.norm2.bias": "model-00001-of-00003.safetensors",
596
+ "vision_backbone.featurizer.blocks.6.norm2.weight": "model-00001-of-00003.safetensors",
597
+ "vision_backbone.featurizer.blocks.7.attn.proj.bias": "model-00001-of-00003.safetensors",
598
+ "vision_backbone.featurizer.blocks.7.attn.proj.weight": "model-00001-of-00003.safetensors",
599
+ "vision_backbone.featurizer.blocks.7.attn.qkv.bias": "model-00001-of-00003.safetensors",
600
+ "vision_backbone.featurizer.blocks.7.attn.qkv.weight": "model-00001-of-00003.safetensors",
601
+ "vision_backbone.featurizer.blocks.7.ls1.scale_factor": "model-00001-of-00003.safetensors",
602
+ "vision_backbone.featurizer.blocks.7.ls2.scale_factor": "model-00001-of-00003.safetensors",
603
+ "vision_backbone.featurizer.blocks.7.mlp.fc1.bias": "model-00001-of-00003.safetensors",
604
+ "vision_backbone.featurizer.blocks.7.mlp.fc1.weight": "model-00001-of-00003.safetensors",
605
+ "vision_backbone.featurizer.blocks.7.mlp.fc2.bias": "model-00001-of-00003.safetensors",
606
+ "vision_backbone.featurizer.blocks.7.mlp.fc2.weight": "model-00001-of-00003.safetensors",
607
+ "vision_backbone.featurizer.blocks.7.norm1.bias": "model-00001-of-00003.safetensors",
608
+ "vision_backbone.featurizer.blocks.7.norm1.weight": "model-00001-of-00003.safetensors",
609
+ "vision_backbone.featurizer.blocks.7.norm2.bias": "model-00001-of-00003.safetensors",
610
+ "vision_backbone.featurizer.blocks.7.norm2.weight": "model-00001-of-00003.safetensors",
611
+ "vision_backbone.featurizer.blocks.8.attn.proj.bias": "model-00001-of-00003.safetensors",
612
+ "vision_backbone.featurizer.blocks.8.attn.proj.weight": "model-00001-of-00003.safetensors",
613
+ "vision_backbone.featurizer.blocks.8.attn.qkv.bias": "model-00001-of-00003.safetensors",
614
+ "vision_backbone.featurizer.blocks.8.attn.qkv.weight": "model-00001-of-00003.safetensors",
615
+ "vision_backbone.featurizer.blocks.8.ls1.scale_factor": "model-00001-of-00003.safetensors",
616
+ "vision_backbone.featurizer.blocks.8.ls2.scale_factor": "model-00001-of-00003.safetensors",
617
+ "vision_backbone.featurizer.blocks.8.mlp.fc1.bias": "model-00001-of-00003.safetensors",
618
+ "vision_backbone.featurizer.blocks.8.mlp.fc1.weight": "model-00001-of-00003.safetensors",
619
+ "vision_backbone.featurizer.blocks.8.mlp.fc2.bias": "model-00001-of-00003.safetensors",
620
+ "vision_backbone.featurizer.blocks.8.mlp.fc2.weight": "model-00001-of-00003.safetensors",
621
+ "vision_backbone.featurizer.blocks.8.norm1.bias": "model-00001-of-00003.safetensors",
622
+ "vision_backbone.featurizer.blocks.8.norm1.weight": "model-00001-of-00003.safetensors",
623
+ "vision_backbone.featurizer.blocks.8.norm2.bias": "model-00001-of-00003.safetensors",
624
+ "vision_backbone.featurizer.blocks.8.norm2.weight": "model-00001-of-00003.safetensors",
625
+ "vision_backbone.featurizer.blocks.9.attn.proj.bias": "model-00001-of-00003.safetensors",
626
+ "vision_backbone.featurizer.blocks.9.attn.proj.weight": "model-00001-of-00003.safetensors",
627
+ "vision_backbone.featurizer.blocks.9.attn.qkv.bias": "model-00001-of-00003.safetensors",
628
+ "vision_backbone.featurizer.blocks.9.attn.qkv.weight": "model-00001-of-00003.safetensors",
629
+ "vision_backbone.featurizer.blocks.9.ls1.scale_factor": "model-00001-of-00003.safetensors",
630
+ "vision_backbone.featurizer.blocks.9.ls2.scale_factor": "model-00001-of-00003.safetensors",
631
+ "vision_backbone.featurizer.blocks.9.mlp.fc1.bias": "model-00001-of-00003.safetensors",
632
+ "vision_backbone.featurizer.blocks.9.mlp.fc1.weight": "model-00001-of-00003.safetensors",
633
+ "vision_backbone.featurizer.blocks.9.mlp.fc2.bias": "model-00001-of-00003.safetensors",
634
+ "vision_backbone.featurizer.blocks.9.mlp.fc2.weight": "model-00001-of-00003.safetensors",
635
+ "vision_backbone.featurizer.blocks.9.norm1.bias": "model-00001-of-00003.safetensors",
636
+ "vision_backbone.featurizer.blocks.9.norm1.weight": "model-00001-of-00003.safetensors",
637
+ "vision_backbone.featurizer.blocks.9.norm2.bias": "model-00001-of-00003.safetensors",
638
+ "vision_backbone.featurizer.blocks.9.norm2.weight": "model-00001-of-00003.safetensors",
639
+ "vision_backbone.featurizer.cls_token": "model-00001-of-00003.safetensors",
640
+ "vision_backbone.featurizer.norm.bias": "model-00001-of-00003.safetensors",
641
+ "vision_backbone.featurizer.norm.weight": "model-00001-of-00003.safetensors",
642
+ "vision_backbone.featurizer.patch_embed.proj.bias": "model-00001-of-00003.safetensors",
643
+ "vision_backbone.featurizer.patch_embed.proj.weight": "model-00001-of-00003.safetensors",
644
+ "vision_backbone.featurizer.pos_embed": "model-00001-of-00003.safetensors",
645
+ "vision_backbone.featurizer.reg_token": "model-00001-of-00003.safetensors",
646
+ "vision_backbone.fused_featurizer.attn_pool.kv.bias": "model-00001-of-00003.safetensors",
647
+ "vision_backbone.fused_featurizer.attn_pool.kv.weight": "model-00001-of-00003.safetensors",
648
+ "vision_backbone.fused_featurizer.attn_pool.latent": "model-00001-of-00003.safetensors",
649
+ "vision_backbone.fused_featurizer.attn_pool.mlp.fc1.bias": "model-00001-of-00003.safetensors",
650
+ "vision_backbone.fused_featurizer.attn_pool.mlp.fc1.weight": "model-00001-of-00003.safetensors",
651
+ "vision_backbone.fused_featurizer.attn_pool.mlp.fc2.bias": "model-00001-of-00003.safetensors",
652
+ "vision_backbone.fused_featurizer.attn_pool.mlp.fc2.weight": "model-00001-of-00003.safetensors",
653
+ "vision_backbone.fused_featurizer.attn_pool.norm.bias": "model-00001-of-00003.safetensors",
654
+ "vision_backbone.fused_featurizer.attn_pool.norm.weight": "model-00001-of-00003.safetensors",
655
+ "vision_backbone.fused_featurizer.attn_pool.proj.bias": "model-00001-of-00003.safetensors",
656
+ "vision_backbone.fused_featurizer.attn_pool.proj.weight": "model-00001-of-00003.safetensors",
657
+ "vision_backbone.fused_featurizer.attn_pool.q.bias": "model-00001-of-00003.safetensors",
658
+ "vision_backbone.fused_featurizer.attn_pool.q.weight": "model-00001-of-00003.safetensors",
659
+ "vision_backbone.fused_featurizer.blocks.0.attn.proj.bias": "model-00001-of-00003.safetensors",
660
+ "vision_backbone.fused_featurizer.blocks.0.attn.proj.weight": "model-00001-of-00003.safetensors",
661
+ "vision_backbone.fused_featurizer.blocks.0.attn.qkv.bias": "model-00001-of-00003.safetensors",
662
+ "vision_backbone.fused_featurizer.blocks.0.attn.qkv.weight": "model-00001-of-00003.safetensors",
663
+ "vision_backbone.fused_featurizer.blocks.0.mlp.fc1.bias": "model-00001-of-00003.safetensors",
664
+ "vision_backbone.fused_featurizer.blocks.0.mlp.fc1.weight": "model-00001-of-00003.safetensors",
665
+ "vision_backbone.fused_featurizer.blocks.0.mlp.fc2.bias": "model-00001-of-00003.safetensors",
666
+ "vision_backbone.fused_featurizer.blocks.0.mlp.fc2.weight": "model-00001-of-00003.safetensors",
667
+ "vision_backbone.fused_featurizer.blocks.0.norm1.bias": "model-00001-of-00003.safetensors",
668
+ "vision_backbone.fused_featurizer.blocks.0.norm1.weight": "model-00001-of-00003.safetensors",
669
+ "vision_backbone.fused_featurizer.blocks.0.norm2.bias": "model-00001-of-00003.safetensors",
670
+ "vision_backbone.fused_featurizer.blocks.0.norm2.weight": "model-00001-of-00003.safetensors",
671
+ "vision_backbone.fused_featurizer.blocks.1.attn.proj.bias": "model-00001-of-00003.safetensors",
672
+ "vision_backbone.fused_featurizer.blocks.1.attn.proj.weight": "model-00001-of-00003.safetensors",
673
+ "vision_backbone.fused_featurizer.blocks.1.attn.qkv.bias": "model-00001-of-00003.safetensors",
674
+ "vision_backbone.fused_featurizer.blocks.1.attn.qkv.weight": "model-00001-of-00003.safetensors",
675
+ "vision_backbone.fused_featurizer.blocks.1.mlp.fc1.bias": "model-00001-of-00003.safetensors",
676
+ "vision_backbone.fused_featurizer.blocks.1.mlp.fc1.weight": "model-00001-of-00003.safetensors",
677
+ "vision_backbone.fused_featurizer.blocks.1.mlp.fc2.bias": "model-00001-of-00003.safetensors",
678
+ "vision_backbone.fused_featurizer.blocks.1.mlp.fc2.weight": "model-00001-of-00003.safetensors",
679
+ "vision_backbone.fused_featurizer.blocks.1.norm1.bias": "model-00001-of-00003.safetensors",
680
+ "vision_backbone.fused_featurizer.blocks.1.norm1.weight": "model-00001-of-00003.safetensors",
681
+ "vision_backbone.fused_featurizer.blocks.1.norm2.bias": "model-00001-of-00003.safetensors",
682
+ "vision_backbone.fused_featurizer.blocks.1.norm2.weight": "model-00001-of-00003.safetensors",
683
+ "vision_backbone.fused_featurizer.blocks.10.attn.proj.bias": "model-00001-of-00003.safetensors",
684
+ "vision_backbone.fused_featurizer.blocks.10.attn.proj.weight": "model-00001-of-00003.safetensors",
685
+ "vision_backbone.fused_featurizer.blocks.10.attn.qkv.bias": "model-00001-of-00003.safetensors",
686
+ "vision_backbone.fused_featurizer.blocks.10.attn.qkv.weight": "model-00001-of-00003.safetensors",
687
+ "vision_backbone.fused_featurizer.blocks.10.mlp.fc1.bias": "model-00001-of-00003.safetensors",
688
+ "vision_backbone.fused_featurizer.blocks.10.mlp.fc1.weight": "model-00001-of-00003.safetensors",
689
+ "vision_backbone.fused_featurizer.blocks.10.mlp.fc2.bias": "model-00001-of-00003.safetensors",
690
+ "vision_backbone.fused_featurizer.blocks.10.mlp.fc2.weight": "model-00001-of-00003.safetensors",
691
+ "vision_backbone.fused_featurizer.blocks.10.norm1.bias": "model-00001-of-00003.safetensors",
692
+ "vision_backbone.fused_featurizer.blocks.10.norm1.weight": "model-00001-of-00003.safetensors",
693
+ "vision_backbone.fused_featurizer.blocks.10.norm2.bias": "model-00001-of-00003.safetensors",
694
+ "vision_backbone.fused_featurizer.blocks.10.norm2.weight": "model-00001-of-00003.safetensors",
695
+ "vision_backbone.fused_featurizer.blocks.11.attn.proj.bias": "model-00001-of-00003.safetensors",
696
+ "vision_backbone.fused_featurizer.blocks.11.attn.proj.weight": "model-00001-of-00003.safetensors",
697
+ "vision_backbone.fused_featurizer.blocks.11.attn.qkv.bias": "model-00001-of-00003.safetensors",
698
+ "vision_backbone.fused_featurizer.blocks.11.attn.qkv.weight": "model-00001-of-00003.safetensors",
699
+ "vision_backbone.fused_featurizer.blocks.11.mlp.fc1.bias": "model-00001-of-00003.safetensors",
700
+ "vision_backbone.fused_featurizer.blocks.11.mlp.fc1.weight": "model-00001-of-00003.safetensors",
701
+ "vision_backbone.fused_featurizer.blocks.11.mlp.fc2.bias": "model-00001-of-00003.safetensors",
702
+ "vision_backbone.fused_featurizer.blocks.11.mlp.fc2.weight": "model-00001-of-00003.safetensors",
703
+ "vision_backbone.fused_featurizer.blocks.11.norm1.bias": "model-00001-of-00003.safetensors",
704
+ "vision_backbone.fused_featurizer.blocks.11.norm1.weight": "model-00001-of-00003.safetensors",
705
+ "vision_backbone.fused_featurizer.blocks.11.norm2.bias": "model-00001-of-00003.safetensors",
706
+ "vision_backbone.fused_featurizer.blocks.11.norm2.weight": "model-00001-of-00003.safetensors",
707
+ "vision_backbone.fused_featurizer.blocks.12.attn.proj.bias": "model-00001-of-00003.safetensors",
708
+ "vision_backbone.fused_featurizer.blocks.12.attn.proj.weight": "model-00001-of-00003.safetensors",
709
+ "vision_backbone.fused_featurizer.blocks.12.attn.qkv.bias": "model-00001-of-00003.safetensors",
710
+ "vision_backbone.fused_featurizer.blocks.12.attn.qkv.weight": "model-00001-of-00003.safetensors",
711
+ "vision_backbone.fused_featurizer.blocks.12.mlp.fc1.bias": "model-00001-of-00003.safetensors",
712
+ "vision_backbone.fused_featurizer.blocks.12.mlp.fc1.weight": "model-00001-of-00003.safetensors",
713
+ "vision_backbone.fused_featurizer.blocks.12.mlp.fc2.bias": "model-00001-of-00003.safetensors",
714
+ "vision_backbone.fused_featurizer.blocks.12.mlp.fc2.weight": "model-00001-of-00003.safetensors",
715
+ "vision_backbone.fused_featurizer.blocks.12.norm1.bias": "model-00001-of-00003.safetensors",
716
+ "vision_backbone.fused_featurizer.blocks.12.norm1.weight": "model-00001-of-00003.safetensors",
717
+ "vision_backbone.fused_featurizer.blocks.12.norm2.bias": "model-00001-of-00003.safetensors",
718
+ "vision_backbone.fused_featurizer.blocks.12.norm2.weight": "model-00001-of-00003.safetensors",
719
+ "vision_backbone.fused_featurizer.blocks.13.attn.proj.bias": "model-00001-of-00003.safetensors",
720
+ "vision_backbone.fused_featurizer.blocks.13.attn.proj.weight": "model-00001-of-00003.safetensors",
721
+ "vision_backbone.fused_featurizer.blocks.13.attn.qkv.bias": "model-00001-of-00003.safetensors",
722
+ "vision_backbone.fused_featurizer.blocks.13.attn.qkv.weight": "model-00001-of-00003.safetensors",
723
+ "vision_backbone.fused_featurizer.blocks.13.mlp.fc1.bias": "model-00001-of-00003.safetensors",
724
+ "vision_backbone.fused_featurizer.blocks.13.mlp.fc1.weight": "model-00001-of-00003.safetensors",
725
+ "vision_backbone.fused_featurizer.blocks.13.mlp.fc2.bias": "model-00001-of-00003.safetensors",
726
+ "vision_backbone.fused_featurizer.blocks.13.mlp.fc2.weight": "model-00001-of-00003.safetensors",
727
+ "vision_backbone.fused_featurizer.blocks.13.norm1.bias": "model-00001-of-00003.safetensors",
728
+ "vision_backbone.fused_featurizer.blocks.13.norm1.weight": "model-00001-of-00003.safetensors",
729
+ "vision_backbone.fused_featurizer.blocks.13.norm2.bias": "model-00001-of-00003.safetensors",
730
+ "vision_backbone.fused_featurizer.blocks.13.norm2.weight": "model-00001-of-00003.safetensors",
731
+ "vision_backbone.fused_featurizer.blocks.14.attn.proj.bias": "model-00001-of-00003.safetensors",
732
+ "vision_backbone.fused_featurizer.blocks.14.attn.proj.weight": "model-00001-of-00003.safetensors",
733
+ "vision_backbone.fused_featurizer.blocks.14.attn.qkv.bias": "model-00001-of-00003.safetensors",
734
+ "vision_backbone.fused_featurizer.blocks.14.attn.qkv.weight": "model-00001-of-00003.safetensors",
735
+ "vision_backbone.fused_featurizer.blocks.14.mlp.fc1.bias": "model-00001-of-00003.safetensors",
736
+ "vision_backbone.fused_featurizer.blocks.14.mlp.fc1.weight": "model-00001-of-00003.safetensors",
737
+ "vision_backbone.fused_featurizer.blocks.14.mlp.fc2.bias": "model-00001-of-00003.safetensors",
738
+ "vision_backbone.fused_featurizer.blocks.14.mlp.fc2.weight": "model-00001-of-00003.safetensors",
739
+ "vision_backbone.fused_featurizer.blocks.14.norm1.bias": "model-00001-of-00003.safetensors",
740
+ "vision_backbone.fused_featurizer.blocks.14.norm1.weight": "model-00001-of-00003.safetensors",
741
+ "vision_backbone.fused_featurizer.blocks.14.norm2.bias": "model-00001-of-00003.safetensors",
742
+ "vision_backbone.fused_featurizer.blocks.14.norm2.weight": "model-00001-of-00003.safetensors",
743
+ "vision_backbone.fused_featurizer.blocks.15.attn.proj.bias": "model-00001-of-00003.safetensors",
744
+ "vision_backbone.fused_featurizer.blocks.15.attn.proj.weight": "model-00001-of-00003.safetensors",
745
+ "vision_backbone.fused_featurizer.blocks.15.attn.qkv.bias": "model-00001-of-00003.safetensors",
746
+ "vision_backbone.fused_featurizer.blocks.15.attn.qkv.weight": "model-00001-of-00003.safetensors",
747
+ "vision_backbone.fused_featurizer.blocks.15.mlp.fc1.bias": "model-00001-of-00003.safetensors",
748
+ "vision_backbone.fused_featurizer.blocks.15.mlp.fc1.weight": "model-00001-of-00003.safetensors",
749
+ "vision_backbone.fused_featurizer.blocks.15.mlp.fc2.bias": "model-00001-of-00003.safetensors",
750
+ "vision_backbone.fused_featurizer.blocks.15.mlp.fc2.weight": "model-00001-of-00003.safetensors",
751
+ "vision_backbone.fused_featurizer.blocks.15.norm1.bias": "model-00001-of-00003.safetensors",
752
+ "vision_backbone.fused_featurizer.blocks.15.norm1.weight": "model-00001-of-00003.safetensors",
753
+ "vision_backbone.fused_featurizer.blocks.15.norm2.bias": "model-00001-of-00003.safetensors",
754
+ "vision_backbone.fused_featurizer.blocks.15.norm2.weight": "model-00001-of-00003.safetensors",
755
+ "vision_backbone.fused_featurizer.blocks.16.attn.proj.bias": "model-00001-of-00003.safetensors",
756
+ "vision_backbone.fused_featurizer.blocks.16.attn.proj.weight": "model-00001-of-00003.safetensors",
757
+ "vision_backbone.fused_featurizer.blocks.16.attn.qkv.bias": "model-00001-of-00003.safetensors",
758
+ "vision_backbone.fused_featurizer.blocks.16.attn.qkv.weight": "model-00001-of-00003.safetensors",
759
+ "vision_backbone.fused_featurizer.blocks.16.mlp.fc1.bias": "model-00001-of-00003.safetensors",
760
+ "vision_backbone.fused_featurizer.blocks.16.mlp.fc1.weight": "model-00001-of-00003.safetensors",
761
+ "vision_backbone.fused_featurizer.blocks.16.mlp.fc2.bias": "model-00001-of-00003.safetensors",
762
+ "vision_backbone.fused_featurizer.blocks.16.mlp.fc2.weight": "model-00001-of-00003.safetensors",
763
+ "vision_backbone.fused_featurizer.blocks.16.norm1.bias": "model-00001-of-00003.safetensors",
764
+ "vision_backbone.fused_featurizer.blocks.16.norm1.weight": "model-00001-of-00003.safetensors",
765
+ "vision_backbone.fused_featurizer.blocks.16.norm2.bias": "model-00001-of-00003.safetensors",
766
+ "vision_backbone.fused_featurizer.blocks.16.norm2.weight": "model-00001-of-00003.safetensors",
767
+ "vision_backbone.fused_featurizer.blocks.17.attn.proj.bias": "model-00001-of-00003.safetensors",
768
+ "vision_backbone.fused_featurizer.blocks.17.attn.proj.weight": "model-00001-of-00003.safetensors",
769
+ "vision_backbone.fused_featurizer.blocks.17.attn.qkv.bias": "model-00001-of-00003.safetensors",
770
+ "vision_backbone.fused_featurizer.blocks.17.attn.qkv.weight": "model-00001-of-00003.safetensors",
771
+ "vision_backbone.fused_featurizer.blocks.17.mlp.fc1.bias": "model-00001-of-00003.safetensors",
772
+ "vision_backbone.fused_featurizer.blocks.17.mlp.fc1.weight": "model-00001-of-00003.safetensors",
773
+ "vision_backbone.fused_featurizer.blocks.17.mlp.fc2.bias": "model-00001-of-00003.safetensors",
774
+ "vision_backbone.fused_featurizer.blocks.17.mlp.fc2.weight": "model-00001-of-00003.safetensors",
775
+ "vision_backbone.fused_featurizer.blocks.17.norm1.bias": "model-00001-of-00003.safetensors",
776
+ "vision_backbone.fused_featurizer.blocks.17.norm1.weight": "model-00001-of-00003.safetensors",
777
+ "vision_backbone.fused_featurizer.blocks.17.norm2.bias": "model-00001-of-00003.safetensors",
778
+ "vision_backbone.fused_featurizer.blocks.17.norm2.weight": "model-00001-of-00003.safetensors",
779
+ "vision_backbone.fused_featurizer.blocks.18.attn.proj.bias": "model-00001-of-00003.safetensors",
780
+ "vision_backbone.fused_featurizer.blocks.18.attn.proj.weight": "model-00001-of-00003.safetensors",
781
+ "vision_backbone.fused_featurizer.blocks.18.attn.qkv.bias": "model-00001-of-00003.safetensors",
782
+ "vision_backbone.fused_featurizer.blocks.18.attn.qkv.weight": "model-00001-of-00003.safetensors",
783
+ "vision_backbone.fused_featurizer.blocks.18.mlp.fc1.bias": "model-00001-of-00003.safetensors",
784
+ "vision_backbone.fused_featurizer.blocks.18.mlp.fc1.weight": "model-00001-of-00003.safetensors",
785
+ "vision_backbone.fused_featurizer.blocks.18.mlp.fc2.bias": "model-00001-of-00003.safetensors",
786
+ "vision_backbone.fused_featurizer.blocks.18.mlp.fc2.weight": "model-00001-of-00003.safetensors",
787
+ "vision_backbone.fused_featurizer.blocks.18.norm1.bias": "model-00001-of-00003.safetensors",
788
+ "vision_backbone.fused_featurizer.blocks.18.norm1.weight": "model-00001-of-00003.safetensors",
789
+ "vision_backbone.fused_featurizer.blocks.18.norm2.bias": "model-00001-of-00003.safetensors",
790
+ "vision_backbone.fused_featurizer.blocks.18.norm2.weight": "model-00001-of-00003.safetensors",
791
+ "vision_backbone.fused_featurizer.blocks.19.attn.proj.bias": "model-00001-of-00003.safetensors",
792
+ "vision_backbone.fused_featurizer.blocks.19.attn.proj.weight": "model-00001-of-00003.safetensors",
793
+ "vision_backbone.fused_featurizer.blocks.19.attn.qkv.bias": "model-00001-of-00003.safetensors",
794
+ "vision_backbone.fused_featurizer.blocks.19.attn.qkv.weight": "model-00001-of-00003.safetensors",
795
+ "vision_backbone.fused_featurizer.blocks.19.mlp.fc1.bias": "model-00001-of-00003.safetensors",
796
+ "vision_backbone.fused_featurizer.blocks.19.mlp.fc1.weight": "model-00001-of-00003.safetensors",
797
+ "vision_backbone.fused_featurizer.blocks.19.mlp.fc2.bias": "model-00001-of-00003.safetensors",
798
+ "vision_backbone.fused_featurizer.blocks.19.mlp.fc2.weight": "model-00001-of-00003.safetensors",
799
+ "vision_backbone.fused_featurizer.blocks.19.norm1.bias": "model-00001-of-00003.safetensors",
800
+ "vision_backbone.fused_featurizer.blocks.19.norm1.weight": "model-00001-of-00003.safetensors",
801
+ "vision_backbone.fused_featurizer.blocks.19.norm2.bias": "model-00001-of-00003.safetensors",
802
+ "vision_backbone.fused_featurizer.blocks.19.norm2.weight": "model-00001-of-00003.safetensors",
803
+ "vision_backbone.fused_featurizer.blocks.2.attn.proj.bias": "model-00001-of-00003.safetensors",
804
+ "vision_backbone.fused_featurizer.blocks.2.attn.proj.weight": "model-00001-of-00003.safetensors",
805
+ "vision_backbone.fused_featurizer.blocks.2.attn.qkv.bias": "model-00001-of-00003.safetensors",
806
+ "vision_backbone.fused_featurizer.blocks.2.attn.qkv.weight": "model-00001-of-00003.safetensors",
807
+ "vision_backbone.fused_featurizer.blocks.2.mlp.fc1.bias": "model-00001-of-00003.safetensors",
808
+ "vision_backbone.fused_featurizer.blocks.2.mlp.fc1.weight": "model-00001-of-00003.safetensors",
809
+ "vision_backbone.fused_featurizer.blocks.2.mlp.fc2.bias": "model-00001-of-00003.safetensors",
810
+ "vision_backbone.fused_featurizer.blocks.2.mlp.fc2.weight": "model-00001-of-00003.safetensors",
811
+ "vision_backbone.fused_featurizer.blocks.2.norm1.bias": "model-00001-of-00003.safetensors",
812
+ "vision_backbone.fused_featurizer.blocks.2.norm1.weight": "model-00001-of-00003.safetensors",
813
+ "vision_backbone.fused_featurizer.blocks.2.norm2.bias": "model-00001-of-00003.safetensors",
814
+ "vision_backbone.fused_featurizer.blocks.2.norm2.weight": "model-00001-of-00003.safetensors",
815
+ "vision_backbone.fused_featurizer.blocks.20.attn.proj.bias": "model-00001-of-00003.safetensors",
816
+ "vision_backbone.fused_featurizer.blocks.20.attn.proj.weight": "model-00001-of-00003.safetensors",
817
+ "vision_backbone.fused_featurizer.blocks.20.attn.qkv.bias": "model-00001-of-00003.safetensors",
818
+ "vision_backbone.fused_featurizer.blocks.20.attn.qkv.weight": "model-00001-of-00003.safetensors",
819
+ "vision_backbone.fused_featurizer.blocks.20.mlp.fc1.bias": "model-00001-of-00003.safetensors",
820
+ "vision_backbone.fused_featurizer.blocks.20.mlp.fc1.weight": "model-00001-of-00003.safetensors",
821
+ "vision_backbone.fused_featurizer.blocks.20.mlp.fc2.bias": "model-00001-of-00003.safetensors",
822
+ "vision_backbone.fused_featurizer.blocks.20.mlp.fc2.weight": "model-00001-of-00003.safetensors",
823
+ "vision_backbone.fused_featurizer.blocks.20.norm1.bias": "model-00001-of-00003.safetensors",
824
+ "vision_backbone.fused_featurizer.blocks.20.norm1.weight": "model-00001-of-00003.safetensors",
825
+ "vision_backbone.fused_featurizer.blocks.20.norm2.bias": "model-00001-of-00003.safetensors",
826
+ "vision_backbone.fused_featurizer.blocks.20.norm2.weight": "model-00001-of-00003.safetensors",
827
+ "vision_backbone.fused_featurizer.blocks.21.attn.proj.bias": "model-00001-of-00003.safetensors",
828
+ "vision_backbone.fused_featurizer.blocks.21.attn.proj.weight": "model-00001-of-00003.safetensors",
829
+ "vision_backbone.fused_featurizer.blocks.21.attn.qkv.bias": "model-00001-of-00003.safetensors",
830
+ "vision_backbone.fused_featurizer.blocks.21.attn.qkv.weight": "model-00001-of-00003.safetensors",
831
+ "vision_backbone.fused_featurizer.blocks.21.mlp.fc1.bias": "model-00001-of-00003.safetensors",
832
+ "vision_backbone.fused_featurizer.blocks.21.mlp.fc1.weight": "model-00001-of-00003.safetensors",
833
+ "vision_backbone.fused_featurizer.blocks.21.mlp.fc2.bias": "model-00001-of-00003.safetensors",
834
+ "vision_backbone.fused_featurizer.blocks.21.mlp.fc2.weight": "model-00001-of-00003.safetensors",
835
+ "vision_backbone.fused_featurizer.blocks.21.norm1.bias": "model-00001-of-00003.safetensors",
836
+ "vision_backbone.fused_featurizer.blocks.21.norm1.weight": "model-00001-of-00003.safetensors",
837
+ "vision_backbone.fused_featurizer.blocks.21.norm2.bias": "model-00001-of-00003.safetensors",
838
+ "vision_backbone.fused_featurizer.blocks.21.norm2.weight": "model-00001-of-00003.safetensors",
839
+ "vision_backbone.fused_featurizer.blocks.22.attn.proj.bias": "model-00001-of-00003.safetensors",
840
+ "vision_backbone.fused_featurizer.blocks.22.attn.proj.weight": "model-00001-of-00003.safetensors",
841
+ "vision_backbone.fused_featurizer.blocks.22.attn.qkv.bias": "model-00001-of-00003.safetensors",
842
+ "vision_backbone.fused_featurizer.blocks.22.attn.qkv.weight": "model-00001-of-00003.safetensors",
843
+ "vision_backbone.fused_featurizer.blocks.22.mlp.fc1.bias": "model-00001-of-00003.safetensors",
844
+ "vision_backbone.fused_featurizer.blocks.22.mlp.fc1.weight": "model-00001-of-00003.safetensors",
845
+ "vision_backbone.fused_featurizer.blocks.22.mlp.fc2.bias": "model-00001-of-00003.safetensors",
846
+ "vision_backbone.fused_featurizer.blocks.22.mlp.fc2.weight": "model-00001-of-00003.safetensors",
847
+ "vision_backbone.fused_featurizer.blocks.22.norm1.bias": "model-00001-of-00003.safetensors",
848
+ "vision_backbone.fused_featurizer.blocks.22.norm1.weight": "model-00001-of-00003.safetensors",
849
+ "vision_backbone.fused_featurizer.blocks.22.norm2.bias": "model-00001-of-00003.safetensors",
850
+ "vision_backbone.fused_featurizer.blocks.22.norm2.weight": "model-00001-of-00003.safetensors",
851
+ "vision_backbone.fused_featurizer.blocks.23.attn.proj.bias": "model-00001-of-00003.safetensors",
852
+ "vision_backbone.fused_featurizer.blocks.23.attn.proj.weight": "model-00001-of-00003.safetensors",
853
+ "vision_backbone.fused_featurizer.blocks.23.attn.qkv.bias": "model-00001-of-00003.safetensors",
854
+ "vision_backbone.fused_featurizer.blocks.23.attn.qkv.weight": "model-00001-of-00003.safetensors",
855
+ "vision_backbone.fused_featurizer.blocks.23.mlp.fc1.bias": "model-00001-of-00003.safetensors",
856
+ "vision_backbone.fused_featurizer.blocks.23.mlp.fc1.weight": "model-00001-of-00003.safetensors",
857
+ "vision_backbone.fused_featurizer.blocks.23.mlp.fc2.bias": "model-00001-of-00003.safetensors",
858
+ "vision_backbone.fused_featurizer.blocks.23.mlp.fc2.weight": "model-00001-of-00003.safetensors",
859
+ "vision_backbone.fused_featurizer.blocks.23.norm1.bias": "model-00001-of-00003.safetensors",
860
+ "vision_backbone.fused_featurizer.blocks.23.norm1.weight": "model-00001-of-00003.safetensors",
861
+ "vision_backbone.fused_featurizer.blocks.23.norm2.bias": "model-00001-of-00003.safetensors",
862
+ "vision_backbone.fused_featurizer.blocks.23.norm2.weight": "model-00001-of-00003.safetensors",
863
+ "vision_backbone.fused_featurizer.blocks.24.attn.proj.bias": "model-00001-of-00003.safetensors",
864
+ "vision_backbone.fused_featurizer.blocks.24.attn.proj.weight": "model-00001-of-00003.safetensors",
865
+ "vision_backbone.fused_featurizer.blocks.24.attn.qkv.bias": "model-00001-of-00003.safetensors",
866
+ "vision_backbone.fused_featurizer.blocks.24.attn.qkv.weight": "model-00001-of-00003.safetensors",
867
+ "vision_backbone.fused_featurizer.blocks.24.mlp.fc1.bias": "model-00001-of-00003.safetensors",
868
+ "vision_backbone.fused_featurizer.blocks.24.mlp.fc1.weight": "model-00001-of-00003.safetensors",
869
+ "vision_backbone.fused_featurizer.blocks.24.mlp.fc2.bias": "model-00001-of-00003.safetensors",
870
+ "vision_backbone.fused_featurizer.blocks.24.mlp.fc2.weight": "model-00001-of-00003.safetensors",
871
+ "vision_backbone.fused_featurizer.blocks.24.norm1.bias": "model-00001-of-00003.safetensors",
872
+ "vision_backbone.fused_featurizer.blocks.24.norm1.weight": "model-00001-of-00003.safetensors",
873
+ "vision_backbone.fused_featurizer.blocks.24.norm2.bias": "model-00001-of-00003.safetensors",
874
+ "vision_backbone.fused_featurizer.blocks.24.norm2.weight": "model-00001-of-00003.safetensors",
875
+ "vision_backbone.fused_featurizer.blocks.25.attn.proj.bias": "model-00001-of-00003.safetensors",
876
+ "vision_backbone.fused_featurizer.blocks.25.attn.proj.weight": "model-00001-of-00003.safetensors",
877
+ "vision_backbone.fused_featurizer.blocks.25.attn.qkv.bias": "model-00001-of-00003.safetensors",
878
+ "vision_backbone.fused_featurizer.blocks.25.attn.qkv.weight": "model-00001-of-00003.safetensors",
879
+ "vision_backbone.fused_featurizer.blocks.25.mlp.fc1.bias": "model-00001-of-00003.safetensors",
880
+ "vision_backbone.fused_featurizer.blocks.25.mlp.fc1.weight": "model-00001-of-00003.safetensors",
881
+ "vision_backbone.fused_featurizer.blocks.25.mlp.fc2.bias": "model-00001-of-00003.safetensors",
882
+ "vision_backbone.fused_featurizer.blocks.25.mlp.fc2.weight": "model-00001-of-00003.safetensors",
883
+ "vision_backbone.fused_featurizer.blocks.25.norm1.bias": "model-00001-of-00003.safetensors",
884
+ "vision_backbone.fused_featurizer.blocks.25.norm1.weight": "model-00001-of-00003.safetensors",
885
+ "vision_backbone.fused_featurizer.blocks.25.norm2.bias": "model-00001-of-00003.safetensors",
886
+ "vision_backbone.fused_featurizer.blocks.25.norm2.weight": "model-00001-of-00003.safetensors",
887
+ "vision_backbone.fused_featurizer.blocks.26.attn.proj.bias": "model-00001-of-00003.safetensors",
888
+ "vision_backbone.fused_featurizer.blocks.26.attn.proj.weight": "model-00001-of-00003.safetensors",
889
+ "vision_backbone.fused_featurizer.blocks.26.attn.qkv.bias": "model-00001-of-00003.safetensors",
890
+ "vision_backbone.fused_featurizer.blocks.26.attn.qkv.weight": "model-00001-of-00003.safetensors",
891
+ "vision_backbone.fused_featurizer.blocks.26.mlp.fc1.bias": "model-00001-of-00003.safetensors",
892
+ "vision_backbone.fused_featurizer.blocks.26.mlp.fc1.weight": "model-00001-of-00003.safetensors",
893
+ "vision_backbone.fused_featurizer.blocks.26.mlp.fc2.bias": "model-00001-of-00003.safetensors",
894
+ "vision_backbone.fused_featurizer.blocks.26.mlp.fc2.weight": "model-00001-of-00003.safetensors",
895
+ "vision_backbone.fused_featurizer.blocks.26.norm1.bias": "model-00001-of-00003.safetensors",
896
+ "vision_backbone.fused_featurizer.blocks.26.norm1.weight": "model-00001-of-00003.safetensors",
897
+ "vision_backbone.fused_featurizer.blocks.26.norm2.bias": "model-00001-of-00003.safetensors",
898
+ "vision_backbone.fused_featurizer.blocks.26.norm2.weight": "model-00001-of-00003.safetensors",
899
+ "vision_backbone.fused_featurizer.blocks.3.attn.proj.bias": "model-00001-of-00003.safetensors",
900
+ "vision_backbone.fused_featurizer.blocks.3.attn.proj.weight": "model-00001-of-00003.safetensors",
901
+ "vision_backbone.fused_featurizer.blocks.3.attn.qkv.bias": "model-00001-of-00003.safetensors",
902
+ "vision_backbone.fused_featurizer.blocks.3.attn.qkv.weight": "model-00001-of-00003.safetensors",
903
+ "vision_backbone.fused_featurizer.blocks.3.mlp.fc1.bias": "model-00001-of-00003.safetensors",
904
+ "vision_backbone.fused_featurizer.blocks.3.mlp.fc1.weight": "model-00001-of-00003.safetensors",
905
+ "vision_backbone.fused_featurizer.blocks.3.mlp.fc2.bias": "model-00001-of-00003.safetensors",
906
+ "vision_backbone.fused_featurizer.blocks.3.mlp.fc2.weight": "model-00001-of-00003.safetensors",
907
+ "vision_backbone.fused_featurizer.blocks.3.norm1.bias": "model-00001-of-00003.safetensors",
908
+ "vision_backbone.fused_featurizer.blocks.3.norm1.weight": "model-00001-of-00003.safetensors",
909
+ "vision_backbone.fused_featurizer.blocks.3.norm2.bias": "model-00001-of-00003.safetensors",
910
+ "vision_backbone.fused_featurizer.blocks.3.norm2.weight": "model-00001-of-00003.safetensors",
911
+ "vision_backbone.fused_featurizer.blocks.4.attn.proj.bias": "model-00001-of-00003.safetensors",
912
+ "vision_backbone.fused_featurizer.blocks.4.attn.proj.weight": "model-00001-of-00003.safetensors",
913
+ "vision_backbone.fused_featurizer.blocks.4.attn.qkv.bias": "model-00001-of-00003.safetensors",
914
+ "vision_backbone.fused_featurizer.blocks.4.attn.qkv.weight": "model-00001-of-00003.safetensors",
915
+ "vision_backbone.fused_featurizer.blocks.4.mlp.fc1.bias": "model-00001-of-00003.safetensors",
916
+ "vision_backbone.fused_featurizer.blocks.4.mlp.fc1.weight": "model-00001-of-00003.safetensors",
917
+ "vision_backbone.fused_featurizer.blocks.4.mlp.fc2.bias": "model-00001-of-00003.safetensors",
918
+ "vision_backbone.fused_featurizer.blocks.4.mlp.fc2.weight": "model-00001-of-00003.safetensors",
919
+ "vision_backbone.fused_featurizer.blocks.4.norm1.bias": "model-00001-of-00003.safetensors",
920
+ "vision_backbone.fused_featurizer.blocks.4.norm1.weight": "model-00001-of-00003.safetensors",
921
+ "vision_backbone.fused_featurizer.blocks.4.norm2.bias": "model-00001-of-00003.safetensors",
922
+ "vision_backbone.fused_featurizer.blocks.4.norm2.weight": "model-00001-of-00003.safetensors",
923
+ "vision_backbone.fused_featurizer.blocks.5.attn.proj.bias": "model-00001-of-00003.safetensors",
924
+ "vision_backbone.fused_featurizer.blocks.5.attn.proj.weight": "model-00001-of-00003.safetensors",
925
+ "vision_backbone.fused_featurizer.blocks.5.attn.qkv.bias": "model-00001-of-00003.safetensors",
926
+ "vision_backbone.fused_featurizer.blocks.5.attn.qkv.weight": "model-00001-of-00003.safetensors",
927
+ "vision_backbone.fused_featurizer.blocks.5.mlp.fc1.bias": "model-00001-of-00003.safetensors",
928
+ "vision_backbone.fused_featurizer.blocks.5.mlp.fc1.weight": "model-00001-of-00003.safetensors",
929
+ "vision_backbone.fused_featurizer.blocks.5.mlp.fc2.bias": "model-00001-of-00003.safetensors",
930
+ "vision_backbone.fused_featurizer.blocks.5.mlp.fc2.weight": "model-00001-of-00003.safetensors",
931
+ "vision_backbone.fused_featurizer.blocks.5.norm1.bias": "model-00001-of-00003.safetensors",
932
+ "vision_backbone.fused_featurizer.blocks.5.norm1.weight": "model-00001-of-00003.safetensors",
933
+ "vision_backbone.fused_featurizer.blocks.5.norm2.bias": "model-00001-of-00003.safetensors",
934
+ "vision_backbone.fused_featurizer.blocks.5.norm2.weight": "model-00001-of-00003.safetensors",
935
+ "vision_backbone.fused_featurizer.blocks.6.attn.proj.bias": "model-00001-of-00003.safetensors",
936
+ "vision_backbone.fused_featurizer.blocks.6.attn.proj.weight": "model-00001-of-00003.safetensors",
937
+ "vision_backbone.fused_featurizer.blocks.6.attn.qkv.bias": "model-00001-of-00003.safetensors",
938
+ "vision_backbone.fused_featurizer.blocks.6.attn.qkv.weight": "model-00001-of-00003.safetensors",
939
+ "vision_backbone.fused_featurizer.blocks.6.mlp.fc1.bias": "model-00001-of-00003.safetensors",
940
+ "vision_backbone.fused_featurizer.blocks.6.mlp.fc1.weight": "model-00001-of-00003.safetensors",
941
+ "vision_backbone.fused_featurizer.blocks.6.mlp.fc2.bias": "model-00001-of-00003.safetensors",
942
+ "vision_backbone.fused_featurizer.blocks.6.mlp.fc2.weight": "model-00001-of-00003.safetensors",
943
+ "vision_backbone.fused_featurizer.blocks.6.norm1.bias": "model-00001-of-00003.safetensors",
944
+ "vision_backbone.fused_featurizer.blocks.6.norm1.weight": "model-00001-of-00003.safetensors",
945
+ "vision_backbone.fused_featurizer.blocks.6.norm2.bias": "model-00001-of-00003.safetensors",
946
+ "vision_backbone.fused_featurizer.blocks.6.norm2.weight": "model-00001-of-00003.safetensors",
947
+ "vision_backbone.fused_featurizer.blocks.7.attn.proj.bias": "model-00001-of-00003.safetensors",
948
+ "vision_backbone.fused_featurizer.blocks.7.attn.proj.weight": "model-00001-of-00003.safetensors",
949
+ "vision_backbone.fused_featurizer.blocks.7.attn.qkv.bias": "model-00001-of-00003.safetensors",
950
+ "vision_backbone.fused_featurizer.blocks.7.attn.qkv.weight": "model-00001-of-00003.safetensors",
951
+ "vision_backbone.fused_featurizer.blocks.7.mlp.fc1.bias": "model-00001-of-00003.safetensors",
952
+ "vision_backbone.fused_featurizer.blocks.7.mlp.fc1.weight": "model-00001-of-00003.safetensors",
953
+ "vision_backbone.fused_featurizer.blocks.7.mlp.fc2.bias": "model-00001-of-00003.safetensors",
954
+ "vision_backbone.fused_featurizer.blocks.7.mlp.fc2.weight": "model-00001-of-00003.safetensors",
955
+ "vision_backbone.fused_featurizer.blocks.7.norm1.bias": "model-00001-of-00003.safetensors",
956
+ "vision_backbone.fused_featurizer.blocks.7.norm1.weight": "model-00001-of-00003.safetensors",
957
+ "vision_backbone.fused_featurizer.blocks.7.norm2.bias": "model-00001-of-00003.safetensors",
958
+ "vision_backbone.fused_featurizer.blocks.7.norm2.weight": "model-00001-of-00003.safetensors",
959
+ "vision_backbone.fused_featurizer.blocks.8.attn.proj.bias": "model-00001-of-00003.safetensors",
960
+ "vision_backbone.fused_featurizer.blocks.8.attn.proj.weight": "model-00001-of-00003.safetensors",
961
+ "vision_backbone.fused_featurizer.blocks.8.attn.qkv.bias": "model-00001-of-00003.safetensors",
962
+ "vision_backbone.fused_featurizer.blocks.8.attn.qkv.weight": "model-00001-of-00003.safetensors",
963
+ "vision_backbone.fused_featurizer.blocks.8.mlp.fc1.bias": "model-00001-of-00003.safetensors",
964
+ "vision_backbone.fused_featurizer.blocks.8.mlp.fc1.weight": "model-00001-of-00003.safetensors",
965
+ "vision_backbone.fused_featurizer.blocks.8.mlp.fc2.bias": "model-00001-of-00003.safetensors",
966
+ "vision_backbone.fused_featurizer.blocks.8.mlp.fc2.weight": "model-00001-of-00003.safetensors",
967
+ "vision_backbone.fused_featurizer.blocks.8.norm1.bias": "model-00001-of-00003.safetensors",
968
+ "vision_backbone.fused_featurizer.blocks.8.norm1.weight": "model-00001-of-00003.safetensors",
969
+ "vision_backbone.fused_featurizer.blocks.8.norm2.bias": "model-00001-of-00003.safetensors",
970
+ "vision_backbone.fused_featurizer.blocks.8.norm2.weight": "model-00001-of-00003.safetensors",
971
+ "vision_backbone.fused_featurizer.blocks.9.attn.proj.bias": "model-00001-of-00003.safetensors",
972
+ "vision_backbone.fused_featurizer.blocks.9.attn.proj.weight": "model-00001-of-00003.safetensors",
973
+ "vision_backbone.fused_featurizer.blocks.9.attn.qkv.bias": "model-00001-of-00003.safetensors",
974
+ "vision_backbone.fused_featurizer.blocks.9.attn.qkv.weight": "model-00001-of-00003.safetensors",
975
+ "vision_backbone.fused_featurizer.blocks.9.mlp.fc1.bias": "model-00001-of-00003.safetensors",
976
+ "vision_backbone.fused_featurizer.blocks.9.mlp.fc1.weight": "model-00001-of-00003.safetensors",
977
+ "vision_backbone.fused_featurizer.blocks.9.mlp.fc2.bias": "model-00001-of-00003.safetensors",
978
+ "vision_backbone.fused_featurizer.blocks.9.mlp.fc2.weight": "model-00001-of-00003.safetensors",
979
+ "vision_backbone.fused_featurizer.blocks.9.norm1.bias": "model-00001-of-00003.safetensors",
980
+ "vision_backbone.fused_featurizer.blocks.9.norm1.weight": "model-00001-of-00003.safetensors",
981
+ "vision_backbone.fused_featurizer.blocks.9.norm2.bias": "model-00001-of-00003.safetensors",
982
+ "vision_backbone.fused_featurizer.blocks.9.norm2.weight": "model-00001-of-00003.safetensors",
983
+ "vision_backbone.fused_featurizer.norm.bias": "model-00001-of-00003.safetensors",
984
+ "vision_backbone.fused_featurizer.norm.weight": "model-00001-of-00003.safetensors",
985
+ "vision_backbone.fused_featurizer.patch_embed.proj.bias": "model-00001-of-00003.safetensors",
986
+ "vision_backbone.fused_featurizer.patch_embed.proj.weight": "model-00001-of-00003.safetensors",
987
+ "vision_backbone.fused_featurizer.pos_embed": "model-00001-of-00003.safetensors"
988
+ }
989
+ }
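The index above is the standard safetensors sharding manifest: its "weight_map" (the usual layout for these files) assigns every parameter name to the shard file that stores it, and transformers consumes it automatically when loading the sharded checkpoint. A minimal sketch for resolving a tensor's shard by hand (the local path below is illustrative, not part of the diff):

import json

# Read the manifest shown above and look up which shard holds a given parameter.
with open("output_hf_model_openx/model.safetensors.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]
print(weight_map["vision_backbone.fused_featurizer.pos_embed"])
# -> "model-00001-of-00003.safetensors"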
output_hf_model_openx/modeling_prismatic.py ADDED
@@ -0,0 +1,562 @@
1
+ """
2
+ modeling_prismatic.py
3
+
4
+ Core HuggingFace-style PrismaticPreTrainedModel and PrismaticForConditionalGeneration class definitions, inheriting
5
+ from the default `transformers.PreTrainedModel`. Meant to be standalone and self-contained, but exactly replicate the
6
+ logic in `prismatic.models.vlms.prismatic.py`.
7
+
8
+ Note =>> for the time being, not adding the custom HF "docstring" formatting.
9
+
10
+ References [LLaVa, IDEFICS-2]:
11
+ => https://github.com/huggingface/transformers/blob/main/src/transformers/models/llava/modeling_llava.py
12
+ => https://github.com/huggingface/transformers/blob/main/src/transformers/models/idefics2/modeling_idefics2.py
13
+ """
14
+
15
+ import logging
16
+ from dataclasses import dataclass
17
+ from functools import partial
18
+ from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union
19
+
20
+ import numpy as np
21
+ import timm
22
+ import tokenizers
23
+ import torch
24
+ import torch.nn as nn
25
+ import transformers
26
+ from timm.models.vision_transformer import LayerScale
27
+ from transformers import AutoModelForCausalLM, PretrainedConfig, PreTrainedModel
28
+ from transformers.modeling_outputs import ModelOutput
29
+
30
+ from .configuration_prismatic import OpenVLAConfig, PrismaticConfig
31
+
32
+ # Get Logger
33
+ logger = logging.getLogger(__name__)
34
+
35
+
36
+ # === PyTorch/HuggingFace Default IGNORE_INDEX (for CrossEntropyLoss labels)
37
+ IGNORE_INDEX = -100
38
+
39
+
40
+ # === Utility Functions for Monkey-Patching ===
41
+ def unpack_tuple(fn: Callable[[Any], Tuple[Any]]) -> Callable[[Any], Any]:
42
+ def wrapper(*args: Any, **kwargs: Any) -> Any:
43
+ result = fn(*args, **kwargs)
44
+ return result[0] if isinstance(result, tuple) else result
45
+
46
+ return wrapper
47
+
48
+
49
+ # HF Transformers overwrites parameters with names containing `gamma`; we're going to patch VisionBackbone.LayerScale.
50
+ # =>> TIMM :: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py#L109
51
+ # =>> Transformers :: https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_utils.py#L3960
52
+ def _ls_new_forward(self, x: torch.Tensor) -> torch.Tensor:
53
+ return x.mul_(self.scale_factor) if self.inplace else x * self.scale_factor
54
+
55
+
56
+ def ls_apply_patch(ls_module: LayerScale):
57
+ ls_module.scale_factor = nn.Parameter(ls_module.gamma.clone())
58
+ ls_module.forward = _ls_new_forward.__get__(ls_module, LayerScale)
59
+ del ls_module.gamma
60
+
61
+
62
+ # === Prismatic Vision Backbone (nn.Module) Definitions (w/ Fused Backbone Support) ===
63
+ class PrismaticVisionBackbone(nn.Module):
64
+ def __init__(
65
+ self,
66
+ use_fused_vision_backbone: bool,
67
+ image_sizes: List[int],
68
+ timm_model_ids: List[str],
69
+ timm_override_act_layers: List[Optional[str]],
70
+ ) -> None:
71
+ super().__init__()
72
+ self.use_fused_vision_backbone = use_fused_vision_backbone
73
+
74
+ # [Contract] Validate number of (fused) vision backbones, create "alpha" featurizer and Instantiate
75
+ # =>> Note :: Monkey-Patch the `forward()` function of the backbone to ensure FSDP-compatibility
76
+ # Hardcodes `get_intermediate_layers` to return the **SECOND-TO-LAST** layer patches!
77
+ assert len(timm_model_ids) <= 2, "Prismatic models only support up to 2 (fused) vision backbones!"
78
+ self.featurizer = timm.create_model(
79
+ timm_model_ids[0],
80
+ pretrained=False,
81
+ num_classes=0,
82
+ img_size=image_sizes[0],
83
+ act_layer=timm_override_act_layers[0],
84
+ )
85
+ self.featurizer.forward = unpack_tuple(
86
+ partial(self.featurizer.get_intermediate_layers, n={len(self.featurizer.blocks) - 2})
87
+ )
88
+ self.embed_dim = self.featurizer.embed_dim
89
+
90
+ # If `use_fused_vision_backbone` =>> create "beta" featurizer
91
+ if self.use_fused_vision_backbone:
92
+ self.fused_featurizer = timm.create_model(
93
+ timm_model_ids[1],
94
+ pretrained=False,
95
+ num_classes=0,
96
+ img_size=image_sizes[1],
97
+ act_layer=timm_override_act_layers[1],
98
+ )
99
+ self.fused_featurizer.forward = unpack_tuple(
100
+ partial(self.fused_featurizer.get_intermediate_layers, n={len(self.fused_featurizer.blocks) - 2})
101
+ )
102
+ self.embed_dim += self.fused_featurizer.embed_dim
103
+
104
+ # Patch `vision_backbone.featurizer` and `vision_backbone.fused_featurizer` with HF-Compatible LayerScale
105
+ for module in self.featurizer.modules():
106
+ if isinstance(module, LayerScale):
107
+ ls_apply_patch(module)
108
+
109
+ if self.use_fused_vision_backbone:
110
+ for module in self.fused_featurizer.modules():
111
+ if isinstance(module, LayerScale):
112
+ ls_apply_patch(module)
113
+
114
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
115
+ """Run image (`pixel_values`) through featurizer; if channel-stacked, then dispatch and sequence stack."""
116
+ if not self.use_fused_vision_backbone:
117
+ return self.featurizer(pixel_values)
118
+
119
+ # Split `pixel_values :: [bsz, 2 * 3, resolution, resolution]` =>> featurize =>> channel stack
120
+ img, img_fused = torch.split(pixel_values, [3, 3], dim=1)
121
+ patches, patches_fused = self.featurizer(img), self.fused_featurizer(img_fused)
122
+
123
+ return torch.cat([patches, patches_fused], dim=2)
124
+
125
+
126
+ # === Prismatic Projector (nn.Module) Definitions ===
127
+ class PrismaticProjector(nn.Module):
128
+ def __init__(self, use_fused_vision_backbone: bool, vision_dim: int, llm_dim: int) -> None:
129
+ super().__init__()
130
+ self.use_fused_vision_backbone = use_fused_vision_backbone
131
+ self.vision_dim, self.llm_dim = vision_dim, llm_dim
132
+
133
+ # Switch on `use_fused_vision_backbone` =>> use slightly different MLPs and projection factors!
134
+ if not self.use_fused_vision_backbone:
135
+ self.fc1 = nn.Linear(self.vision_dim, self.llm_dim, bias=True)
136
+ self.fc2 = nn.Linear(self.llm_dim, self.llm_dim, bias=True)
137
+ self.act_fn1 = nn.GELU()
138
+ else:
139
+ initial_projection_dim = 4 * vision_dim
140
+ self.fc1 = nn.Linear(self.vision_dim, initial_projection_dim, bias=True)
141
+ self.fc2 = nn.Linear(initial_projection_dim, self.llm_dim, bias=True)
142
+ self.fc3 = nn.Linear(self.llm_dim, self.llm_dim, bias=True)
143
+ self.act_fn1 = nn.GELU()
144
+ self.act_fn2 = nn.GELU()
145
+
146
+ def forward(self, img_patches: torch.Tensor) -> torch.Tensor:
147
+ if not self.use_fused_vision_backbone:
148
+ projected_features = self.fc1(img_patches)
149
+ projected_features = self.act_fn1(projected_features)
150
+ projected_features = self.fc2(projected_features)
151
+ else:
152
+ projected_features = self.fc1(img_patches)
153
+ projected_features = self.act_fn1(projected_features)
154
+ projected_features = self.fc2(projected_features)
155
+ projected_features = self.act_fn2(projected_features)
156
+ projected_features = self.fc3(projected_features)
157
+
158
+ return projected_features
159
+
160
+
161
+ # === Main HF Class Definitions ===
162
+ @dataclass
163
+ class PrismaticCausalLMOutputWithPast(ModelOutput):
164
+ """Base class for Prismatic casual (visually-conditioned) language model outputs; also exposes visual features."""
165
+
166
+ loss: Optional[torch.FloatTensor] = None
167
+ logits: torch.FloatTensor = None
168
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
169
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
170
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
171
+
172
+ # Additions for VLMs
173
+ projector_features: Optional[torch.FloatTensor] = None
174
+
175
+
176
+ class PrismaticPreTrainedModel(PreTrainedModel):
177
+ config_class: PretrainedConfig = PrismaticConfig
178
+ base_model_prefix: str = "model"
179
+ supports_gradient_checkpointing: bool = True
180
+
181
+ _no_split_modules: ClassVar[List[str]] = ["PrismaticProjector"]
182
+ _skip_keys_device_placement: str = "past_key_values"
183
+ _supports_flash_attn_2: bool = True
184
+
185
+ def _init_weights(self, module: nn.Module) -> None:
186
+ # Important :: this HF ported version is *not* meant for training from scratch; only inference and fine-tuning!
187
+ # => As such, this init_weights code is not correct; if training VLMs from scratch, use the main codebase at
188
+ # https://github.com/TRI-ML/prismatic-vlms
189
+ std = (
190
+ self.config.initializer_range
191
+ if hasattr(self.config, "initializer_range")
192
+ else self.config.text_config.initializer_range
193
+ )
194
+
195
+ if hasattr(module, "class_embedding"):
196
+ module.class_embedding.data.normal_(mean=0.0, std=std)
197
+
198
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
199
+ module.weight.data.normal_(mean=0.0, std=std)
200
+ if module.bias is not None:
201
+ module.bias.data.zero_()
202
+ elif isinstance(module, nn.Embedding):
203
+ module.weight.data.normal_(mean=0.0, std=std)
204
+ if module.padding_idx is not None:
205
+ module.weight.data[module.padding_idx].zero_()
206
+
207
+ @property
208
+ def _supports_sdpa(self) -> bool:
209
+ """Check LLM supports SDPA Attention"""
210
+ return self.language_model._supports_sdpa
211
+
212
+
213
+ class PrismaticForConditionalGeneration(PrismaticPreTrainedModel):
214
+ def __init__(self, config: PrismaticConfig) -> None:
215
+ super().__init__(config)
216
+
217
+ # [Validation] Lightweight Validate on `config` Fields + Dependency Versions
218
+ if config.use_fused_vision_backbone is None:
219
+ raise ValueError("Missing config field `use_fused_vision_backbone`")
220
+
221
+ if timm.__version__ not in {"0.9.10", "0.9.11", "0.9.12", "0.9.16"}:
222
+ raise NotImplementedError(
223
+ "TIMM Version must be >= 0.9.10 and < 1.0.0 (breaking); please raise a GitHub Issue "
224
+ "if you urgently need support for latest TIMM versions."
225
+ )
226
+
227
+ if (transformers.__version__ != "4.40.1") or (tokenizers.__version__ != "0.19.1"):
228
+ logger.warning(
229
+ f"Expected `transformers==4.40.1` and `tokenizers==0.19.1` but got "
230
+ f"`transformers=={transformers.__version__}` and `tokenizers=={tokenizers.__version__}`; "
231
+ f"there might be inference-time regressions due to dependency changes. If in doubt, please"
232
+ f"use the above versions."
233
+ )
234
+
235
+ # Instantiate PrismaticVisionBackbone (w/ Potential Fused Backbone)
236
+ self.vision_backbone = PrismaticVisionBackbone(
237
+ config.use_fused_vision_backbone, config.image_sizes, config.timm_model_ids, config.timm_override_act_layers
238
+ )
239
+
240
+ # Create Multimodal Projector
241
+ self.projector = PrismaticProjector(
242
+ config.use_fused_vision_backbone,
243
+ vision_dim=self.vision_backbone.embed_dim,
244
+ llm_dim=config.text_config.hidden_size,
245
+ )
246
+
247
+ # Instantiate LLM Backbone
248
+ self.language_model = AutoModelForCausalLM.from_config(
249
+ config.text_config, attn_implementation=config._attn_implementation
250
+ )
251
+ self.vocab_size = config.text_config.vocab_size
252
+ self.pad_token_id = config.pad_token_id
253
+
254
+ # HF Boilerplate =>> initializes weights via `_init_weights()` and sets gradient checkpointing
255
+ self.post_init()
256
+
257
+ # === `PreTrainedModel` Boilerplate ===
258
+ def get_input_embeddings(self) -> nn.Module:
259
+ return self.language_model.get_input_embeddings()
260
+
261
+ def set_input_embeddings(self, value: nn.Module) -> None:
262
+ self.language_model.set_input_embeddings(value)
263
+
264
+ def get_output_embeddings(self) -> nn.Module:
265
+ return self.language_model.get_output_embeddings()
266
+
267
+ def set_output_embeddings(self, new_embeddings: nn.Module) -> None:
268
+ self.language_model.set_output_embeddings(new_embeddings)
269
+
270
+ def get_decoder(self) -> nn.Module:
271
+ return self.language_model.get_decoder()
272
+
273
+ def set_decoder(self, decoder: nn.Module) -> None:
274
+ self.language_model.set_decoder(decoder)
275
+
276
+ def tie_weights(self) -> None:
277
+ self.language_model.tie_weights() # Note: `Llama-2` and `Mistral` don't tie weights (no-op)
278
+
279
+ def resize_token_embeddings(
280
+ self, new_num_tokens: Optional[int] = None, pad_to_multiple_of: Optional[int] = None
281
+ ) -> nn.Embedding:
282
+ updated_embeddings = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
283
+
284
+ # Update config/instance variables
285
+ self.config.text_config.vocab_size = updated_embeddings.num_embeddings
286
+ self.vocab_size = updated_embeddings.num_embeddings
287
+
288
+ return updated_embeddings
289
+
290
+ # === Core Prismatic VLM `forward()` Logic ===
291
+ def forward(
292
+ self,
293
+ input_ids: Optional[torch.LongTensor] = None,
294
+ attention_mask: Optional[torch.Tensor] = None,
295
+ pixel_values: Optional[torch.FloatTensor] = None,
296
+ labels: Optional[torch.LongTensor] = None,
297
+ inputs_embeds: Optional[torch.FloatTensor] = None,
298
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
299
+ use_cache: Optional[bool] = None,
300
+ output_attentions: Optional[bool] = None,
301
+ output_hidden_states: Optional[bool] = None,
302
+ output_projector_features: Optional[bool] = None,
303
+ return_dict: Optional[bool] = None,
304
+ ) -> Union[Tuple, PrismaticCausalLMOutputWithPast]:
305
+ """Run a forward pass through the VLM, returning a PrismaticCausalLMOutputWithPast instance."""
306
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
307
+ output_hidden_states = (
308
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
309
+ )
310
+ output_projector_features = output_projector_features if output_projector_features is not None else False
311
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
312
+
313
+ # Respect `use_cache` only if not training (even if `gradient_checkpointing` is off)
314
+ use_cache = use_cache and not self.training
315
+
316
+ # Instantiate Placeholder for Projector Features
317
+ projected_patch_embeddings = None
318
+
319
+ # Note :: We only support forward passes with the following cases:
320
+ # => Cached Generation :: (input_ids.shape[1] == 1) and (past_key_values is not None)
321
+ # => Unimodal Forward :: (pixel_values is None)
322
+ # => Multimodal Forward :: (pixel_values is not None) and (input_ids/embeds.shape[0] == pixel_values.shape[0])
323
+
324
+ # === Handle Generation with Cache (`input_ids.shape[1] == 1`) =>> requires `past_key_values` ===
325
+ if input_ids.shape[1] == 1:
326
+ assert input_ids.shape[0] == 1, "Generation is only currently supported for batch size of 1!"
327
+ assert past_key_values is not None, "You must provide `past_key_values` during cached generation!"
328
+ assert labels is None, "Unexpected key `labels` provided during cached generation!"
329
+
330
+ language_model_output = self.language_model(
331
+ input_ids=input_ids,
332
+ attention_mask=None,
333
+ position_ids=None,
334
+ past_key_values=past_key_values,
335
+ inputs_embeds=None,
336
+ labels=None,
337
+ use_cache=use_cache,
338
+ output_attentions=output_attentions,
339
+ output_hidden_states=output_hidden_states,
340
+ return_dict=return_dict,
341
+ )
342
+
343
+ # === Handle Unimodal Forward ===
344
+ elif pixel_values is None:
345
+ assert (input_ids is not None) and (inputs_embeds is None), "Missing `input_ids` in language-only forward!"
346
+ assert past_key_values is None, "Unexpected key `past_key_values` provided during language-only forward!"
347
+
348
+ language_model_output = self.language_model(
349
+ input_ids=input_ids,
350
+ attention_mask=attention_mask,
351
+ position_ids=None,
352
+ past_key_values=None,
353
+ inputs_embeds=None,
354
+ labels=labels,
355
+ use_cache=use_cache,
356
+ output_attentions=output_attentions,
357
+ output_hidden_states=output_hidden_states,
358
+ return_dict=return_dict,
359
+ )
360
+
361
+ # === Handle Multimodal Forward ===
362
+ elif (input_ids.shape[0] == pixel_values.shape[0]) or (inputs_embeds.shape[0] == pixel_values.shape[0]):
363
+ assert past_key_values is None, "Unexpected key `past_key_values` provided during multimodal forward!"
364
+
365
+ # Visual Feature Extraction
366
+ patch_features = self.vision_backbone(pixel_values)
367
+
368
+ # Projection Logic =>> Update Attention Mask
369
+ projected_patch_embeddings = self.projector(patch_features)
370
+ projected_patch_attention_mask = None
371
+ if attention_mask is not None:
372
+ projected_patch_attention_mask = torch.full(
373
+ (projected_patch_embeddings.shape[0], projected_patch_embeddings.shape[1]),
374
+ fill_value=True,
375
+ dtype=attention_mask.dtype,
376
+ device=attention_mask.device,
377
+ )
378
+
379
+ # Get Input Embeddings (from Language Model Embeddings)
380
+ input_embeddings = self.get_input_embeddings()(input_ids)
381
+
382
+ # Build Multimodal Embeddings & Attention Mask =>> Prismatic defaults to inserting after <BOS> token (1:)
383
+ multimodal_embeddings = torch.cat(
384
+ [input_embeddings[:, :1, :], projected_patch_embeddings, input_embeddings[:, 1:, :]], dim=1
385
+ )
386
+ multimodal_attention_mask = None
387
+ if attention_mask is not None:
388
+ multimodal_attention_mask = torch.cat(
389
+ [attention_mask[:, :1], projected_patch_attention_mask, attention_mask[:, 1:]], dim=1
390
+ )
391
+
392
+ # Build Labels (if specified) =>> Ignore Labels for Patch Embeddings
393
+ multimodal_labels = None
394
+ if labels is not None:
395
+ projected_patch_labels = torch.full(
396
+ (projected_patch_embeddings.shape[0], projected_patch_embeddings.shape[1]),
397
+ fill_value=IGNORE_INDEX,
398
+ dtype=labels.dtype,
399
+ device=labels.device,
400
+ )
401
+ multimodal_labels = torch.cat([labels[:, :1], projected_patch_labels, labels[:, 1:]], dim=1)
402
+
403
+ # Dispatch to Language Model
404
+ language_model_output = self.language_model(
405
+ input_ids=None,
406
+ attention_mask=multimodal_attention_mask,
407
+ position_ids=None,
408
+ past_key_values=None,
409
+ inputs_embeds=multimodal_embeddings,
410
+ labels=multimodal_labels,
411
+ use_cache=use_cache,
412
+ output_attentions=output_attentions,
413
+ output_hidden_states=output_hidden_states,
414
+ return_dict=return_dict,
415
+ )
416
+
417
+ # === Otherwise =>> Assume Invalid! ===
418
+ elif (input_ids.shape[0] != pixel_values.shape[0]) or (inputs_embeds.shape[0] != pixel_values.shape[0]):
419
+ raise ValueError("Non-homogenous batch of (text, image) input -- forward() does not support mixed batches!")
420
+
421
+ else:
422
+ raise ValueError(
423
+ "Invalid PrismaticForConditionalGeneration `forward()` call with provided arguments:\n"
424
+ f"=> `input_ids` = {input_ids is not None}\n"
425
+ f"=> `attention_mask` = {attention_mask is not None}\n"
426
+ f"=> `pixel_values` = {pixel_values is not None}\n"
427
+ f"=> `labels` = {labels is not None}\n"
428
+ f"=> `input_embeds` = {inputs_embeds is not None}\n"
429
+ f"=> `past_key_values` = {past_key_values is not None}\n"
430
+ f"=> `use_cache` = {use_cache}"
431
+ )
432
+
433
+ # Unpack `language_model_output` and return PrismaticCausalLMOutputWithPast (or tuple if not `return_dict`)
434
+ if not return_dict:
435
+ if output_projector_features and (projected_patch_embeddings is not None):
436
+ return *language_model_output, projected_patch_embeddings
437
+
438
+ return language_model_output
439
+
440
+ return PrismaticCausalLMOutputWithPast(
441
+ loss=language_model_output.loss,
442
+ logits=language_model_output.logits,
443
+ past_key_values=language_model_output.past_key_values,
444
+ hidden_states=language_model_output.hidden_states,
445
+ attentions=language_model_output.attentions,
446
+ projector_features=projected_patch_embeddings,
447
+ )
448
+
449
+ # === GenerationMixin Methods ===
450
+ def prepare_inputs_for_generation(
451
+ self,
452
+ input_ids: Optional[torch.Tensor] = None,
453
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
454
+ inputs_embeds: Optional[torch.FloatTensor] = None,
455
+ pixel_values: Optional[torch.FloatTensor] = None,
456
+ attention_mask: Optional[torch.Tensor] = None,
457
+ **kwargs: str,
458
+ ) -> Dict[str, torch.Tensor]:
459
+ """Borrowed from `LlamaForCausalLM` and simplified for batch size = 1; mirrors original PrismaticVLM logic."""
460
+ if ((input_ids is not None) and (input_ids.shape[0] > 1)) or (
461
+ (inputs_embeds is not None) and (inputs_embeds.shape[0] > 1)
462
+ ):
463
+ raise ValueError("Generation with batch size > 1 is not currently supported!")
464
+
465
+ # Handle `past_key_values` (cache) =>> assume `input_ids` just has unprocessed tokens
466
+ if past_key_values is not None:
467
+ input_ids = input_ids[:, -1:]
468
+
469
+ # If `inputs_embeds` are passed, we only want to use them in the 1st generation step
470
+ if inputs_embeds is not None and past_key_values is None:
471
+ model_inputs = {"inputs_embeds": inputs_embeds}
472
+ else:
473
+ model_inputs = {"input_ids": input_ids}
474
+
475
+ # Make sure `pixel_values` are preserved in `model_inputs`
476
+ model_inputs.update(
477
+ {
478
+ "attention_mask": attention_mask,
479
+ "pixel_values": pixel_values,
480
+ "past_key_values": past_key_values,
481
+ "use_cache": kwargs.get("use_cache"),
482
+ }
483
+ )
484
+
485
+ return model_inputs
486
+
487
+ # Defer to Language Model (all handle this differently, with different return types)
488
+ def _reorder_cache(self, *args, **kwargs) -> Any:
489
+ return self.language_model._reorder_cache(*args, **kwargs)
490
+
491
+
492
+ class OpenVLAForActionPrediction(PrismaticForConditionalGeneration):
493
+ config_class: PretrainedConfig = OpenVLAConfig
494
+
495
+ def __init__(self, config: OpenVLAConfig) -> None:
496
+ super().__init__(config)
497
+ self.norm_stats = config.norm_stats
498
+
499
+ # Compute action bins
500
+ self.bins = np.linspace(-1, 1, config.n_action_bins)
501
+ self.bin_centers = (self.bins[:-1] + self.bins[1:]) / 2.0
502
+
503
+ # Compute vocab size for de-tokenization -- revert added "multiple of"
504
+ self.vocab_size = self.config.text_config.vocab_size - self.config.pad_to_multiple_of
505
+
506
+ def predict_action(
507
+ self, input_ids: Optional[torch.LongTensor] = None, unnorm_key: Optional[str] = None, **kwargs: str
508
+ ) -> np.ndarray:
509
+ """Thin wrapper around .generate() that decodes predicted actions and unnormalizes them."""
510
+ # If the special empty token ('') does not already appear after the colon (':') token in the prompt
511
+ # (after "OUT:" or "ASSISTANT:"), insert it to match the inputs seen at training time
512
+ if not torch.all(input_ids[:, -1] == 29871):
513
+ input_ids = torch.cat(
514
+ (input_ids, torch.unsqueeze(torch.Tensor([29871]).long(), dim=0).to(input_ids.device)), dim=1
515
+ )
516
+
517
+ # Run VLA inference
518
+ generated_ids = self.generate(input_ids, max_new_tokens=self.get_action_dim(unnorm_key), **kwargs)
519
+
520
+ # Extract predicted action tokens and translate into (normalized) continuous actions
521
+ predicted_action_token_ids = generated_ids[0, -self.get_action_dim(unnorm_key) :].cpu().numpy()
522
+ discretized_actions = self.vocab_size - predicted_action_token_ids
523
+ discretized_actions = np.clip(discretized_actions - 1, a_min=0, a_max=self.bin_centers.shape[0] - 1)
524
+ normalized_actions = self.bin_centers[discretized_actions]
525
+
526
+ # Unnormalize actions
527
+ action_norm_stats = self.get_action_stats(unnorm_key)
528
+ mask = action_norm_stats.get("mask", np.ones_like(action_norm_stats["q01"], dtype=bool))
529
+ action_high, action_low = np.array(action_norm_stats["q99"]), np.array(action_norm_stats["q01"])
530
+ actions = np.where(
531
+ mask,
532
+ 0.5 * (normalized_actions + 1) * (action_high - action_low) + action_low,
533
+ normalized_actions,
534
+ )
535
+
536
+ return actions
537
+
538
+ @staticmethod
539
+ def _check_unnorm_key(norm_stats: Dict[str, Dict[str, Any]], unnorm_key: Optional[str]) -> str:
540
+ if unnorm_key is None:
541
+ assert len(norm_stats) == 1, (
542
+ f"Your model was trained on more than one dataset, "
543
+ f"please pass a `unnorm_key` from the following options to choose the statistics "
544
+ f"used for un-normalizing actions: {norm_stats.keys()}"
545
+ )
546
+ unnorm_key = next(iter(norm_stats.keys()))
547
+
548
+ assert unnorm_key in norm_stats, (
549
+ f"The `unnorm_key` you chose is not in the set of available dataset statistics, "
550
+ f"please choose from: {norm_stats.keys()}"
551
+ )
552
+ return unnorm_key
553
+
554
+ def get_action_dim(self, unnorm_key: Optional[str] = None) -> int:
555
+ """Get the dimensionality of the policy's action space."""
556
+ unnorm_key = self._check_unnorm_key(self.norm_stats, unnorm_key)
557
+ return len(self.norm_stats[unnorm_key]["action"]["q01"])
558
+
559
+ def get_action_stats(self, unnorm_key: Optional[str] = None) -> Dict[str, Any]:
560
+ """Get all the logged statistics for the given dataset."""
561
+ unnorm_key = self._check_unnorm_key(self.norm_stats, unnorm_key)
562
+ return self.norm_stats[unnorm_key]["action"]
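A minimal usage sketch for OpenVLAForActionPrediction above (hedged: the checkpoint directory, image file, and `unnorm_key` are illustrative, and this assumes the repo's config registers the class under AutoModelForVision2Seq as in upstream OpenVLA):

import torch
from PIL import Image
from transformers import AutoModelForVision2Seq, AutoProcessor

# Hypothetical local checkpoint directory and observation frame.
processor = AutoProcessor.from_pretrained("output_hf_model_openx", trust_remote_code=True)
vla = AutoModelForVision2Seq.from_pretrained(
    "output_hf_model_openx", torch_dtype=torch.bfloat16, trust_remote_code=True
).to("cuda:0")

image = Image.open("frame.png")
prompt = "In: What action should the robot take to pick up the cup?\nOut:"
inputs = processor(prompt, image).to("cuda:0", dtype=torch.bfloat16)

# Generates one token per action dimension, maps tokens back to bin centers,
# then un-normalizes with the q01/q99 statistics stored under `unnorm_key`.
action = vla.predict_action(**inputs, unnorm_key="bridge_orig", do_sample=False)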
output_hf_model_openx/preprocessor_config.json ADDED
@@ -0,0 +1,114 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoImageProcessor": "processing_prismatic.PrismaticImageProcessor",
4
+ "AutoProcessor": "processing_prismatic.PrismaticProcessor"
5
+ },
6
+ "image_processor_type": "PrismaticImageProcessor",
7
+ "image_resize_strategy": "resize-naive",
8
+ "input_sizes": [
9
+ [
10
+ 3,
11
+ 224,
12
+ 224
13
+ ],
14
+ [
15
+ 3,
16
+ 224,
17
+ 224
18
+ ]
19
+ ],
20
+ "interpolations": [
21
+ "bicubic",
22
+ "bicubic"
23
+ ],
24
+ "means": [
25
+ [
26
+ 0.485,
27
+ 0.456,
28
+ 0.406
29
+ ],
30
+ [
31
+ 0.5,
32
+ 0.5,
33
+ 0.5
34
+ ]
35
+ ],
36
+ "processor_class": "PrismaticProcessor",
37
+ "stds": [
38
+ [
39
+ 0.229,
40
+ 0.224,
41
+ 0.225
42
+ ],
43
+ [
44
+ 0.5,
45
+ 0.5,
46
+ 0.5
47
+ ]
48
+ ],
49
+ "tvf_crop_params": [
50
+ {
51
+ "output_size": [
52
+ 224,
53
+ 224
54
+ ]
55
+ },
56
+ {
57
+ "output_size": [
58
+ 224,
59
+ 224
60
+ ]
61
+ }
62
+ ],
63
+ "tvf_do_letterbox": false,
64
+ "tvf_letterbox_fill": null,
65
+ "tvf_normalize_params": [
66
+ {
67
+ "inplace": false,
68
+ "mean": [
69
+ 0.484375,
70
+ 0.455078125,
71
+ 0.40625
72
+ ],
73
+ "std": [
74
+ 0.228515625,
75
+ 0.2236328125,
76
+ 0.224609375
77
+ ]
78
+ },
79
+ {
80
+ "inplace": false,
81
+ "mean": [
82
+ 0.5,
83
+ 0.5,
84
+ 0.5
85
+ ],
86
+ "std": [
87
+ 0.5,
88
+ 0.5,
89
+ 0.5
90
+ ]
91
+ }
92
+ ],
93
+ "tvf_resize_params": [
94
+ {
95
+ "antialias": true,
96
+ "interpolation": 3,
97
+ "max_size": null,
98
+ "size": [
99
+ 224,
100
+ 224
101
+ ]
102
+ },
103
+ {
104
+ "antialias": true,
105
+ "interpolation": 3,
106
+ "max_size": null,
107
+ "size": [
108
+ 224,
109
+ 224
110
+ ]
111
+ }
112
+ ],
113
+ "use_fused_vision_backbone": true
114
+ }
output_hf_model_openx/processing_prismatic.py ADDED
@@ -0,0 +1,257 @@
1
+ """
2
+ processing_prismatic.py
3
+
4
+ HuggingFace-style preprocessor definitions for Prismatic VLMs, inheriting from `ProcessorMixin`. Default configuration
5
+ specifies `siglip-224px+7b`.
6
+ """
7
+
8
+ from typing import Any, ClassVar, List, Optional, Tuple, Union
9
+
10
+ import timm.data
11
+ import torch
12
+ import torchvision.transforms.functional as TVF
13
+ from PIL import Image
14
+ from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
15
+ from transformers import PreTrainedTokenizerBase
16
+ from transformers.image_processing_utils import BatchFeature, ImageProcessingMixin
17
+ from transformers.processing_utils import ProcessorMixin
18
+ from transformers.tokenization_utils import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
19
+ from transformers.utils import TensorType
20
+
21
+
22
+ # === Image Processing ===
23
+ def letterbox_pad_transform(image: Image.Image, padding_fill_value: Tuple[int, int, int]) -> Image.Image:
24
+ """Given a PIL.Image, pad to square by adding a symmetric border around the height/width."""
25
+ (w, h), max_wh = image.size, max(image.size)
26
+ horizontal_pad, vertical_pad = int((max_wh - w) / 2), int((max_wh - h) / 2)
27
+ padding = (horizontal_pad, vertical_pad, horizontal_pad, vertical_pad)
28
+
29
+ return TVF.pad(image, padding, fill=padding_fill_value, padding_mode="constant")
30
+
31
+
32
+ class PrismaticImageProcessor(ImageProcessingMixin):
33
+ model_input_names: ClassVar[List[str]] = ["pixel_values"]
34
+
35
+ def __init__(
36
+ self,
37
+ use_fused_vision_backbone: bool = False,
38
+ image_resize_strategy: str = "letterbox",
39
+ input_sizes: Optional[List[Tuple[int, int, int]]] = None,
40
+ interpolations: Optional[List[str]] = None,
41
+ means: Optional[List[Tuple[float, float, float]]] = None,
42
+ stds: Optional[List[Tuple[float, float, float]]] = None,
43
+ **kwargs: str,
44
+ ) -> None:
45
+ """
46
+ Initialize a PrismaticImageProcessor as a wrapper around a torchvision transform; this transform will be
47
+ created by TIMM, and edited to follow our custom `image_resize_strategy` logic.
48
+
49
+ @param use_fused_vision_backbone: Boolean indicating single or fused (dual) vision backbone
50
+ @param image_resize_strategy: Prismatic image resize strategy in < resize-naive | resize-crop | letterbox >
51
+ @param input_size: [TIMM :: `data_cfg`] Input image size as tuple (channels, width, height)
52
+ @param interpolation: [TIMM :: `data_cfg`] Interpolation as string (default: "bicubic")
53
+ @param mean: [TIMM :: `data_cfg`] Normalization mean as float tuple (or two-tuple if `fused_backbone`)
54
+ @param std: [TIMM :: `data_cfg`] Normalization std as float tuple (or two-tuple if `fused_backbone`)
55
+ """
56
+ self.use_fused_vision_backbone = use_fused_vision_backbone
57
+ self.image_resize_strategy = image_resize_strategy
58
+
59
+ # Handle `None` default values
60
+ input_sizes = [(3, 224, 224)] if input_sizes is None else input_sizes
61
+ means = [(0.5, 0.5, 0.5)] if means is None else means
62
+ stds = [(0.5, 0.5, 0.5)] if stds is None else stds
63
+
64
+ # TIMM `data_cfg` Parameters
65
+ self.input_sizes, self.interpolations, self.means, self.stds = input_sizes, interpolations, means, stds
66
+
67
+ # Grab torchvision transforms via TIMM =>> need to parse for specific "functional" transform values!
68
+ self.tvf_resize_params, self.tvf_crop_params, self.tvf_normalize_params = [], [], []
69
+ self.tvf_do_letterbox, self.tvf_letterbox_fill = False, None
70
+
71
+ for idx in range(len(input_sizes)):
72
+ transform = timm.data.create_transform(
73
+ input_size=self.input_sizes[idx],
74
+ interpolation=self.interpolations[idx],
75
+ mean=self.means[idx],
76
+ std=self.stds[idx],
77
+ crop_pct=1.0, # Set to 1.0 to ignore cropping (initial Resize sets `input_size`)
78
+ crop_mode="center", # Default crop mode -- no-op when `crop_pct == 1.0`
79
+ is_training=False, # No image augmentations when loading the transform!
80
+ )
81
+
82
+ # [Validation] Ensure appropriate transform structure, expected sizes
83
+ if not (
84
+ isinstance(transform, Compose)
85
+ and (len(transform.transforms) == 4)
86
+ and isinstance(transform.transforms[0], Resize)
87
+ and isinstance(transform.transforms[1], CenterCrop)
88
+ and isinstance(transform.transforms[2], ToTensor)
89
+ and isinstance(transform.transforms[3], Normalize)
90
+ and (transform.transforms[0].size == self.input_sizes[idx][-1])
91
+ and (transform.transforms[1].size == self.input_sizes[idx][-2:])
92
+ ):
93
+ raise ValueError(f"Unexpected TIMM image transformation structure/sizes: `{transform}`")
94
+
95
+ # HF Image Processors *must* be JSON-serializable; as such, cannot have torchvision. as an attribute.
96
+ # => Instead, we're going to parse the transform and call "torchvision.transforms.functional" (`tvf`)
97
+ resize_t, crop_t, norm_t = transform.transforms[0], transform.transforms[1], transform.transforms[3]
98
+ self.tvf_resize_params.append(
99
+ {
100
+ "size": resize_t.size,
101
+ "interpolation": TVF.pil_modes_mapping[resize_t.interpolation],
102
+ "max_size": None,
103
+ "antialias": True,
104
+ }
105
+ )
106
+ self.tvf_crop_params.append({"output_size": crop_t.size})
107
+ self.tvf_normalize_params.append(
108
+ {
109
+ "mean": norm_t.mean.float().numpy().tolist(),
110
+ "std": norm_t.std.float().numpy().tolist(),
111
+ "inplace": False,
112
+ }
113
+ )
114
+ self.tvf_do_letterbox, self.tvf_letterbox_fill = False, None
115
+
116
+ # Handle Prismatic `image_resize_strategy`
117
+ if self.image_resize_strategy == "resize-naive":
118
+ self.tvf_resize_params[idx]["size"] = (resize_t.size, resize_t.size)
119
+ elif self.image_resize_strategy == "letterbox":
120
+ self.tvf_do_letterbox, self.tvf_letterbox_fill = True, tuple([int(x * 255) for x in self.means[idx]])
121
+ elif self.image_resize_strategy == "resize-crop":
122
+ pass
123
+ else:
124
+ raise ValueError(f"Image resize strategy `{self.image_resize_strategy}` is not supported!")
125
+
126
+ # Dispatch **kwargs to super()
127
+ super().__init__(**kwargs)
128
+
129
+ def apply_transform(self, img: Image.Image) -> torch.Tensor:
130
+ """Apply `functional` variant of TIMM's Transform = Compose([Resize -> CenterCrop -> ToTensor -> Normalize])"""
131
+ if self.tvf_do_letterbox:
132
+ img = letterbox_pad_transform(img, self.tvf_letterbox_fill)
133
+
134
+ # [Contract] Fused Backbones expect "channel-stacked" inputs; we'll unpack on the model side!
135
+ imgs_t = []
136
+ for idx in range(len(self.input_sizes)):
137
+ img_idx = TVF.resize(img, **self.tvf_resize_params[idx])
138
+ img_idx = TVF.center_crop(img_idx, **self.tvf_crop_params[idx])
139
+ img_idx_t = TVF.to_tensor(img_idx)
140
+ img_idx_t = TVF.normalize(img_idx_t, **self.tvf_normalize_params[idx])
141
+ imgs_t.append(img_idx_t)
142
+
143
+ # [Contract] `imgs_t` is a list of Tensors of shape [3, input_size, input_size]; stack along dim = 0
144
+ img_t = torch.vstack(imgs_t)
145
+
146
+ return img_t
147
+
148
+ def preprocess(
149
+ self,
150
+ images: Union[Image.Image, List[Image.Image]],
151
+ return_tensors: Optional[Union[str, TensorType]] = None,
152
+ **_: str,
153
+ ) -> BatchFeature:
154
+ """
155
+ Preprocess an image (or batch of images); note that unlike the `transformers :: BaseImageProcessor` we
156
+ explicitly only handle PIL.Image.Image instances for simplicity.
157
+
158
+ @param images: A (batch of) PIL.Image.Image instance(s) to preprocess.
159
+ @param return_tensors: BatchFeature default Tensor format (e.g., "pt" for torch); if None, returns np.ndarray
160
+
161
+ @return: Instance of `transformers :: BatchFeature` with a single key "pixel_values"
162
+ """
163
+ if not isinstance(images, list):
164
+ images = [images]
165
+
166
+ # Apply `self.img_transform` to each image (will return list of torch.Tensors); stack into "batched" Tensor
167
+ pixel_values = torch.stack([self.apply_transform(img.convert("RGB")) for img in images])
168
+
169
+ # Return BatchFeature =>> note that for compatibility, constructor expects Dict[str, np.ndarray], so we convert
170
+ return BatchFeature(data={"pixel_values": pixel_values.float().numpy()}, tensor_type=return_tensors)
171
+
172
+ def __call__(self, images: Union[Image.Image, List[Image.Image]], **kwargs) -> BatchFeature:
173
+ return self.preprocess(images, **kwargs)
+
+
+ # === PrismaticProcessor =>> Wraps both ImageProcessor and Tokenizer ===
+ #   =>> https://github.com/huggingface/transformers/blob/main/src/transformers/models/llava/processing_llava.py
+ class PrismaticProcessor(ProcessorMixin):
+     attributes: ClassVar[List[str]] = ["image_processor", "tokenizer"]
+     image_processor_class: str = "AutoImageProcessor"
+     tokenizer_class: str = "AutoTokenizer"
+
+     def __init__(
+         self,
+         image_processor: Optional[ImageProcessingMixin] = None,
+         tokenizer: Optional[PreTrainedTokenizerBase] = None,
+     ) -> None:
+         super().__init__(image_processor, tokenizer)
+
+     def __call__(
+         self,
+         text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
+         images: Union[Image.Image, List[Image.Image]],
+         padding: Union[bool, str, PaddingStrategy] = False,
+         truncation: Optional[Union[bool, str, TruncationStrategy]] = None,
+         max_length: Optional[int] = None,
+         return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
+     ) -> BatchFeature:
+         """
+         Preprocess a given (batch) of text/images for a Prismatic VLM; forwards text to the underlying LLM's
+         tokenizer, forwards images to PrismaticImageProcessor.
+
+         @param text: The (batch) of text to encode; must be a string or list of strings.
+         @param images: A (batch of) PIL.Image.Image instance(s) to preprocess.
+         @param padding: Sequence padding strategy (if multiple specified) in < True = "longest" | "max_length" | False >
+         @param truncation: Truncation strategy for the output sequences; requires `max_length` to be specified
+         @param max_length: Maximum length (in tokens) to truncate sequences to
+         @param return_tensors: Type of return tensors (usually "pt" or TensorType.PYTORCH)
+
+         @return: BatchFeature with keys for `input_ids`, `attention_mask`, and `pixel_values`.
+         """
+         pixel_values = self.image_processor(images, return_tensors=return_tensors)["pixel_values"]
+         text_inputs = self.tokenizer(
+             text, return_tensors=return_tensors, padding=padding, truncation=truncation, max_length=max_length
+         )
+
+         # [Validate] Need same number of images and text inputs!
+         if pixel_values.shape[0] != text_inputs.input_ids.shape[0]:
+             raise ValueError("Batch is malformed; expected same number of images and text inputs!")
+
+         return BatchFeature(data={**text_inputs, "pixel_values": pixel_values})
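
A minimal sketch of the batching contract, assuming `image_processor` and `tokenizer` handles loaded from the same checkpoint (the prompts below are made-up):

from PIL import Image

processor = PrismaticProcessor(image_processor=image_processor, tokenizer=tokenizer)

texts = ["In: What action should the robot take?\nOut:", "In: Where is the black bowl?\nOut:"]
images = [Image.new("RGB", (256, 256)) for _ in texts]

batch = processor(texts, images, padding=True)  # pads `input_ids` to the longest sequence
assert batch["pixel_values"].shape[0] == batch["input_ids"].shape[0] == 2

# Mismatched batches fail fast:
# processor(texts, images[:1])  ->  ValueError: Batch is malformed; ...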
+
+     # === Tokenizer Dispatch Utilities =>> check `PreTrainedTokenizerBase` for documentation ===
+     def batch_decode(
+         self,
+         sequences: Union[List[int], List[List[int]], torch.Tensor, Any],  # `Any` = np.ndarray | tf.Tensor
+         skip_special_tokens: bool = False,
+         clean_up_tokenization_spaces: Optional[bool] = None,
+         **kwargs: str,
+     ) -> List[str]:
+         return self.tokenizer.batch_decode(
+             sequences=sequences,
+             skip_special_tokens=skip_special_tokens,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs,
+         )
+
+     def decode(
+         self,
+         token_ids: Union[int, List[int], torch.Tensor, Any],  # `Any` = np.ndarray | tf.Tensor
+         skip_special_tokens: bool = False,
+         clean_up_tokenization_spaces: Optional[bool] = None,
+         **kwargs: str,
+     ) -> str:
+         return self.tokenizer.decode(
+             token_ids=token_ids,
+             skip_special_tokens=skip_special_tokens,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs,
+         )
+
+     @property
+     def model_input_names(self) -> List[str]:
+         tokenizer_input_names = self.tokenizer.model_input_names
+         image_processor_input_names = self.image_processor.model_input_names
+
+         return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
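
These dispatch methods let the processor stand in for the tokenizer during decoding; a quick round trip, continuing the sketch above:

ids = processor.tokenizer("pick up the black bowl")["input_ids"]
assert processor.decode(ids, skip_special_tokens=True) == "pick up the black bowl"
assert len(processor.batch_decode([ids, ids], skip_special_tokens=True)) == 2

# Deduplicated union of tokenizer + image processor inputs,
# e.g. ["input_ids", "attention_mask", "pixel_values"]
print(processor.model_input_names)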
output_hf_model_openx/processor_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "auto_map": {
+     "AutoProcessor": "processing_prismatic.PrismaticProcessor"
+   },
+   "processor_class": "PrismaticProcessor"
+ }
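
The `auto_map` entry is what allows `transformers` to resolve the custom processor class shipped alongside the checkpoint; loading it requires opting into remote code execution:

from transformers import AutoProcessor

# `trust_remote_code=True` lets transformers import `processing_prismatic.py`
# from the checkpoint directory and instantiate the `PrismaticProcessor` named
# in `auto_map` above ("output_hf_model_openx" is the local upload path)
processor = AutoProcessor.from_pretrained("output_hf_model_openx", trust_remote_code=True)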
output_hf_model_openx/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<PAD>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
output_hf_model_openx/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
output_hf_model_openx/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
output_hf_model_openx/tokenizer_config.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32000": {
+       "content": "<PAD>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "auto_map": {
+     "AutoProcessor": "processing_prismatic.PrismaticProcessor"
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 2048,
+   "pad_token": "<PAD>",
+   "padding_side": "right",
+   "processor_class": "PrismaticProcessor",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
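
Taken together with special_tokens_map.json above, this config appends `<PAD>` one past the base Llama vocabulary (ids 0–31999) and pads on the right; a quick sanity-check sketch:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("output_hf_model_openx")

assert tokenizer.pad_token == "<PAD>" and tokenizer.pad_token_id == 32000
assert tokenizer.padding_side == "right"
assert tokenizer.add_bos_token and not tokenizer.add_eos_token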
prismatic/__init__.py ADDED
@@ -0,0 +1 @@
+ from .models import available_model_names, available_models, get_model_description, load
prismatic/conf/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .datasets import DatasetConfig, DatasetRegistry
+ from .models import ModelConfig, ModelRegistry
+ from .vla import VLAConfig, VLARegistry
prismatic/conf/datasets.py ADDED
@@ -0,0 +1,133 @@
+ """
+ datasets.py
+
+ Draccus Dataclass Definition for a DatasetConfig object, with various registered subclasses for each dataset variant
+ and processing scheme. A given dataset variant (e.g., `llava-lightning`) configures the following attributes:
+     - Dataset Variant (Identifier) --> e.g., "llava-v15"
+     - Align Stage Dataset Components (annotations, images)
+     - Finetune Stage Dataset Components (annotations, images)
+     - Dataset Root Directory (Path)
+ """
+
+ from dataclasses import dataclass
+ from enum import Enum, unique
+ from pathlib import Path
+ from typing import Tuple
+
+ from draccus import ChoiceRegistry
+
+
+ @dataclass
+ class DatasetConfig(ChoiceRegistry):
+     # fmt: off
+     dataset_id: str                               # Unique ID that fully specifies a dataset variant
+
+     # Dataset Components for each Stage in < align | finetune >
+     align_stage_components: Tuple[Path, Path]     # Path to annotation file and images directory for `align` stage
+     finetune_stage_components: Tuple[Path, Path]  # Path to annotation file and images directory for `finetune` stage
+
+     dataset_root_dir: Path                        # Path to dataset root directory; other paths are relative to root
+     # fmt: on
+
+
+ # [Reproduction] LLaVa-v15 (exact dataset used in all public LLaVa-v15 models)
+ @dataclass
+ class LLaVa_V15_Config(DatasetConfig):
+     dataset_id: str = "llava-v15"
+
+     align_stage_components: Tuple[Path, Path] = (
+         Path("download/llava-laion-cc-sbu-558k/chat.json"),
+         Path("download/llava-laion-cc-sbu-558k/"),
+     )
+     finetune_stage_components: Tuple[Path, Path] = (
+         Path("download/llava-v1.5-instruct/llava_v1_5_mix665k.json"),
+         Path("download/llava-v1.5-instruct/"),
+     )
+     dataset_root_dir: Path = Path("/mnt/fsx/skaramcheti/datasets/prismatic-vlms")
+
+
+ # [Multimodal-Only] LLaVa-v15 WITHOUT the Language-Only ShareGPT Data (No Co-Training)
+ @dataclass
+ class LLaVa_Multimodal_Only_Config(DatasetConfig):
+     dataset_id: str = "llava-multimodal"
+
+     align_stage_components: Tuple[Path, Path] = (
+         Path("download/llava-laion-cc-sbu-558k/chat.json"),
+         Path("download/llava-laion-cc-sbu-558k/"),
+     )
+     finetune_stage_components: Tuple[Path, Path] = (
+         Path("download/llava-v1.5-instruct/llava_v1_5_stripped625k.json"),
+         Path("download/llava-v1.5-instruct/"),
+     )
+     dataset_root_dir: Path = Path("/mnt/fsx/skaramcheti/datasets/prismatic-vlms")
+
+
+ # LLaVa-v15 + LVIS-Instruct-4V
+ @dataclass
+ class LLaVa_LVIS4V_Config(DatasetConfig):
+     dataset_id: str = "llava-lvis4v"
+
+     align_stage_components: Tuple[Path, Path] = (
+         Path("download/llava-laion-cc-sbu-558k/chat.json"),
+         Path("download/llava-laion-cc-sbu-558k/"),
+     )
+     finetune_stage_components: Tuple[Path, Path] = (
+         Path("download/llava-v1.5-instruct/llava_v1_5_lvis4v_mix888k.json"),
+         Path("download/llava-v1.5-instruct/"),
+     )
+     dataset_root_dir: Path = Path("/mnt/fsx/skaramcheti/datasets/prismatic-vlms")
+
+
+ # LLaVa-v15 + LRV-Instruct
+ @dataclass
+ class LLaVa_LRV_Config(DatasetConfig):
+     dataset_id: str = "llava-lrv"
+
+     align_stage_components: Tuple[Path, Path] = (
+         Path("download/llava-laion-cc-sbu-558k/chat.json"),
+         Path("download/llava-laion-cc-sbu-558k/"),
+     )
+     finetune_stage_components: Tuple[Path, Path] = (
+         Path("download/llava-v1.5-instruct/llava_v1_5_lrv_mix1008k.json"),
+         Path("download/llava-v1.5-instruct/"),
+     )
+     dataset_root_dir: Path = Path("/mnt/fsx/skaramcheti/datasets/prismatic-vlms")
+
+
+ # LLaVa-v15 + LVIS-Instruct-4V + LRV-Instruct
+ @dataclass
+ class LLaVa_LVIS4V_LRV_Config(DatasetConfig):
+     dataset_id: str = "llava-lvis4v-lrv"
+
+     align_stage_components: Tuple[Path, Path] = (
+         Path("download/llava-laion-cc-sbu-558k/chat.json"),
+         Path("download/llava-laion-cc-sbu-558k/"),
+     )
+     finetune_stage_components: Tuple[Path, Path] = (
+         Path("download/llava-v1.5-instruct/llava_v1_5_lvis4v_lrv_mix1231k.json"),
+         Path("download/llava-v1.5-instruct/"),
+     )
+     dataset_root_dir: Path = Path("/mnt/fsx/skaramcheti/datasets/prismatic-vlms")
+
+
+ # === Define a Dataset Registry Enum for Reference & Validation =>> all *new* datasets must be added here! ===
+ @unique
+ class DatasetRegistry(Enum):
+     # === LLaVa v1.5 ===
+     LLAVA_V15 = LLaVa_V15_Config
+
+     LLAVA_MULTIMODAL_ONLY = LLaVa_Multimodal_Only_Config
+
+     LLAVA_LVIS4V = LLaVa_LVIS4V_Config
+     LLAVA_LRV = LLaVa_LRV_Config
+
+     LLAVA_LVIS4V_LRV = LLaVa_LVIS4V_LRV_Config
+
+     @property
+     def dataset_id(self) -> str:
+         return self.value.dataset_id
+
+
+ # Register Datasets in Choice Registry
+ for dataset_variant in DatasetRegistry:
+     DatasetConfig.register_subclass(dataset_variant.dataset_id, dataset_variant.value)
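
Adding a new dataset variant follows the same pattern: define a `DatasetConfig` subclass and register it. A sketch, reusing the module's imports (the variant name and paths below are hypothetical placeholders):

# Hypothetical variant; `dataset_id` and the finetune paths are placeholders
@dataclass
class LLaVa_Custom_Config(DatasetConfig):
    dataset_id: str = "llava-custom"

    align_stage_components: Tuple[Path, Path] = (
        Path("download/llava-laion-cc-sbu-558k/chat.json"),
        Path("download/llava-laion-cc-sbu-558k/"),
    )
    finetune_stage_components: Tuple[Path, Path] = (
        Path("download/my-mix/my_mix.json"),
        Path("download/my-mix/"),
    )
    dataset_root_dir: Path = Path("/data/prismatic-vlms")


# Preferred: add a `LLAVA_CUSTOM = LLaVa_Custom_Config` member to `DatasetRegistry`
# so the registration loop above picks it up; direct registration also works:
DatasetConfig.register_subclass("llava-custom", LLaVa_Custom_Config)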