GvineQQ committed
Commit 8edabbb · verified · 1 Parent(s): 23c7119

Upload OpenVLA grasp model checkpoint

action_head--10000_checkpoint.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:70fcbbfc3fc492ecb7245c82e61116e6c6e7e76fd0dd435bb8e37c9be1d766a0
+ size 604453726
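
Note: the `*_checkpoint.pt` files in this commit (action head, proprio projector, vision backbone, all tagged step 10000) are Git LFS pointers; `git lfs pull` replaces them with the actual binaries. A minimal sketch for peeking at one locally — the filename comes from this commit, but nothing here assumes a particular internal layout:

import torch

# After `git lfs pull`, the pointer file becomes an ordinary torch-serialized
# checkpoint. We make no assumption about its contents beyond that.
ckpt = torch.load("action_head--10000_checkpoint.pt", map_location="cpu")
keys = list(ckpt) if isinstance(ckpt, dict) else [type(ckpt).__name__]
print(keys[:10])  # inspect top-level structure before wiring it into a model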
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<PAD>": 32000
+ }
config.json ADDED
@@ -0,0 +1,415 @@
+ {
+   "norm_stats": {
+     "gen72_grasp_stacking_baskets": {
+       "action": {
+         "mean": [71.42466735839844, 84.38833618164062, -82.9383316040039, -85.52547454833984, -4.3938679695129395, 19.08216094970703, 0.308868408203125, 67.15239715576172, -80.31741333007812, 84.10924530029297, 86.23428344726562, -86.4471206665039, 3.9559240341186523, 9.594415664672852, 4.726510524749756, 82.21428680419922],
+         "std": [25.26982879638672, 15.808586120605469, 20.259897232055664, 21.52882957458496, 20.819337844848633, 20.531246185302734, 26.965940475463867, 45.217315673828125, 27.104999542236328, 12.850805282592773, 14.046908378601074, 17.79497718811035, 13.857259750366211, 17.137197494506836, 17.71614646911621, 36.83033752441406],
+         "max": [124.62999725341797, 102.0, 12.920000076293945, 52.0, 119.18000030517578, 89.77999877929688, 169.0, 100.0, 3.9600000381469727, 102.0, 123.30999755859375, 52.0, 73.56500244140625, 66.08999633789062, 86.5250015258789, 100.0],
+         "min": [5.84499979019165, -38.275001525878906, -169.0, -97.47000122070312, -113.19999694824219, -65.30000305175781, -139.75, 0.0, -121.68499755859375, 5.84499979019165, -8.654999732971191, -98.16999816894531, -55.459999084472656, -78.44499969482422, -92.29000091552734, 0.0],
+         "q01": [11.34000015258789, 35.595001220703125, -127.60875129699707, -96.5, -68.33499908447266, -37.86875057220459, -82.83499908447266, 0.0, -109.64374732971191, 33.56999969482422, 33.582499504089355, -96.94000244140625, -22.323750495910645, -32.78499984741211, -46.05500030517578, 0.0],
+         "q99": [103.84500122070312, 102.0, -23.510000228881836, 24.047500133514404, 58.91999912261963, 69.83000183105469, 63.060001373291016, 100.0, -10.866249561309814, 102.0, 108.41500091552734, 2.14000004529953, 54.625, 51.900001525878906, 61.040000915527344, 100.0],
+         "mask": [true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true]
+       },
+       "proprio": {
+         "mean": [71.39665222167969, 84.42801666259766, -82.94181823730469, -85.53479766845703, -4.377756118774414, 19.067413330078125, 0.31575945019721985, 67.15303802490234, -80.27192687988281, 84.11940002441406, 86.19857788085938, -86.43647766113281, 3.944101572036743, 9.555686950683594, 4.720884323120117, 82.21434783935547],
+         "std": [24.659215927124023, 15.453649520874023, 19.847105026245117, 21.23726463317871, 20.293472290039062, 19.495216369628906, 26.316822052001953, 45.21790313720703, 26.583589553833008, 12.41045093536377, 13.648248672485352, 17.389333724975586, 13.373165130615234, 16.093711853027344, 17.135786056518555, 36.83036804199219],
+         "max": [124.08000183105469, 102.05000305175781, 10.850000381469727, 52.0099983215332, 109.37000274658203, 87.58000183105469, 168.66000366210938, 100.0, 3.8399999141693115, 102.01000213623047, 119.8499984741211, 41.31999969482422, 68.77999877929688, 65.19999694824219, 82.41999816894531, 100.0],
+         "min": [6.940000057220459, -35.029998779296875, -169.00999450683594, -97.37999725341797, -111.45999908447266, -62.58000183105469, -136.9199981689453, 0.0, -120.04000091552734, 10.140000343322754, -3.2699999809265137, -97.66000366210938, -54.2599983215332, -64.5, -89.81999969482422, 0.0],
+         "q01": [11.512500286102295, 36.560001373291016, -126.5099983215332, -96.41000366210938, -66.58749771118164, -35.62750053405762, -81.84500122070312, 0.0, -109.31999969482422, 37.10499858856201, 34.77500057220459, -96.91999816894531, -21.260000228881836, -29.440000534057617, -45.08750057220459, 0.0],
+         "q99": [102.95499992370605, 102.01000213623047, -25.047500610351562, 23.78499937057495, 56.52000045776367, 67.02999877929688, 61.95750045776367, 100.0, -11.315000295639038, 102.01000213623047, 107.51000213623047, 0.6450000107288361, 53.470001220703125, 49.709999084472656, 59.13999938964844, 100.0]
+       },
+       "num_transitions": 74826,
+       "num_trajectories": 200
+     }
+   },
+   "n_action_bins": 256,
+   "vision_backbone_id": "dinosiglip-vit-so-224px",
+   "llm_backbone_id": "llama2-7b-pure",
+   "arch_specifier": "no-align+fused-gelu-mlp",
+   "output_projector_states": false,
+   "use_fused_vision_backbone": true,
+   "timm_model_ids": ["vit_large_patch14_reg4_dinov2.lvd142m", "vit_so400m_patch14_siglip_224"],
+   "timm_override_act_layers": [null, null],
+   "image_sizes": [224, 224],
+   "image_resize_strategy": "resize-naive",
+   "hf_llm_id": "meta-llama/Llama-2-7b-hf",
+   "llm_max_length": 2048,
+   "pad_token_id": 32000,
+   "pad_to_multiple_of": 64,
+   "text_config": {
+     "vocab_size": 32064,
+     "max_position_embeddings": 2048,
+     "hidden_size": 4096,
+     "intermediate_size": 11008,
+     "num_hidden_layers": 32,
+     "num_attention_heads": 32,
+     "num_key_value_heads": 32,
+     "hidden_act": "silu",
+     "initializer_range": 0.02,
+     "rms_norm_eps": 1e-06,
+     "pretraining_tp": 1,
+     "use_cache": true,
+     "rope_theta": 10000.0,
+     "rope_scaling": null,
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "return_dict": true,
+     "output_hidden_states": false,
+     "output_attentions": false,
+     "torchscript": false,
+     "torch_dtype": "bfloat16",
+     "use_bfloat16": false,
+     "tf_legacy_loss": false,
+     "pruned_heads": {},
+     "tie_word_embeddings": false,
+     "chunk_size_feed_forward": 0,
+     "is_encoder_decoder": false,
+     "is_decoder": false,
+     "cross_attention_hidden_size": null,
+     "add_cross_attention": false,
+     "tie_encoder_decoder": false,
+     "max_length": 20,
+     "min_length": 0,
+     "do_sample": false,
+     "early_stopping": false,
+     "num_beams": 1,
+     "num_beam_groups": 1,
+     "diversity_penalty": 0.0,
+     "temperature": 1.0,
+     "top_k": 50,
+     "top_p": 1.0,
+     "typical_p": 1.0,
+     "repetition_penalty": 1.0,
+     "length_penalty": 1.0,
+     "no_repeat_ngram_size": 0,
+     "encoder_no_repeat_ngram_size": 0,
+     "bad_words_ids": null,
+     "num_return_sequences": 1,
+     "output_scores": false,
+     "return_dict_in_generate": false,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "remove_invalid_values": false,
+     "exponential_decay_length_penalty": null,
+     "suppress_tokens": null,
+     "begin_suppress_tokens": null,
+     "architectures": null,
+     "finetuning_task": null,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "tokenizer_class": null,
+     "prefix": null,
+     "bos_token_id": 1,
+     "pad_token_id": 32000,
+     "eos_token_id": 2,
+     "sep_token_id": null,
+     "decoder_start_token_id": null,
+     "task_specific_params": null,
+     "problem_type": null,
+     "_name_or_path": "",
+     "model_type": "llama"
+   },
+   "return_dict": true,
+   "output_hidden_states": false,
+   "output_attentions": false,
+   "torchscript": false,
+   "torch_dtype": "bfloat16",
+   "use_bfloat16": false,
+   "tf_legacy_loss": false,
+   "pruned_heads": {},
+   "tie_word_embeddings": true,
+   "chunk_size_feed_forward": 0,
+   "is_encoder_decoder": false,
+   "is_decoder": false,
+   "cross_attention_hidden_size": null,
+   "add_cross_attention": false,
+   "tie_encoder_decoder": false,
+   "max_length": 20,
+   "min_length": 0,
+   "do_sample": false,
+   "early_stopping": false,
+   "num_beams": 1,
+   "num_beam_groups": 1,
+   "diversity_penalty": 0.0,
+   "temperature": 1.0,
+   "top_k": 50,
+   "top_p": 1.0,
+   "typical_p": 1.0,
+   "repetition_penalty": 1.0,
+   "length_penalty": 1.0,
+   "no_repeat_ngram_size": 0,
+   "encoder_no_repeat_ngram_size": 0,
+   "bad_words_ids": null,
+   "num_return_sequences": 1,
+   "output_scores": false,
+   "return_dict_in_generate": false,
+   "forced_bos_token_id": null,
+   "forced_eos_token_id": null,
+   "remove_invalid_values": false,
+   "exponential_decay_length_penalty": null,
+   "suppress_tokens": null,
+   "begin_suppress_tokens": null,
+   "architectures": ["OpenVLAForActionPrediction"],
+   "finetuning_task": null,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1"
+   },
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1
+   },
+   "tokenizer_class": null,
+   "prefix": null,
+   "bos_token_id": null,
+   "eos_token_id": null,
+   "sep_token_id": null,
+   "decoder_start_token_id": null,
+   "task_specific_params": null,
+   "problem_type": null,
+   "_name_or_path": "/home/guangyu/.cache/huggingface/hub/models--openvla--openvla-7b/snapshots/31f090d05236101ebfc381b61c674dd4746d4ce0",
+   "transformers_version": "4.40.1",
+   "auto_map": {
+     "AutoConfig": "configuration_prismatic.OpenVLAConfig",
+     "AutoModelForVision2Seq": "modeling_prismatic.OpenVLAForActionPrediction"
+   },
+   "model_type": "openvla"
+ }
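
The `norm_stats` entry above stores per-dimension statistics for the 16-dimensional action and proprio vectors (the exact joint layout is dataset-specific). OpenVLA's convention is to clip each action dimension to its [q01, q99] range, rescale to [-1, 1], and discretize into `n_action_bins` (256) uniform bins, with `mask` marking which dimensions are normalized. A minimal sketch of that normalization and its inverse; the function names are illustrative, not part of the checkpoint:

import json
import numpy as np

def normalize(action, stats):
    q01, q99 = np.asarray(stats["q01"]), np.asarray(stats["q99"])
    mask = np.asarray(stats.get("mask", [True] * len(q01)))
    scaled = np.clip(2.0 * (action - q01) / (q99 - q01 + 1e-8) - 1.0, -1.0, 1.0)
    return np.where(mask, scaled, action)  # masked-out dims pass through unchanged

def unnormalize(scaled, stats):
    q01, q99 = np.asarray(stats["q01"]), np.asarray(stats["q99"])
    mask = np.asarray(stats.get("mask", [True] * len(q01)))
    return np.where(mask, 0.5 * (scaled + 1.0) * (q99 - q01) + q01, scaled)

stats = json.load(open("config.json"))["norm_stats"]["gen72_grasp_stacking_baskets"]["action"]
action = np.asarray(stats["mean"])  # any 16-dim action; the mean lies inside [q01, q99]
assert np.allclose(unnormalize(normalize(action, stats), stats), action, atol=1e-4)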
dataset_statistics.json ADDED
@@ -0,0 +1,244 @@
+ {
+   "gen72_grasp_stacking_baskets": {
+     "action": {
+       "mean": [71.42466735839844, 84.38833618164062, -82.9383316040039, -85.52547454833984, -4.3938679695129395, 19.08216094970703, 0.308868408203125, 67.15239715576172, -80.31741333007812, 84.10924530029297, 86.23428344726562, -86.4471206665039, 3.9559240341186523, 9.594415664672852, 4.726510524749756, 82.21428680419922],
+       "std": [25.26982879638672, 15.808586120605469, 20.259897232055664, 21.52882957458496, 20.819337844848633, 20.531246185302734, 26.965940475463867, 45.217315673828125, 27.104999542236328, 12.850805282592773, 14.046908378601074, 17.79497718811035, 13.857259750366211, 17.137197494506836, 17.71614646911621, 36.83033752441406],
+       "max": [124.62999725341797, 102.0, 12.920000076293945, 52.0, 119.18000030517578, 89.77999877929688, 169.0, 100.0, 3.9600000381469727, 102.0, 123.30999755859375, 52.0, 73.56500244140625, 66.08999633789062, 86.5250015258789, 100.0],
+       "min": [5.84499979019165, -38.275001525878906, -169.0, -97.47000122070312, -113.19999694824219, -65.30000305175781, -139.75, 0.0, -121.68499755859375, 5.84499979019165, -8.654999732971191, -98.16999816894531, -55.459999084472656, -78.44499969482422, -92.29000091552734, 0.0],
+       "q01": [11.34000015258789, 35.595001220703125, -127.60875129699707, -96.5, -68.33499908447266, -37.86875057220459, -82.83499908447266, 0.0, -109.64374732971191, 33.56999969482422, 33.582499504089355, -96.94000244140625, -22.323750495910645, -32.78499984741211, -46.05500030517578, 0.0],
+       "q99": [103.84500122070312, 102.0, -23.510000228881836, 24.047500133514404, 58.91999912261963, 69.83000183105469, 63.060001373291016, 100.0, -10.866249561309814, 102.0, 108.41500091552734, 2.14000004529953, 54.625, 51.900001525878906, 61.040000915527344, 100.0],
+       "mask": [true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true]
+     },
+     "proprio": {
+       "mean": [71.39665222167969, 84.42801666259766, -82.94181823730469, -85.53479766845703, -4.377756118774414, 19.067413330078125, 0.31575945019721985, 67.15303802490234, -80.27192687988281, 84.11940002441406, 86.19857788085938, -86.43647766113281, 3.944101572036743, 9.555686950683594, 4.720884323120117, 82.21434783935547],
+       "std": [24.659215927124023, 15.453649520874023, 19.847105026245117, 21.23726463317871, 20.293472290039062, 19.495216369628906, 26.316822052001953, 45.21790313720703, 26.583589553833008, 12.41045093536377, 13.648248672485352, 17.389333724975586, 13.373165130615234, 16.093711853027344, 17.135786056518555, 36.83036804199219],
+       "max": [124.08000183105469, 102.05000305175781, 10.850000381469727, 52.0099983215332, 109.37000274658203, 87.58000183105469, 168.66000366210938, 100.0, 3.8399999141693115, 102.01000213623047, 119.8499984741211, 41.31999969482422, 68.77999877929688, 65.19999694824219, 82.41999816894531, 100.0],
+       "min": [6.940000057220459, -35.029998779296875, -169.00999450683594, -97.37999725341797, -111.45999908447266, -62.58000183105469, -136.9199981689453, 0.0, -120.04000091552734, 10.140000343322754, -3.2699999809265137, -97.66000366210938, -54.2599983215332, -64.5, -89.81999969482422, 0.0],
+       "q01": [11.512500286102295, 36.560001373291016, -126.5099983215332, -96.41000366210938, -66.58749771118164, -35.62750053405762, -81.84500122070312, 0.0, -109.31999969482422, 37.10499858856201, 34.77500057220459, -96.91999816894531, -21.260000228881836, -29.440000534057617, -45.08750057220459, 0.0],
+       "q99": [102.95499992370605, 102.01000213623047, -25.047500610351562, 23.78499937057495, 56.52000045776367, 67.02999877929688, 61.95750045776367, 100.0, -11.315000295639038, 102.01000213623047, 107.51000213623047, 0.6450000107288361, 53.470001220703125, 49.709999084472656, 59.13999938964844, 100.0]
+     },
+     "num_transitions": 74826,
+     "num_trajectories": 200
+   }
+ }
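
`dataset_statistics.json` repeats the `norm_stats` payload from `config.json` so the statistics can travel with the checkpoint independently of the model config. For reference, a sketch of how such per-dimension statistics are typically computed; `trajectories` below is stand-in data, whereas the real numbers above came from 200 trajectories / 74,826 transitions:

import numpy as np

# Stand-in: a few [T, 16] action arrays; real code would iterate the dataset.
trajectories = [np.random.randn(500, 16) for _ in range(10)]
actions = np.concatenate(trajectories, axis=0)  # [num_transitions, 16]

stats = {
    "mean": actions.mean(axis=0).tolist(),
    "std": actions.std(axis=0).tolist(),
    "max": actions.max(axis=0).tolist(),
    "min": actions.min(axis=0).tolist(),
    "q01": np.quantile(actions, 0.01, axis=0).tolist(),
    "q99": np.quantile(actions, 0.99, axis=0).tolist(),
    "mask": [True] * actions.shape[1],
}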
lora_adapter/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: /home/guangyu/.cache/huggingface/hub/models--openvla--openvla-7b/snapshots/31f090d05236101ebfc381b61c674dd4746d4ce0
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+
+ ### Framework versions
+
+ - PEFT 0.11.1
lora_adapter/adapter_config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": {
+     "base_model_class": "OpenVLAForActionPrediction",
+     "parent_library": "transformers_modules.31f090d05236101ebfc381b61c674dd4746d4ce0.modeling_prismatic"
+   },
+   "base_model_name_or_path": "/home/guangyu/.cache/huggingface/hub/models--openvla--openvla-7b/snapshots/31f090d05236101ebfc381b61c674dd4746d4ce0",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": "gaussian",
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": ["k_proj", "qkv", "fc1", "up_proj", "q_proj", "lm_head", "fc2", "o_proj", "down_proj", "kv", "proj", "gate_proj", "v_proj", "q", "fc3"],
+   "task_type": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
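
The adapter is a rank-32 LoRA (alpha 16, Gaussian init, no bias) over essentially every linear projection in both the language model and the fused vision backbone. A sketch of attaching it with PEFT, assuming the usual `trust_remote_code` loading path for the base `openvla/openvla-7b`; the local adapter path is illustrative:

import torch
from peft import PeftModel
from transformers import AutoModelForVision2Seq

# Load the frozen base model (remote code supplies OpenVLAForActionPrediction).
base = AutoModelForVision2Seq.from_pretrained(
    "openvla/openvla-7b", torch_dtype=torch.bfloat16, trust_remote_code=True
)

# Attach the LoRA weights from this repo's `lora_adapter/` directory.
model = PeftModel.from_pretrained(base, "lora_adapter")

# Optionally fold the adapter into the base weights for faster inference.
model = model.merge_and_unload()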
lora_adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d10387bc396d7897bdb06b0bd45ee62d2d7204cc122e51e1271b18eefa55e1aa
+ size 484467800
preprocessor_config.json ADDED
@@ -0,0 +1,114 @@
+ {
+   "auto_map": {
+     "AutoImageProcessor": "processing_prismatic.PrismaticImageProcessor",
+     "AutoProcessor": "processing_prismatic.PrismaticProcessor"
+   },
+   "image_processor_type": "PrismaticImageProcessor",
+   "image_resize_strategy": "resize-naive",
+   "input_sizes": [[3, 224, 224], [3, 224, 224]],
+   "interpolations": ["bicubic", "bicubic"],
+   "means": [[0.485, 0.456, 0.406], [0.5, 0.5, 0.5]],
+   "processor_class": "PrismaticProcessor",
+   "stds": [[0.229, 0.224, 0.225], [0.5, 0.5, 0.5]],
+   "tvf_crop_params": [{"output_size": [224, 224]}, {"output_size": [224, 224]}],
+   "tvf_do_letterbox": false,
+   "tvf_letterbox_fill": null,
+   "tvf_normalize_params": [
+     {"inplace": false, "mean": [0.484375, 0.455078125, 0.40625], "std": [0.228515625, 0.2236328125, 0.224609375]},
+     {"inplace": false, "mean": [0.5, 0.5, 0.5], "std": [0.5, 0.5, 0.5]}
+   ],
+   "tvf_resize_params": [
+     {"antialias": true, "interpolation": 3, "max_size": null, "size": [224, 224]},
+     {"antialias": true, "interpolation": 3, "max_size": null, "size": [224, 224]}
+   ],
+   "use_fused_vision_backbone": true
+ }
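
With `use_fused_vision_backbone` true, every frame goes through two parallel pipelines — ImageNet-style normalization for the DINOv2 branch and (0.5, 0.5, 0.5) for the SigLIP branch — and the results are channel-stacked. A quick sanity-check sketch, assuming this repo is the working directory and its remote code is trusted:

from PIL import Image
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained(".", trust_remote_code=True)
batch = processor(Image.new("RGB", (640, 480)))

# Two 3-channel views of the same 224x224 frame, stacked -> (1, 6, 224, 224)
print(batch["pixel_values"].shape)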
processing_prismatic.py ADDED
@@ -0,0 +1,257 @@
+ """
+ processing_prismatic.py
+
+ HuggingFace-style preprocessor definitions for Prismatic VLMs, inheriting from `ProcessorMixin`. Default configuration
+ specifies `siglip-224px+7b`.
+ """
+
+ from typing import Any, ClassVar, List, Optional, Tuple, Union
+
+ import timm.data
+ import torch
+ import torchvision.transforms.functional as TVF
+ from PIL import Image
+ from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
+ from transformers import PreTrainedTokenizerBase
+ from transformers.image_processing_utils import BatchFeature, ImageProcessingMixin
+ from transformers.processing_utils import ProcessorMixin
+ from transformers.tokenization_utils import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
+ from transformers.utils import TensorType
+
+
+ # === Image Processing ===
+ def letterbox_pad_transform(image: Image.Image, padding_fill_value: Tuple[int, int, int]) -> Image.Image:
+     """Given a PIL.Image, pad to square by adding a symmetric border around the height/width."""
+     (w, h), max_wh = image.size, max(image.size)
+     horizontal_pad, vertical_pad = int((max_wh - w) / 2), int((max_wh - h) / 2)
+     padding = (horizontal_pad, vertical_pad, horizontal_pad, vertical_pad)
+
+     return TVF.pad(image, padding, fill=padding_fill_value, padding_mode="constant")
+
+
+ class PrismaticImageProcessor(ImageProcessingMixin):
+     model_input_names: ClassVar[List[str]] = ["pixel_values"]
+
+     def __init__(
+         self,
+         use_fused_vision_backbone: bool = False,
+         image_resize_strategy: str = "letterbox",
+         input_sizes: Optional[List[Tuple[int, int, int]]] = None,
+         interpolations: Optional[List[str]] = None,
+         means: Optional[List[Tuple[float, float, float]]] = None,
+         stds: Optional[List[Tuple[float, float, float]]] = None,
+         **kwargs: str,
+     ) -> None:
+         """
+         Initialize a PrismaticImageProcessor as a wrapper around a torchvision transform; this transform will be
+         created by TIMM, and edited to follow our custom `image_resize_strategy` logic.
+
+         @param use_fused_vision_backbone: Boolean indicating single or fused (dual) vision backbone
+         @param image_resize_strategy: Prismatic image resize strategy in < resize-naive | resize-crop | letterbox >
+         @param input_size: [TIMM :: `data_cfg`] Input image size as tuple (channels, width, height)
+         @param interpolation: [TIMM :: `data_cfg`] Interpolation as string (default: "bicubic")
+         @param mean: [TIMM :: `data_cfg`] Normalization mean as float tuple (or two-tuple if `fused_backbone`)
+         @param std: [TIMM :: `data_cfg`] Normalization std as float tuple (or two-tuple if `fused_backbone`)
+         """
+         self.use_fused_vision_backbone = use_fused_vision_backbone
+         self.image_resize_strategy = image_resize_strategy
+
+         # Handle `None` default values
+         input_sizes = [(3, 224, 224)] if input_sizes is None else input_sizes
+         means = [(0.5, 0.5, 0.5)] if means is None else means
+         stds = [(0.5, 0.5, 0.5)] if stds is None else stds
+
+         # TIMM `data_cfg` Parameters
+         self.input_sizes, self.interpolations, self.means, self.stds = input_sizes, interpolations, means, stds
+
+         # Grab torchvision transforms via TIMM =>> need to parse for specific "functional" transform values!
+         self.tvf_resize_params, self.tvf_crop_params, self.tvf_normalize_params = [], [], []
+         self.tvf_do_letterbox, self.tvf_letterbox_fill = False, None
+
+         for idx in range(len(input_sizes)):
+             transform = timm.data.create_transform(
+                 input_size=self.input_sizes[idx],
+                 interpolation=self.interpolations[idx],
+                 mean=self.means[idx],
+                 std=self.stds[idx],
+                 crop_pct=1.0,        # Set to 1.0 to ignore cropping (initial Resize sets `input_size`)
+                 crop_mode="center",  # Default crop mode -- no-op when `crop_pct == 1.0`
+                 is_training=False,   # No image augmentations when loading the transform!
+             )
+
+             # [Validation] Ensure appropriate transform structure, expected sizes
+             if not (
+                 isinstance(transform, Compose)
+                 and (len(transform.transforms) == 4)
+                 and isinstance(transform.transforms[0], Resize)
+                 and isinstance(transform.transforms[1], CenterCrop)
+                 and isinstance(transform.transforms[2], ToTensor)
+                 and isinstance(transform.transforms[3], Normalize)
+                 and (transform.transforms[0].size == self.input_sizes[idx][-1])
+                 and (transform.transforms[1].size == self.input_sizes[idx][-2:])
+             ):
+                 raise ValueError(f"Unexpected TIMM image transformation structure/sizes: `{transform}`")
+
+             # HF Image Processors *must* be JSON-serializable; as such, cannot have a `torchvision.Transform` as an attribute.
+             # => Instead, we're going to parse the transform and call "torchvision.transforms.functional" (`tvf`)
+             resize_t, crop_t, norm_t = transform.transforms[0], transform.transforms[1], transform.transforms[3]
+             self.tvf_resize_params.append(
+                 {
+                     "size": resize_t.size,
+                     "interpolation": TVF.pil_modes_mapping[resize_t.interpolation],
+                     "max_size": None,
+                     "antialias": True,
+                 }
+             )
+             self.tvf_crop_params.append({"output_size": crop_t.size})
+             self.tvf_normalize_params.append(
+                 {
+                     "mean": norm_t.mean.float().numpy().tolist(),
+                     "std": norm_t.std.float().numpy().tolist(),
+                     "inplace": False,
+                 }
+             )
+             self.tvf_do_letterbox, self.tvf_letterbox_fill = False, None
+
+             # Handle Prismatic `image_resize_strategy`
+             if self.image_resize_strategy == "resize-naive":
+                 self.tvf_resize_params[idx]["size"] = (resize_t.size, resize_t.size)
+             elif self.image_resize_strategy == "letterbox":
+                 self.tvf_do_letterbox, self.tvf_letterbox_fill = True, tuple([int(x * 255) for x in self.means[idx]])
+             elif self.image_resize_strategy == "resize-crop":
+                 pass
+             else:
+                 raise ValueError(f"Image resize strategy `{self.image_resize_strategy}` is not supported!")
+
+         # Dispatch **kwargs to super()
+         super().__init__(**kwargs)
+
+     def apply_transform(self, img: Image.Image) -> torch.Tensor:
+         """Apply `functional` variant of TIMM's Transform = Compose([Resize -> CenterCrop -> ToTensor -> Normalize])"""
+         if self.tvf_do_letterbox:
+             img = letterbox_pad_transform(img, self.tvf_letterbox_fill)
+
+         # [Contract] Fused Backbones expect "channel-stacked" inputs; we'll unpack on the model side!
+         imgs_t = []
+         for idx in range(len(self.input_sizes)):
+             img_idx = TVF.resize(img, **self.tvf_resize_params[idx])
+             img_idx = TVF.center_crop(img_idx, **self.tvf_crop_params[idx])
+             img_idx_t = TVF.to_tensor(img_idx)
+             img_idx_t = TVF.normalize(img_idx_t, **self.tvf_normalize_params[idx])
+             imgs_t.append(img_idx_t)
+
+         # [Contract] `imgs_t` is a list of Tensors of shape [3, input_size, input_size]; stack along dim = 0
+         img_t = torch.vstack(imgs_t)
+
+         return img_t
+
+     def preprocess(
+         self,
+         images: Union[Image.Image, List[Image.Image]],
+         return_tensors: Optional[Union[str, TensorType]] = None,
+         **_: str,
+     ) -> BatchFeature:
+         """
+         Preprocess an image (or batch of images); note that unlike the `transformers :: BaseImageProcessor` we
+         explicitly only handle PIL.Image.Image instances for simplicity.
+
+         @param images: A (batch of) PIL.Image.Image instance(s) to preprocess.
+         @param return_tensors: BatchFeature default Tensor format (e.g., "pt" for torch); if None, returns np.ndarray
+
+         @return: Instance of `transformers :: BatchFeature` with a single key "pixel_values"
+         """
+         if not isinstance(images, list):
+             images = [images]
+
+         # Apply `self.img_transform` to each image (will return list of torch.Tensors); stack into "batched" Tensor
+         pixel_values = torch.stack([self.apply_transform(img.convert("RGB")) for img in images])
+
+         # Return BatchFeature =>> note that for compatibility, constructor expects Dict[str, np.ndarray], so we convert
+         return BatchFeature(data={"pixel_values": pixel_values.float().numpy()}, tensor_type=return_tensors)
+
+     def __call__(self, images: Union[Image.Image, List[Image.Image]], **kwargs) -> BatchFeature:
+         return self.preprocess(images, **kwargs)
+
+
+ # === PrismaticProcessor =>> Wraps both ImageProcessor and Tokenizer ===
+ #   =>> https://github.com/huggingface/transformers/blob/main/src/transformers/models/llava/processing_llava.py
+ class PrismaticProcessor(ProcessorMixin):
+     attributes: ClassVar[List[str]] = ["image_processor", "tokenizer"]
+     image_processor_class: str = "AutoImageProcessor"
+     tokenizer_class: str = "AutoTokenizer"
+
+     def __init__(
+         self,
+         image_processor: Optional[ImageProcessingMixin] = None,
+         tokenizer: Optional[PreTrainedTokenizerBase] = None,
+     ) -> None:
+         super().__init__(image_processor, tokenizer)
+
+     def __call__(
+         self,
+         text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
+         images: Union[Image.Image, List[Image.Image]],
+         padding: Union[bool, str, PaddingStrategy] = False,
+         truncation: Optional[Union[bool, str, TruncationStrategy]] = None,
+         max_length: Optional[int] = None,
+         return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
+     ) -> BatchFeature:
+         """
+         Preprocess a given (batch) of text/images for a Prismatic VLM; forwards text to the underlying LLM's tokenizer,
+         forwards images to PrismaticImageProcessor.
+
+         @param text: The (batch) of text to encode; must be a string or list of strings.
+         @param images: A (batch of) PIL.Image.Image instance(s) to preprocess.
+         @param padding: Sequence padding strategy (if multiple specified) in < True = "longest" | "max_length" | False >
+         @param truncation: Truncation strategy for the output sequences; requires `max_length` to be specified
+         @param max_length: Maximum length (in tokens) to truncate
+         @param return_tensors: Type of return tensors (usually "pt" or TensorType.PYTORCH)
+
+         @return: BatchFeature with keys for `input_ids`, `attention_mask` and `pixel_values`.
+         """
+         pixel_values = self.image_processor(images, return_tensors=return_tensors)["pixel_values"]
+         text_inputs = self.tokenizer(
+             text, return_tensors=return_tensors, padding=padding, truncation=truncation, max_length=max_length
+         )
+
+         # [Validate] Need same number of images and text inputs!
+         if pixel_values.shape[0] != text_inputs.input_ids.shape[0]:
+             raise ValueError("Batch is malformed; expected same number of images and text inputs!")
+
+         return BatchFeature(data={**text_inputs, "pixel_values": pixel_values})
+
+     # === Tokenizer Dispatch Utilities =>> check `PreTrainedTokenizerBase` for documentation ===
+     def batch_decode(
+         self,
+         sequences: Union[List[int], List[List[int]], torch.Tensor, Any],  # `Any` = np.ndarray | tf.Tensor
+         skip_special_tokens: bool = False,
+         clean_up_tokenization_spaces: Optional[bool] = None,
+         **kwargs: str,
+     ) -> List[str]:
+         return self.tokenizer.batch_decode(
+             sequences=sequences,
+             skip_special_tokens=skip_special_tokens,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs,
+         )
+
+     def decode(
+         self,
+         token_ids: Union[int, List[int], torch.Tensor, Any],  # `Any` = np.ndarray | tf.Tensor
+         skip_special_tokens: bool = False,
+         clean_up_tokenization_spaces: Optional[bool] = None,
+         **kwargs: str,
+     ) -> str:
+         return self.tokenizer.decode(
+             token_ids=token_ids,
+             skip_special_tokens=skip_special_tokens,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs,
+         )
+
+     @property
+     def model_input_names(self) -> List[str]:
+         tokenizer_input_names = self.tokenizer.model_input_names
+         image_processor_input_names = self.image_processor.model_input_names
+
+         return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
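
Putting the pieces together, a hedged end-to-end sketch: processor from this repo, base OpenVLA weights with the LoRA adapter merged in (as above), then the standard OpenVLA `predict_action` entry point from `modeling_prismatic.py`, which `auto_map` references but this commit does not include. The separate action-head/proprio-projector checkpoints suggest a continuous action head may be the intended inference path instead; its loading code is likewise not part of this commit. Image and instruction below are placeholders:

import torch
from peft import PeftModel
from PIL import Image
from transformers import AutoModelForVision2Seq, AutoProcessor

processor = AutoProcessor.from_pretrained(".", trust_remote_code=True)
base = AutoModelForVision2Seq.from_pretrained(
    "openvla/openvla-7b", torch_dtype=torch.bfloat16, trust_remote_code=True
)
vla = PeftModel.from_pretrained(base, "lora_adapter").merge_and_unload().to("cuda:0")

image = Image.open("frame.png")  # placeholder camera frame
prompt = "In: What action should the robot take to pick up the basket?\nOut:"
inputs = processor(prompt, image).to("cuda:0", dtype=torch.bfloat16)

# `unnorm_key` must resolve to the gen72 stats shown above (e.g., by copying
# this repo's `norm_stats` into the loaded model's config beforehand).
action = vla.predict_action(**inputs, unnorm_key="gen72_grasp_stacking_baskets", do_sample=False)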
processor_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "auto_map": {
+     "AutoProcessor": "processing_prismatic.PrismaticProcessor"
+   },
+   "processor_class": "PrismaticProcessor"
+ }
proprio_projector--10000_checkpoint.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99a7ecce003d63423fecdf228c69c048975400245c462cc0928d9eee69f1508a
+ size 67406320
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {"content": "<s>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false},
+   "eos_token": {"content": "</s>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false},
+   "pad_token": {"content": "<PAD>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false},
+   "unk_token": {"content": "<unk>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false}
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {"content": "<unk>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "1": {"content": "<s>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "2": {"content": "</s>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "32000": {"content": "<PAD>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}
+   },
+   "auto_map": {
+     "AutoProcessor": "processing_prismatic.PrismaticProcessor"
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 2048,
+   "pad_token": "<PAD>",
+   "padding_side": "right",
+   "processor_class": "PrismaticProcessor",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
vision_backbone--10000_checkpoint.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcf3f0c6c17b8d55ea97db293e408131d043acc1594bb7700ca2179f1411194c
+ size 3344957817