{
"norm_stats": {
"libero_spatial_no_noops": {
"action": {
"mean": [
0.15312480926513672,
0.13707278668880463,
-0.1552678942680359,
-0.005176451988518238,
-0.011208761483430862,
-0.020194293931126595,
0.4578818082809448
],
"std": [
0.4127274453639984,
0.34724298119544983,
0.5086919665336609,
0.03726602718234062,
0.07244444638490677,
0.05762366205453873,
0.4982787072658539
],
"max": [
0.9375,
0.9375,
0.9375,
0.1971428543329239,
0.33642858266830444,
0.375,
1.0
],
"min": [
-0.9375,
-0.9375,
-0.9375,
-0.1875,
-0.3675000071525574,
-0.36000001430511475,
0.0
],
"q01": [
-0.7454732114076613,
-0.6616071462631226,
-0.9375,
-0.1071428582072258,
-0.20678570866584778,
-0.1842857152223587,
0.0
],
"q99": [
0.9375,
0.8758928775787354,
0.9321428537368774,
0.1039285734295845,
0.17678570747375488,
0.14571428298950195,
1.0
],
"mask": [
true,
true,
true,
true,
true,
true,
false
]
},
"proprio": {
"mean": [
-0.024462509900331497,
0.10652972012758255,
1.0580466985702515,
3.0628459453582764,
-0.10464027523994446,
0.08307304978370667,
0.01995459757745266,
-0.020162809640169144
],
"std": [
0.11014781892299652,
0.13784685730934143,
0.1044282391667366,
0.10451057553291321,
0.4112096130847931,
0.21766896545886993,
0.017260905355215073,
0.01711163856089115
],
"max": [
0.1759040206670761,
0.3904820382595062,
1.3290715217590332,
3.4566118717193604,
1.2268599271774292,
1.0429412126541138,
0.041053611785173416,
0.000775813648942858
],
"min": [
-0.3095473051071167,
-0.29250794649124146,
0.9095591306686401,
2.497488260269165,
-1.8006486892700195,
-0.7207611203193665,
-0.0004703797458205372,
-0.041536275297403336
],
"q01": [
-0.2727657300233841,
-0.23721413239836692,
0.9160063165426254,
2.77949666261673,
-1.3187511622905732,
-0.41989982962608335,
0.001503719249740243,
-0.03989770736545324
],
"q99": [
0.13529365032911292,
0.3629165390133857,
1.2862326657772063,
3.2829698753356933,
0.9332760351896285,
0.6325724506378171,
0.039933966137468815,
-0.001671919699292631
]
},
"num_transitions": 52970,
"num_trajectories": 432
}
},
"n_action_bins": 256,
"vision_backbone_id": "dinosiglip-vit-so-224px",
"llm_backbone_id": "llama2-7b-pure",
"arch_specifier": "no-align+fused-gelu-mlp",
"output_projector_states": false,
"use_fused_vision_backbone": true,
"timm_model_ids": [
"vit_large_patch14_reg4_dinov2.lvd142m",
"vit_so400m_patch14_siglip_224"
],
"timm_override_act_layers": [
null,
null
],
"image_sizes": [
224,
224
],
"image_resize_strategy": "resize-naive",
"hf_llm_id": "meta-llama/Llama-2-7b-hf",
"llm_max_length": 2048,
"pad_token_id": 32000,
"pad_to_multiple_of": 64,
"text_config": {
"vocab_size": 32064,
"max_position_embeddings": 2048,
"hidden_size": 4096,
"intermediate_size": 11008,
"num_hidden_layers": 32,
"num_attention_heads": 32,
"num_key_value_heads": 32,
"hidden_act": "silu",
"initializer_range": 0.02,
"rms_norm_eps": 1e-06,
"pretraining_tp": 1,
"use_cache": true,
"rope_theta": 10000.0,
"rope_scaling": null,
"attention_bias": false,
"attention_dropout": 0.0,
"mlp_bias": false,
"head_dim": 128,
"return_dict": true,
"output_hidden_states": false,
"output_attentions": false,
"torchscript": false,
"torch_dtype": "bfloat16",
"use_bfloat16": false,
"tf_legacy_loss": false,
"pruned_heads": {},
"tie_word_embeddings": false,
"chunk_size_feed_forward": 0,
"is_encoder_decoder": false,
"is_decoder": false,
"cross_attention_hidden_size": null,
"add_cross_attention": false,
"tie_encoder_decoder": false,
"max_length": 20,
"min_length": 0,
"do_sample": false,
"early_stopping": false,
"num_beams": 1,
"num_beam_groups": 1,
"diversity_penalty": 0.0,
"temperature": 1.0,
"top_k": 50,
"top_p": 1.0,
"typical_p": 1.0,
"repetition_penalty": 1.0,
"length_penalty": 1.0,
"no_repeat_ngram_size": 0,
"encoder_no_repeat_ngram_size": 0,
"bad_words_ids": null,
"num_return_sequences": 1,
"output_scores": false,
"return_dict_in_generate": false,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"remove_invalid_values": false,
"exponential_decay_length_penalty": null,
"suppress_tokens": null,
"begin_suppress_tokens": null,
"architectures": null,
"finetuning_task": null,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"tokenizer_class": null,
"prefix": null,
"bos_token_id": 1,
"pad_token_id": 32000,
"eos_token_id": 2,
"sep_token_id": null,
"decoder_start_token_id": null,
"task_specific_params": null,
"problem_type": null,
"_name_or_path": "",
"_attn_implementation_autoset": false,
"model_type": "llama"
},
"return_dict": true,
"output_hidden_states": false,
"output_attentions": false,
"torchscript": false,
"torch_dtype": "bfloat16",
"use_bfloat16": false,
"tf_legacy_loss": false,
"pruned_heads": {},
"tie_word_embeddings": true,
"chunk_size_feed_forward": 0,
"is_encoder_decoder": false,
"is_decoder": false,
"cross_attention_hidden_size": null,
"add_cross_attention": false,
"tie_encoder_decoder": false,
"max_length": 20,
"min_length": 0,
"do_sample": false,
"early_stopping": false,
"num_beams": 1,
"num_beam_groups": 1,
"diversity_penalty": 0.0,
"temperature": 1.0,
"top_k": 50,
"top_p": 1.0,
"typical_p": 1.0,
"repetition_penalty": 1.0,
"length_penalty": 1.0,
"no_repeat_ngram_size": 0,
"encoder_no_repeat_ngram_size": 0,
"bad_words_ids": null,
"num_return_sequences": 1,
"output_scores": false,
"return_dict_in_generate": false,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"remove_invalid_values": false,
"exponential_decay_length_penalty": null,
"suppress_tokens": null,
"begin_suppress_tokens": null,
"architectures": [
"OpenVLAForActionPrediction"
],
"finetuning_task": null,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"tokenizer_class": null,
"prefix": null,
"bos_token_id": null,
"eos_token_id": null,
"sep_token_id": null,
"decoder_start_token_id": null,
"task_specific_params": null,
"problem_type": null,
"_name_or_path": "/data/juyi/hub/models--openvla--openvla-7b/snapshots/31f090d05236101ebfc381b61c674dd4746d4ce0",
"_attn_implementation_autoset": true,
"transformers_version": "4.47.0",
"auto_map": {
"AutoConfig": "configuration_prismatic.OpenVLAConfig",
"AutoModelForVision2Seq": "modeling_prismatic.OpenVLAForActionPrediction"
},
"model_type": "openvla"
}