{
  "action_dim": 32,
  "action_head_cfg": {
    "action_dim": 32,
    "action_horizon": 16,
    "add_pos_embed": true,
    "backbone_embedding_dim": 2048,
    "diffusion_model_cfg": {
      "attention_head_dim": 48,
      "cross_attention_dim": 2048,
      "dropout": 0.2,
      "final_dropout": true,
      "interleave_self_attention": true,
      "norm_type": "ada_norm",
      "num_attention_heads": 32,
      "num_layers": 16,
      "output_dim": 1024,
      "positional_embeddings": null
    },
    "hidden_size": 1024,
    "input_embedding_dim": 1536,
    "max_action_dim": 32,
    "max_state_dim": 64,
    "model_dtype": "float32",
    "noise_beta_alpha": 1.5,
    "noise_beta_beta": 1.0,
    "noise_s": 0.999,
    "num_inference_timesteps": 4,
    "num_target_vision_tokens": 32,
    "num_timestep_buckets": 1000,
    "tune_diffusion_model": true,
    "tune_projector": true,
    "use_vlln": true,
    "vl_self_attention_cfg": {
      "attention_head_dim": 64,
      "dropout": 0.2,
      "final_dropout": true,
      "num_attention_heads": 32,
      "num_layers": 4,
      "positional_embeddings": null
    }
  },
  "action_horizon": 16,
  "architectures": [
    "Starforce_S1"
  ],
  "attn_implementation": null,
  "backbone_cfg": {
    "project_to_dim": 2048,
    "select_layer": 12,
    "tune_llm": false,
    "vllm_base_model_path": "checkpoints/Qwen2.5-VL-3B-Instruct"
  },
  "compute_dtype": "bfloat16",
  "model_type": "starforce_s1",
  "torch_dtype": "float32",
  "transformers_version": "4.51.3"
}