{ "architectures": [ "NebulaForCausalLM" ], "dim": 1280, "dropout": 0.1, "ffn_dim_multiplier": 2.6666666666666665, "max_seq_len": 2048, "model_type": "nebula", "multiple_of": 256, "n_heads": 10, "n_kv_heads": 10, "n_layers": 14, "norm_eps": 1e-05, "torch_dtype": "float32", "transformers_version": "4.55.2", "use_cache": true, "vocab_size": 60729 }