{
  "_name_or_path": "configs/gla_340M.json",
  "architectures": [
    "GLAForCausalLM"
  ],
  "model_max_length": 131072,
  "attn": null,
  "attn_mode": "chunk",
  "bos_token_id": 1,
  "clamp_min": null,
  "conv_size": 4,
  "elementwise_affine": true,
  "eos_token_id": 2,
  "expand_k": 0.5,
  "expand_v": 1,
  "feature_map": null,
  "fuse_cross_entropy": true,
  "fuse_norm": true,
  "hidden_act": "swish",
  "hidden_ratio": 4,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": null,
  "max_position_embeddings": 2048,
  "model_type": "gla",
  "norm_eps": 1e-06,
  "num_heads": 4,
  "num_hidden_layers": 24,
  "num_kv_heads": null,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.47.1",
  "use_cache": true,
  "use_gk": true,
  "use_gv": false,
  "use_output_gate": true,
  "use_short_conv": false,
  "vocab_size": 32000
}
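A minimal sketch of how this config might be turned into a model, assuming the flash-linear-attention (`fla`) package is installed; importing `fla` registers the `gla` model type with the transformers Auto classes. The path below is just the `_name_or_path` value from the JSON above, so adjust it to wherever the file actually lives.

```python
import torch
from transformers import AutoConfig, AutoModelForCausalLM

import fla  # noqa: F401  -- side effect: registers GLAConfig / GLAForCausalLM

# Load the configuration shown above (path taken from "_name_or_path").
config = AutoConfig.from_pretrained("configs/gla_340M.json")

# Build a randomly initialized GLA model from the config and cast to bf16,
# matching "torch_dtype": "bfloat16".
model = AutoModelForCausalLM.from_config(config).to(torch.bfloat16)

# Sanity check: 24 layers, hidden size 1024, 4 heads, and tied embeddings
# should come out to roughly 340M parameters.
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")
```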