{ "_attn_implementation_autoset": true, "_name_or_path": "/tmp/tmpc0qb1ezb", "architectures": [ "ElectraForMaskedLM" ], "attention_probs_dropout_prob": 0.1, "classifier_dropout": null, "embedding_size": 128, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "hidden_size": 256, "initializer_range": 0.02, "intermediate_size": 1024, "layer_norm_eps": 1e-12, "max_position_embeddings": 512, "model_type": "electra", "neuron": { "auto_cast": null, "auto_cast_type": null, "compiler_type": "neuronx-cc", "compiler_version": "2.15.143.0+e39249ad", "disable_fallback": false, "disable_fast_relayout": false, "dynamic_batch_size": false, "inline_weights_to_neff": true, "input_names": [ "input_ids", "attention_mask", "token_type_ids" ], "model_type": "electra", "optlevel": "2", "output_attentions": false, "output_hidden_states": false, "output_names": [ "logits" ], "static_batch_size": 1, "static_sequence_length": 128, "tensor_parallel_size": 1 }, "num_attention_heads": 4, "num_hidden_layers": 12, "pad_token_id": 0, "position_embedding_type": "absolute", "summary_activation": "gelu", "summary_last_dropout": 0.1, "summary_type": "first", "summary_use_proj": true, "task": "fill-mask", "torchscript": true, "transformers_version": "4.48.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 30522 }