{
  "architectures": [
    "EmbedderWithMLP"
  ],
  "auto_map": {
    "AutoConfig": "configuration_embedder_with_mlp.EmbedderWithMLPConfig",
    "AutoModel": "modeling_embedder_with_mlp.EmbedderWithMLP"
  },
  "dropout_ratio": 0.2,
  "dtype": "float32",
  "hidden_layer_list": [
    512,
    256,
    256,
    128,
    64
  ],
  "input_size": 768,
  "model_name": "sasasassaszzd/RoBERTa-tune",
  "model_type": "embedder_with_mlp",
  "transformers_version": "4.56.1"
}