Tags: Feature Extraction, Transformers, Safetensors, English, esm, pharmacore, sparse, drug-discovery, apple-silicon, protein-language-model, esm2, bioinformatics, computational-biology, pruning, efficient-inference
Instructions to use stephenjun8192/esm2-8m-sparse50 with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
  - Transformers
How to use stephenjun8192/esm2-8m-sparse50 with Transformers:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("feature-extraction", model="stephenjun8192/esm2-8m-sparse50")
```

```python
# Load model directly
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("stephenjun8192/esm2-8m-sparse50")
model = AutoModel.from_pretrained("stephenjun8192/esm2-8m-sparse50")
```
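As a quick sanity check of the direct-load path, here is a minimal sketch that embeds one protein sequence and mean-pools the per-residue states into a fixed-size vector; the example sequence and the pooling choice are illustrative assumptions, not part of this repository:

```python
# Minimal sketch (assumed usage): embed one protein sequence and
# mean-pool the per-residue hidden states into a single vector.
import torch
from transformers import AutoTokenizer, AutoModel

repo = "stephenjun8192/esm2-8m-sparse50"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModel.from_pretrained(repo)
model.eval()

# Example sequence (illustrative, not from the model card).
sequence = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"
inputs = tokenizer(sequence, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# last_hidden_state: (batch, seq_len, hidden_size=320)
per_residue = outputs.last_hidden_state

# Drop the <cls> and <eos> special tokens before pooling.
embedding = per_residue[0, 1:-1].mean(dim=0)
print(embedding.shape)  # torch.Size([320])
```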
- Notebooks
  - Google Colab
  - Kaggle
The model's `config.json`:

```json
{
  "add_cross_attention": false,
  "architectures": [
    "EsmModel"
  ],
  "attention_probs_dropout_prob": 0.0,
  "classifier_dropout": null,
  "dtype": "float32",
  "emb_layer_norm_before": false,
  "esmfold_config": null,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.0,
  "hidden_size": 320,
  "initializer_range": 0.02,
  "intermediate_size": 1280,
  "is_decoder": false,
  "is_folding_model": false,
  "layer_norm_eps": 1e-05,
  "mask_token_id": 32,
  "max_position_embeddings": 1026,
  "model_type": "esm",
  "num_attention_heads": 20,
  "num_hidden_layers": 6,
  "pad_token_id": 1,
  "position_embedding_type": "rotary",
  "tie_word_embeddings": true,
  "token_dropout": true,
  "transformers_version": "5.5.4",
  "use_cache": true,
  "vocab_list": null,
  "vocab_size": 33
}
```
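The config describes a 6-layer, 320-dimensional encoder with 20 attention heads and rotary position embeddings. A hedged sketch reading these fields with `AutoConfig` and estimating weight sparsity; the ~50% expectation is inferred from the `sparse50` suffix in the model name, not documented here:

```python
# Sketch (assumptions noted above): read architecture fields from the
# config, then measure the fraction of exactly-zero weights, which a
# magnitude-pruned "sparse50" checkpoint should put near 50%.
import torch
from transformers import AutoConfig, AutoModel

repo = "stephenjun8192/esm2-8m-sparse50"

config = AutoConfig.from_pretrained(repo)
print(config.num_hidden_layers)        # 6
print(config.hidden_size)              # 320
print(config.num_attention_heads)      # 20
print(config.position_embedding_type)  # "rotary"

model = AutoModel.from_pretrained(repo)
with torch.no_grad():
    zeros = sum(int((p == 0).sum()) for p in model.parameters())
    total = sum(p.numel() for p in model.parameters())
print(f"zero-weight fraction: {zeros / total:.2%}")
```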