Upload README.md with huggingface_hub
README.md CHANGED
@@ -38,7 +38,7 @@ import torch
 from transformers import AutoModel, AutoTokenizer
 
 model_path = 'Synthyra/ESM2-8M'
-model = AutoModel.from_pretrained(model_path,
+model = AutoModel.from_pretrained(model_path, dtype=torch.float16, trust_remote_code=True).eval()
 tokenizer = model.tokenizer
 
 sequences = ['MPRTEIN', 'MSEQWENCE']
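For reference, the updated load line slots into the README's embedding example roughly as follows. This is a sketch pieced together from the hunk's context lines; the tokenizer call and the `last_hidden_state` access are assumptions not shown in the diff.

```python
import torch
from transformers import AutoModel

model_path = 'Synthyra/ESM2-8M'
# Updated line: half-precision weights, custom code from the repo, eval mode
model = AutoModel.from_pretrained(model_path, dtype=torch.float16, trust_remote_code=True).eval()
tokenizer = model.tokenizer

sequences = ['MPRTEIN', 'MSEQWENCE']
# Assumed tokenizer signature; padding batches the two sequences together
tokenized = tokenizer(sequences, padding=True, return_tensors='pt')
with torch.no_grad():
    embeddings = model(**tokenized).last_hidden_state  # (batch_size, seq_len, hidden_size)
print(embeddings.shape)
```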
@@ -54,7 +54,7 @@ print(embeddings.shape) # (2, 11, 1280)
 import torch
 from transformers import AutoModelForMaskedLM, AutoTokenizer
 
-model = AutoModelForMaskedLM.from_pretrained(model_path,
+model = AutoModelForMaskedLM.from_pretrained(model_path, dtype=torch.float16, trust_remote_code=True).eval()
 with torch.no_grad():
     logits = model(**tokenized).logits
 
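Read together with the surrounding context lines, the masked-LM snippet would run end to end along these lines; the `tokenized` batch is an assumption, built the same way as in the embedding example above.

```python
import torch
from transformers import AutoModelForMaskedLM

model_path = 'Synthyra/ESM2-8M'
model = AutoModelForMaskedLM.from_pretrained(model_path, dtype=torch.float16, trust_remote_code=True).eval()
tokenizer = model.tokenizer  # assumed to exist here too, as in the AutoModel example

tokenized = tokenizer(['MPRTEIN', 'MSEQWENCE'], padding=True, return_tensors='pt')
with torch.no_grad():
    logits = model(**tokenized).logits  # (batch_size, seq_len, vocab_size)
print(logits.shape)  # (2, 11, 33) per the hunk header
```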
@@ -66,7 +66,7 @@ print(logits.shape) # (2, 11, 33)
 import torch
 from transformers import AutoModel, AutoTokenizer
 
-model = AutoModel.from_pretrained(model_path,
+model = AutoModel.from_pretrained(model_path, dtype=torch.float16, trust_remote_code=True).eval()
 with torch.no_grad():
     attentions = model(**tokenized, output_attentions=True).attentions  # tuple of (batch_size, num_heads, seq_len, seq_len)
 
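Since the context line needs `output_attentions=True` (the bare keyword in the original is a syntax error), a runnable version of the attention example would look roughly like this; the tokenization step is again an assumption carried over from the first example.

```python
import torch
from transformers import AutoModel

model_path = 'Synthyra/ESM2-8M'
model = AutoModel.from_pretrained(model_path, dtype=torch.float16, trust_remote_code=True).eval()
tokenizer = model.tokenizer

tokenized = tokenizer(['MPRTEIN', 'MSEQWENCE'], padding=True, return_tensors='pt')
with torch.no_grad():
    # One attention map per layer, each (batch_size, num_heads, seq_len, seq_len)
    attentions = model(**tokenized, output_attentions=True).attentions
print(len(attentions), attentions[0].shape)
```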
@@ -133,12 +133,12 @@ Note:
 ### Citation
 If you use any of this implementation or work please cite it (as well as the [ESM2](https://www.science.org/doi/10.1126/science.ade2574) paper).
 ```
-@misc {
-
-
-
-
-
-
+@misc {FastPLMs,
+  author = { Hallee, Logan and Bichara, David and Gleghorn, Jason P. },
+  title = { FastPLMs: Fast, efficient, protein language model inference from Hugging Face AutoModel. },
+  year = {2024},
+  url = { https://huggingface.co/Synthyra/ESMplusplus_small },
+  DOI = { 10.57967/hf/3726 },
+  publisher = { Hugging Face }
 }
 ```