Update README.md
Browse files
README.md
CHANGED
|
@@ -41,19 +41,14 @@ All models share the same architecture and dictionary size (10,240). See [here](
|
|
| 41 |
Extract interpretable features from protein sequences:
|
| 42 |
|
| 43 |
```python
|
| 44 |
-
from
|
| 45 |
-
from interplm.sae.inference import load_model
|
| 46 |
from interplm.esm.embed import embed_list_of_prot_seqs
|
| 47 |
|
| 48 |
# Select ESM layer (must be one of 1-6)
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
#
|
| 52 |
-
|
| 53 |
-
repo_id=f"Elana/InterPLM-esm2-8m",
|
| 54 |
-
filename=f"layer_{layer_num}/ae_normalized.pt"
|
| 55 |
-
)
|
| 56 |
-
sae = load_model(weights_path)
|
| 57 |
|
| 58 |
# Get ESM embeddings for protein
|
| 59 |
protein_embeddings = embed_single_sequence(sequence="MRWQEMGYIFYPRKLR",
|
|
|
|
| 41 |
Extract interpretable features from protein sequences:
|
| 42 |
|
| 43 |
```python
|
| 44 |
+
from interplm.sae.inference import load_sae_from_hf
|
|
|
|
| 45 |
from interplm.esm.embed import embed_single_sequence
|
| 46 |
|
| 47 |
# Select ESM layer (must be one of 1-6)
|
| 48 |
+
layer = 4
|
| 49 |
+
|
| 50 |
+
# Load specific layer SAE
|
| 51 |
+
sae = load_sae_from_hf(plm_model="esm2-8m", plm_layer=layer)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
|
| 53 |
# Get ESM embeddings for protein
|
| 54 |
protein_embeddings = embed_single_sequence(sequence="MRWQEMGYIFYPRKLR",
|