|
|
---
license: mit
language:
- en
---
|
|
|
|
|
Load the model:
|
|
|
|
|
```python
import torch
from transformers import AutoModel, AutoTokenizer

model_name = "rnalm/144M_H_MLM_last"

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModel.from_pretrained(model_name, trust_remote_code=True)

# Move model to GPU
model = model.cuda()
```
|
|
|
|
|
Get embeddings:
|
|
```python
inputs = tokenizer("ACGTACGT", return_tensors="pt")

with torch.no_grad():
    outputs = model(input_ids=inputs["input_ids"].cuda())

outputs.last_hidden_state.shape
# torch.Size([1, 8, 768])

outputs.seq_logits.shape
# torch.Size([1, 8, 11])
```