from DLM_emb_model import MolEmbDLM
from transformers import AutoTokenizer
import torch

MODEL_DIR = "Kiria-Nozan/ApexOracle"

# Load the tokenizer and the embedding model from the Hub
tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)

model = MolEmbDLM.from_pretrained(MODEL_DIR)
model.eval()  # inference mode (disables dropout)

# Run on the first GPU if available, otherwise on CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)

seq = "[C][C][O]"          # ← replace with your input string
# Insert a space between bracketed tokens so the tokenizer splits them
batch = tokenizer(
    seq.replace('][', '] ['),
    padding=False,
    truncation=False,
    return_tensors="pt",
)
print(batch)  # inspect input_ids / attention_mask

batch = batch.to(device)  # move input tensors to the model's device

# Forward pass without gradient tracking
with torch.no_grad():
    embeddings = model(
        input_ids=batch["input_ids"],
        attention_mask=batch["attention_mask"],
    )                       # per-token embeddings, shape (1, seq_len, hidden_size)

print(embeddings.shape)
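
# The model returns one embedding per token. If a single fixed-size vector per
# molecule is needed, one common option (a sketch; not necessarily how
# ApexOracle pools internally) is attention-mask-weighted mean pooling:
mask = batch["attention_mask"].unsqueeze(-1)                 # (1, seq_len, 1)
mol_vec = (embeddings * mask).sum(dim=1) / mask.sum(dim=1)   # (1, hidden_size)
print(mol_vec.shape)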