Update README.md
Browse files
README.md
CHANGED
|
@@ -33,8 +33,8 @@ Then you can use the model like this:
|
|
| 33 |
from sentence_transformers import SentenceTransformer
|
| 34 |
psmiles_strings = ["[*]CC[*]", "[*]COC[*]"]
|
| 35 |
|
| 36 |
-
|
| 37 |
- embeddings =
|
| 38 |
print(embeddings)
|
| 39 |
```
|
| 40 |
|
|
@@ -60,14 +60,14 @@ psmiles_strings = ["[*]CC[*]", "[*]COC[*]"]
|
|
| 60 |
|
| 61 |
# Load model from HuggingFace Hub
|
| 62 |
tokenizer = AutoTokenizer.from_pretrained('kuelumbus/polyBERT')
|
| 63 |
-
|
| 64 |
|
| 65 |
# Tokenize sentences
|
| 66 |
encoded_input = tokenizer(psmiles_strings, padding=True, truncation=True, return_tensors='pt')
|
| 67 |
|
| 68 |
# Compute token embeddings
|
| 69 |
with torch.no_grad():
|
| 70 |
- model_output =
|
| 71 |
|
| 72 |
# Perform pooling. In this case, mean pooling.
|
| 73 |
fingerprints = mean_pooling(model_output, encoded_input['attention_mask'])
|
|
|
|
| 33 |
from sentence_transformers import SentenceTransformer
|
| 34 |
psmiles_strings = ["[*]CC[*]", "[*]COC[*]"]
|
| 35 |
|
| 36 |
+ polyBERT = SentenceTransformer('kuelumbus/polyBERT')
|
| 37 |
+ embeddings = polyBERT.encode(psmiles_strings)
|
| 38 |
print(embeddings)
|
| 39 |
```
|
| 40 |
|
|
|
|
| 60 |
|
| 61 |
# Load model from HuggingFace Hub
|
| 62 |
tokenizer = AutoTokenizer.from_pretrained('kuelumbus/polyBERT')
|
| 63 |
+ polyBERT = AutoModel.from_pretrained('kuelumbus/polyBERT')
|
| 64 |
|
| 65 |
# Tokenize sentences
|
| 66 |
encoded_input = tokenizer(psmiles_strings, padding=True, truncation=True, return_tensors='pt')
|
| 67 |
|
| 68 |
# Compute token embeddings
|
| 69 |
with torch.no_grad():
|
| 70 |
+ model_output = polyBERT(**encoded_input)
|
| 71 |
|
| 72 |
# Perform pooling. In this case, mean pooling.
|
| 73 |
fingerprints = mean_pooling(model_output, encoded_input['attention_mask'])
|