Delete similarity.py
Browse files — similarity.py (+0 −26)
similarity.py — DELETED
@@ -1,26 +0,0 @@
|
|
import torch
from sentence_transformers import SentenceTransformer, util

# Path to the saved model.
# NOTE(review): SentenceTransformer expects a model *directory* (or a Hub
# model id), not a bare `pytorch_model.bin` weights file — confirm this path
# points at the full saved-model folder.
model_path = "pytorch_model.bin"

# Path to the tokenizer `.json` file.
# Kept for reference only: SentenceTransformer loads its own tokenizer from
# the model directory, so no separate loading step is needed. The original
# code called `util.load_tokenizer`, which does not exist in
# `sentence_transformers.util` and would raise AttributeError.
tokenizer_path = "tokenizer.json"

# Load the model; tokenization is handled internally by `encode`.
model = SentenceTransformer(model_path)

# Sentences to embed.
sentences = ["This is an example sentence", "Each sentence is converted"]

# `encode` tokenizes, batches, and runs the forward pass, returning one
# embedding per sentence (a numpy array by default; pass
# `convert_to_tensor=True` to get a torch.Tensor instead).
embeddings = model.encode(sentences)

# Print the embeddings.
print(embeddings)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|