Fix a small descriptive mistake in the usage example
#2
by
Haxy
- opened
README.md
CHANGED
|
@@ -75,7 +75,7 @@ encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
 # Compute token embeddings
 with torch.no_grad():
     model_output = model(**encoded_input)
-# Perform pooling. In this case,
+# Perform pooling. In this case, mean pooling.
 sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
 print("Sentence embeddings:")
 print(sentence_embeddings)