radoslavralev committed on
Commit
65db05a
·
verified ·
1 Parent(s): 6cdb9d4

Add new SentenceTransformer model

Browse files
Files changed (3) hide show
  1. README.md +5 -5
  2. config.json +1 -1
  3. model.safetensors +2 -2
README.md CHANGED
@@ -165,9 +165,9 @@ print(embeddings.shape)
165
  # Get the similarity scores for the embeddings
166
  similarities = model.similarity(embeddings, embeddings)
167
  print(similarities)
168
- # tensor([[1.0000, 1.0000, 0.3431],
169
- # [1.0000, 1.0000, 0.3431],
170
- # [0.3431, 0.3431, 1.0000]])
171
  ```
172
 
173
  <!--
@@ -296,7 +296,7 @@ You can finetune this model on your own dataset.
296
  - `warmup_ratio`: 0.05
297
  - `bf16`: True
298
  - `dataloader_num_workers`: 6
299
- - `dataloader_prefetch_factor`: 6
300
  - `load_best_model_at_end`: True
301
  - `optim`: stable_adamw
302
  - `ddp_find_unused_parameters`: False
@@ -359,7 +359,7 @@ You can finetune this model on your own dataset.
359
  - `debug`: []
360
  - `dataloader_drop_last`: False
361
  - `dataloader_num_workers`: 6
362
- - `dataloader_prefetch_factor`: 6
363
  - `past_index`: -1
364
  - `disable_tqdm`: False
365
  - `remove_unused_columns`: True
 
165
  # Get the similarity scores for the embeddings
166
  similarities = model.similarity(embeddings, embeddings)
167
  print(similarities)
168
+ # tensor([[1.0000, 1.0000, 0.3432],
169
+ # [1.0000, 1.0000, 0.3432],
170
+ # [0.3432, 0.3432, 1.0001]])
171
  ```
172
 
173
  <!--
 
296
  - `warmup_ratio`: 0.05
297
  - `bf16`: True
298
  - `dataloader_num_workers`: 6
299
+ - `dataloader_prefetch_factor`: 2
300
  - `load_best_model_at_end`: True
301
  - `optim`: stable_adamw
302
  - `ddp_find_unused_parameters`: False
 
359
  - `debug`: []
360
  - `dataloader_drop_last`: False
361
  - `dataloader_num_workers`: 6
362
+ - `dataloader_prefetch_factor`: 2
363
  - `past_index`: -1
364
  - `disable_tqdm`: False
365
  - `remove_unused_columns`: True
config.json CHANGED
@@ -4,7 +4,7 @@
4
  ],
5
  "attention_probs_dropout_prob": 0.1,
6
  "classifier_dropout": null,
7
- "dtype": "bfloat16",
8
  "gradient_checkpointing": false,
9
  "hidden_act": "gelu",
10
  "hidden_dropout_prob": 0.1,
 
4
  ],
5
  "attention_probs_dropout_prob": 0.1,
6
  "classifier_dropout": null,
7
+ "dtype": "float32",
8
  "gradient_checkpointing": false,
9
  "hidden_act": "gelu",
10
  "hidden_dropout_prob": 0.1,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:48af3f8927ba7bb57adc146865de162b0dfd3a218d065100b8b8d41f26204b50
3
- size 45437864
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:194a08727688936a34d326a0b7ec7c4aff6c34e983fb7bfbd69eb0a270dd95ae
3
+ size 90864192