Mehd212 committed
Commit 32c271e · verified · 1 parent: 387108b

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete change set.
Files changed (50)
  1. 1_Pooling/config.json +10 -0
  2. README.md +432 -0
  3. checkpoint-795/1_Pooling/config.json +10 -0
  4. checkpoint-795/README.md +426 -0
  5. checkpoint-795/config.json +27 -0
  6. checkpoint-795/config_sentence_transformers.json +14 -0
  7. checkpoint-795/model.safetensors +3 -0
  8. checkpoint-795/modules.json +14 -0
  9. checkpoint-795/optimizer.pt +3 -0
  10. checkpoint-795/rng_state.pth +3 -0
  11. checkpoint-795/scaler.pt +3 -0
  12. checkpoint-795/scheduler.pt +3 -0
  13. checkpoint-795/sentence_bert_config.json +4 -0
  14. checkpoint-795/special_tokens_map.json +56 -0
  15. checkpoint-795/tokenizer.json +0 -0
  16. checkpoint-795/tokenizer_config.json +84 -0
  17. checkpoint-795/trainer_state.json +268 -0
  18. checkpoint-795/training_args.bin +3 -0
  19. checkpoint-848/1_Pooling/config.json +10 -0
  20. checkpoint-848/README.md +428 -0
  21. checkpoint-848/config.json +27 -0
  22. checkpoint-848/config_sentence_transformers.json +14 -0
  23. checkpoint-848/model.safetensors +3 -0
  24. checkpoint-848/modules.json +14 -0
  25. checkpoint-848/optimizer.pt +3 -0
  26. checkpoint-848/rng_state.pth +3 -0
  27. checkpoint-848/scaler.pt +3 -0
  28. checkpoint-848/scheduler.pt +3 -0
  29. checkpoint-848/sentence_bert_config.json +4 -0
  30. checkpoint-848/special_tokens_map.json +56 -0
  31. checkpoint-848/tokenizer.json +0 -0
  32. checkpoint-848/tokenizer_config.json +84 -0
  33. checkpoint-848/trainer_state.json +283 -0
  34. checkpoint-848/training_args.bin +3 -0
  35. checkpoint-901/1_Pooling/config.json +10 -0
  36. checkpoint-901/README.md +431 -0
  37. checkpoint-901/config.json +27 -0
  38. checkpoint-901/config_sentence_transformers.json +14 -0
  39. checkpoint-901/model.safetensors +3 -0
  40. checkpoint-901/modules.json +14 -0
  41. checkpoint-901/optimizer.pt +3 -0
  42. checkpoint-901/rng_state.pth +3 -0
  43. checkpoint-901/scaler.pt +3 -0
  44. checkpoint-901/scheduler.pt +3 -0
  45. checkpoint-901/sentence_bert_config.json +4 -0
  46. checkpoint-901/special_tokens_map.json +56 -0
  47. checkpoint-901/tokenizer.json +0 -0
  48. checkpoint-901/tokenizer_config.json +84 -0
  49. checkpoint-901/trainer_state.json +305 -0
  50. checkpoint-901/training_args.bin +3 -0
1_Pooling/config.json ADDED
@@ -0,0 +1,10 @@
{
  "word_embedding_dimension": 768,
  "pooling_mode_cls_token": false,
  "pooling_mode_mean_tokens": true,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false,
  "pooling_mode_weightedmean_tokens": false,
  "pooling_mode_lasttoken": false,
  "include_prompt": true
}
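This configuration enables mean pooling only: the Pooling module averages the transformer's token embeddings, weighted by the attention mask, into one 768-dimensional sentence vector. A minimal sketch of the computation this config selects (not the library's actual implementation), assuming token embeddings and an attention mask as produced by the transformer module:

```python
import torch

def mean_pooling(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Average the embeddings of real (non-padding) tokens into one vector per sentence."""
    # token_embeddings: (batch, seq_len, 768); attention_mask: (batch, seq_len)
    mask = attention_mask.unsqueeze(-1).to(token_embeddings.dtype)  # (batch, seq_len, 1)
    summed = (token_embeddings * mask).sum(dim=1)   # sum over real tokens only
    counts = mask.sum(dim=1).clamp(min=1e-9)        # per-sentence real-token counts
    return summed / counts                          # (batch, 768) sentence embeddings
```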
README.md ADDED
@@ -0,0 +1,432 @@
---
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- dense
- generated_from_trainer
- dataset_size:8434
- loss:MultipleNegativesRankingLoss
base_model: almanach/camembert-bio-base
widget:
- source_sentence: tumeur maligne
  sentences:
  - 8082/3 Carcinome lymphoépithélial
  - 8000/3 Tumeur maligne, SAI
  - 8000/3 Cancer
- source_sentence: hépatocarcinome oncocytaire fibrolamellaire (fh
  sentences:
  - 8171/3 Carcinome hépatocellulaire fibrolamellaire (C22.0)
  - 8560/3 Adénocarcinome et carcinome à cellules épidermoïdes
  - 8052/3 Carcinome épidermoïde papillaire
- source_sentence: implant tumoral
  sentences:
  - 8000/6 Néoplasme métastatique
  - 8480/3 Carcinome muqueux
  - 8140/3 Adénocarcinome, SAI
- source_sentence: Anémie réfractaire sidéroblastique
  sentences:
  - 9982/3 RARS
  - 9591/3 LMNH, SAI
  - 8041/3 Carcinome à cellules de réserve
- source_sentence: carcinome
  sentences:
  - 9800/3 Leucémie subaiguë, SAI [obs]
  - 8000/6 Métastase, SAI
  - 8075/3 Carcinome malpighien pseudoglandulaire
pipeline_tag: sentence-similarity
library_name: sentence-transformers
---

# SentenceTransformer based on almanach/camembert-bio-base

This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [almanach/camembert-bio-base](https://huggingface.co/almanach/camembert-bio-base). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

## Model Details

### Model Description
- **Model Type:** Sentence Transformer
- **Base model:** [almanach/camembert-bio-base](https://huggingface.co/almanach/camembert-bio-base) <!-- at revision 5fbdc71cc8b8b59f9e40229ff1d5baa1808ffde6 -->
- **Maximum Sequence Length:** 512 tokens
- **Output Dimensionality:** 768 dimensions
- **Similarity Function:** Cosine Similarity
<!-- - **Training Dataset:** Unknown -->
<!-- - **Language:** Unknown -->
<!-- - **License:** Unknown -->

### Model Sources

- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/huggingface/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)

### Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False, 'architecture': 'CamembertModel'})
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
)
```

## Usage

### Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.
```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("sentence_transformers_model_id")
# Run inference
sentences = [
    'carcinome',
    '8000/6 Métastase, SAI',
    '9800/3 Leucémie subaiguë, SAI [obs]',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 768]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities)
# tensor([[ 1.0000,  0.5178, -0.0998],
#         [ 0.5178,  1.0000,  0.0030],
#         [-0.0998,  0.0030,  1.0000]])
```
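Given the training pairs (free-text morphology mentions matched to ICD-O-3 labels), the natural downstream pattern is retrieval: encode a mention and rank candidate labels by cosine similarity. A minimal sketch along those lines, reusing the placeholder model id above; the candidate list here is a hypothetical subset:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("sentence_transformers_model_id")  # placeholder id, as above

# Hypothetical candidate ICD-O-3 morphology labels (code + preferred French term)
labels = [
    "8000/3 Tumeur maligne, SAI",
    "8000/6 Néoplasme métastatique",
    "8140/3 Adénocarcinome, SAI",
]
query = "implant tumoral"

query_emb = model.encode([query])                     # (1, 768)
label_embs = model.encode(labels)                     # (3, 768)
scores = model.similarity(query_emb, label_embs)[0]   # cosine score per candidate
best = int(scores.argmax())
print(labels[best], float(scores[best]))
```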

<!--
### Direct Usage (Transformers)

<details><summary>Click to see the direct usage in Transformers</summary>

</details>
-->

<!--
### Downstream Usage (Sentence Transformers)

You can finetune this model on your own dataset.

<details><summary>Click to expand</summary>

</details>
-->

<!--
### Out-of-Scope Use

*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->

<!--
## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!--
### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Dataset

#### Unnamed Dataset

* Size: 8,434 training samples
* Columns: <code>sentence1</code> and <code>sentence2</code>
* Approximate statistics based on the first 1000 samples:
  |         | sentence1                                                                          | sentence2                                                                          |
  |:--------|:-----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|
  | type    | string                                                                             | string                                                                             |
  | details | <ul><li>min: 3 tokens</li><li>mean: 11.31 tokens</li><li>max: 43 tokens</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 15.69 tokens</li><li>max: 34 tokens</li></ul> |
* Samples:
  | sentence1                                        | sentence2                                                            |
  |:--------------------------------------------------|:----------------------------------------------------------------------|
  | <code>tumeur à petites cellules rondes</code>     | <code>8806/3 Tumeur desmoplastique à petites cellules rondes</code>   |
  | <code>dissémination oligométastatique</code>      | <code>8000/6 Néoplasme métastatique</code>                            |
  | <code>processus néoprolifératif primaire</code>   | <code>8000/3 Tumeur maligne non classée</code>                        |
* Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:
  ```json
  {
      "scale": 20.0,
      "similarity_fct": "cos_sim",
      "gather_across_devices": false
  }
  ```
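MultipleNegativesRankingLoss treats each (sentence1, sentence2) pair as a positive and uses every other in-batch sentence2 as a negative, which is also why the no_duplicates batch sampler below matters. A hedged sketch of instantiating the loss with these parameters (gather_across_devices is a newer multi-GPU option, left at its default here):

```python
from sentence_transformers import SentenceTransformer, losses, util

model = SentenceTransformer("almanach/camembert-bio-base")

# scale=20.0 multiplies the cosine similarities before the in-batch
# softmax cross-entropy over the positive vs. the in-batch negatives
loss = losses.MultipleNegativesRankingLoss(
    model,
    scale=20.0,
    similarity_fct=util.cos_sim,
)
```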

### Evaluation Dataset

#### Unnamed Dataset

* Size: 444 evaluation samples
* Columns: <code>sentence1</code> and <code>sentence2</code>
* Approximate statistics based on the first 444 samples:
  |         | sentence1                                                                          | sentence2                                                                          |
  |:--------|:-----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|
  | type    | string                                                                             | string                                                                             |
  | details | <ul><li>min: 3 tokens</li><li>mean: 11.68 tokens</li><li>max: 31 tokens</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 15.76 tokens</li><li>max: 34 tokens</li></ul> |
* Samples:
  | sentence1                                      | sentence2                                       |
  |:-------------------------------------------------|:---------------------------------------------------|
  | <code>neuroblastome</code>                       | <code>9500/3 Neuroblastome, SAI</code>             |
  | <code>Sarcome méningothélial</code>              | <code>9530/3 Sarcome méningothélial</code>         |
  | <code>Carcinome excréto-urinaire, SAI</code>     | <code>8120/3 Carcinome urothélial, SAI</code>      |
* Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:
  ```json
  {
      "scale": 20.0,
      "similarity_fct": "cos_sim",
      "gather_across_devices": false
  }
  ```

### Training Hyperparameters
#### Non-Default Hyperparameters

- `eval_strategy`: epoch
- `per_device_train_batch_size`: 16
- `per_device_eval_batch_size`: 64
- `gradient_accumulation_steps`: 2
- `learning_rate`: 2e-05
- `weight_decay`: 0.01
- `num_train_epochs`: 30
- `lr_scheduler_type`: cosine
- `warmup_ratio`: 0.1
- `fp16`: True
- `load_best_model_at_end`: True
- `batch_sampler`: no_duplicates

#### All Hyperparameters
<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: epoch
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 16
- `per_device_eval_batch_size`: 64
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 2
- `eval_accumulation_steps`: None
- `torch_empty_cache_steps`: None
- `learning_rate`: 2e-05
- `weight_decay`: 0.01
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1.0
- `num_train_epochs`: 30
- `max_steps`: -1
- `lr_scheduler_type`: cosine
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.1
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 42
- `data_seed`: None
- `jit_mode_eval`: False
- `bf16`: False
- `fp16`: True
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: None
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: True
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `parallelism_config`: None
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch_fused
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `project`: huggingface
- `trackio_space_id`: trackio
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: None
- `hub_always_push`: False
- `hub_revision`: None
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `include_for_metrics`: []
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`:
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: no
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `eval_on_start`: False
- `use_liger_kernel`: False
- `liger_kernel_config`: None
- `eval_use_gather_object`: False
- `average_tokens_across_devices`: True
- `prompts`: None
- `batch_sampler`: no_duplicates
- `multi_dataset_batch_sampler`: proportional
- `router_mapping`: {}
- `learning_rate_mapping`: {}

</details>
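For reference, these arguments map onto SentenceTransformerTrainingArguments. A minimal sketch of the corresponding trainer setup, assuming `train_dataset` and `eval_dataset` carry the sentence1/sentence2 columns above and `loss` is the MultipleNegativesRankingLoss sketched earlier; the output path and the explicit save_strategy are illustrative assumptions, not read from the card:

```python
from sentence_transformers import SentenceTransformerTrainer, SentenceTransformerTrainingArguments
from sentence_transformers.training_args import BatchSamplers

args = SentenceTransformerTrainingArguments(
    output_dir="camembert-bio-morpho-bi-encoder",  # illustrative path
    eval_strategy="epoch",
    save_strategy="epoch",  # assumption: must match eval_strategy for load_best_model_at_end
    per_device_train_batch_size=16,
    per_device_eval_batch_size=64,
    gradient_accumulation_steps=2,
    learning_rate=2e-5,
    weight_decay=0.01,
    num_train_epochs=30,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    fp16=True,
    load_best_model_at_end=True,
    batch_sampler=BatchSamplers.NO_DUPLICATES,  # avoids duplicate in-batch negatives
)
trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    loss=loss,
)
trainer.train()
```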

### Training Logs
| Epoch    | Step    | Training Loss | Validation Loss |
|:--------:|:-------:|:-------------:|:---------------:|
| 0.9434   | 50      | 3.4933        | -               |
| 1.0      | 53      | -             | 2.1337          |
| 1.8868   | 100     | 2.2572        | -               |
| 2.0      | 106     | -             | 1.7004          |
| 2.8302   | 150     | 1.6176        | -               |
| 3.0      | 159     | -             | 1.4333          |
| 3.7736   | 200     | 1.2587        | -               |
| 4.0      | 212     | -             | 1.2576          |
| 4.7170   | 250     | 1.0359        | -               |
| 5.0      | 265     | -             | 1.1235          |
| 5.6604   | 300     | 0.9043        | -               |
| 6.0      | 318     | -             | 1.0555          |
| 6.6038   | 350     | 0.8331        | -               |
| 7.0      | 371     | -             | 0.9720          |
| 7.5472   | 400     | 0.7715        | -               |
| 8.0      | 424     | -             | 0.9189          |
| 8.4906   | 450     | 0.7566        | -               |
| 9.0      | 477     | -             | 0.8910          |
| 9.4340   | 500     | 0.7205        | -               |
| 10.0     | 530     | -             | 0.8724          |
| 10.3774  | 550     | 0.7056        | -               |
| 11.0     | 583     | -             | 0.8717          |
| 11.3208  | 600     | 0.6927        | -               |
| 12.0     | 636     | -             | 0.8567          |
| 12.2642  | 650     | 0.6798        | -               |
| 13.0     | 689     | -             | 0.8519          |
| 13.2075  | 700     | 0.6689        | -               |
| 14.0     | 742     | -             | 0.8806          |
| 14.1509  | 750     | 0.6643        | -               |
| **15.0** | **795** | **-**         | **0.8405**      |
| 15.0943  | 800     | 0.6622        | -               |
| 16.0     | 848     | -             | 0.8529          |
| 16.0377  | 850     | 0.6546        | -               |
| 16.9811  | 900     | 0.6358        | -               |
| 17.0     | 901     | -             | 0.8474          |

* The bold row denotes the saved checkpoint.

### Framework Versions
- Python: 3.11.14
- Sentence Transformers: 5.1.2
- Transformers: 4.57.3
- PyTorch: 2.9.1+cu128
- Accelerate: 1.12.0
- Datasets: 4.4.1
- Tokenizers: 0.22.1

## Citation

### BibTeX

#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

#### MultipleNegativesRankingLoss
```bibtex
@misc{henderson2017efficient,
    title={Efficient Natural Language Response Suggestion for Smart Reply},
    author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
    year={2017},
    eprint={1705.00652},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
checkpoint-795/1_Pooling/config.json ADDED
@@ -0,0 +1,10 @@
{
  "word_embedding_dimension": 768,
  "pooling_mode_cls_token": false,
  "pooling_mode_mean_tokens": true,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false,
  "pooling_mode_weightedmean_tokens": false,
  "pooling_mode_lasttoken": false,
  "include_prompt": true
}
checkpoint-795/README.md ADDED
@@ -0,0 +1,426 @@
(Identical to the root README.md above, except that the Training Logs table stops at this checkpoint's save point, epoch 15.0 / step 795 / validation loss 0.8405, and omits the bold best-row marker.)
checkpoint-795/config.json ADDED
@@ -0,0 +1,27 @@
{
  "architectures": [
    "CamembertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 5,
  "classifier_dropout": null,
  "dtype": "float32",
  "eos_token_id": 6,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "camembert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "output_past": true,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "transformers_version": "4.57.3",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 32005
}
checkpoint-795/config_sentence_transformers.json ADDED
@@ -0,0 +1,14 @@
{
  "model_type": "SentenceTransformer",
  "__version__": {
    "sentence_transformers": "5.1.2",
    "transformers": "4.57.3",
    "pytorch": "2.9.1+cu128"
  },
  "prompts": {
    "query": "",
    "document": ""
  },
  "default_prompt_name": null,
  "similarity_fn_name": "cosine"
}
checkpoint-795/model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:98b23d7f5cfe98ec7606b8ef7c90800ebe3429e84db5bfeb4adf06ae0ad0f0ba
size 442510176
checkpoint-795/modules.json ADDED
@@ -0,0 +1,14 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  }
]
checkpoint-795/optimizer.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9c3ff888727654e6e57eba28dbd352a3182ba1f5f5d9cd90e74f2333c15e0060
size 885145355
checkpoint-795/rng_state.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a1f4033ed0ffbba03e3a069a638ee37d41297f6048559acd2134daa05bf30444
size 14645
checkpoint-795/scaler.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:959e307a51bf71d6d1815861c707589765ad81856b984a356b6bcefb6add6344
size 1383
checkpoint-795/scheduler.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b71b34da46ae504db4a348139d258ffcc5ad5e02b53b85e715e0a942d0865358
size 1465
checkpoint-795/sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 512,
  "do_lower_case": false
}
checkpoint-795/special_tokens_map.json ADDED
@@ -0,0 +1,56 @@
{
  "additional_special_tokens": ["<s>NOTUSED", "</s>NOTUSED", "<unk>NOTUSED"],
  "bos_token":  { "content": "<s>",    "lstrip": false, "normalized": false, "rstrip": false, "single_word": false },
  "cls_token":  { "content": "<s>",    "lstrip": false, "normalized": false, "rstrip": false, "single_word": false },
  "eos_token":  { "content": "</s>",   "lstrip": false, "normalized": false, "rstrip": false, "single_word": false },
  "mask_token": { "content": "<mask>", "lstrip": true,  "normalized": false, "rstrip": false, "single_word": false },
  "pad_token":  { "content": "<pad>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false },
  "sep_token":  { "content": "</s>",   "lstrip": false, "normalized": false, "rstrip": false, "single_word": false },
  "unk_token":  { "content": "<unk>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false }
}
checkpoint-795/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-795/tokenizer_config.json ADDED
@@ -0,0 +1,84 @@
{
  "added_tokens_decoder": {
    "0":     { "content": "<s>NOTUSED",   "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "1":     { "content": "<pad>",        "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "2":     { "content": "</s>NOTUSED",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "3":     { "content": "<unk>",        "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "4":     { "content": "<unk>NOTUSED", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "5":     { "content": "<s>",          "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "6":     { "content": "</s>",         "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    "32004": { "content": "<mask>",       "lstrip": true,  "normalized": false, "rstrip": false, "single_word": false, "special": true }
  },
  "additional_special_tokens": ["<s>NOTUSED", "</s>NOTUSED", "<unk>NOTUSED"],
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "extra_special_tokens": {},
  "mask_token": "<mask>",
  "model_max_length": 512,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "tokenizer_class": "CamembertTokenizer",
  "unk_token": "<unk>"
}
checkpoint-795/trainer_state.json ADDED
@@ -0,0 +1,268 @@
{
  "best_global_step": 795,
  "best_metric": 0.8405108451843262,
  "best_model_checkpoint": "../models/camembert-bio-morpho-bi-encoder/checkpoint-795",
  "epoch": 15.0,
  "eval_steps": 500,
  "global_step": 795,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.9433962264150944,  "grad_norm": 7.664644241333008,  "learning_rate": 6.163522012578617e-06,  "loss": 3.4933, "step": 50 },
    { "epoch": 1.0,  "eval_loss": 2.1337265968322754, "eval_runtime": 3.1424, "eval_samples_per_second": 141.294, "eval_steps_per_second": 0.636, "step": 53 },
    { "epoch": 1.8867924528301887,  "grad_norm": 7.603148937225342,  "learning_rate": 1.2452830188679246e-05, "loss": 2.2572, "step": 100 },
    { "epoch": 2.0,  "eval_loss": 1.7004107236862183, "eval_runtime": 2.0559, "eval_samples_per_second": 215.959, "eval_steps_per_second": 0.973, "step": 106 },
    { "epoch": 2.830188679245283,   "grad_norm": 12.284380912780762, "learning_rate": 1.8742138364779877e-05, "loss": 1.6176, "step": 150 },
    { "epoch": 3.0,  "eval_loss": 1.4332726001739502, "eval_runtime": 2.2469, "eval_samples_per_second": 197.609, "eval_steps_per_second": 0.89,  "step": 159 },
    { "epoch": 3.7735849056603774,  "grad_norm": 5.0590691566467285, "learning_rate": 1.996146712998892e-05,  "loss": 1.2587, "step": 200 },
    { "epoch": 4.0,  "eval_loss": 1.2575932741165161, "eval_runtime": 1.7396, "eval_samples_per_second": 255.237, "eval_steps_per_second": 1.15,  "step": 212 },
    { "epoch": 4.716981132075472,   "grad_norm": 14.913520812988281, "learning_rate": 1.9805436150436352e-05, "loss": 1.0359, "step": 250 },
    { "epoch": 5.0,  "eval_loss": 1.1234651803970337, "eval_runtime": 1.8234, "eval_samples_per_second": 243.507, "eval_steps_per_second": 1.097, "step": 265 },
    { "epoch": 5.660377358490566,   "grad_norm": 4.878383636474609,  "learning_rate": 1.9531375476817667e-05, "loss": 0.9043, "step": 300 },
    { "epoch": 6.0,  "eval_loss": 1.055496096611023,  "eval_runtime": 1.7553, "eval_samples_per_second": 252.945, "eval_steps_per_second": 1.139, "step": 318 },
    { "epoch": 6.60377358490566,    "grad_norm": 10.084212303161621, "learning_rate": 1.9142584023833506e-05, "loss": 0.8331, "step": 350 },
    { "epoch": 7.0,  "eval_loss": 0.9720313549041748, "eval_runtime": 1.979,  "eval_samples_per_second": 224.355, "eval_steps_per_second": 1.011, "step": 371 },
    { "epoch": 7.547169811320755,   "grad_norm": 3.5150604248046875, "learning_rate": 1.8643741739988672e-05, "loss": 0.7715, "step": 400 },
    { "epoch": 8.0,  "eval_loss": 0.9189229607582092, "eval_runtime": 1.7408, "eval_samples_per_second": 255.054, "eval_steps_per_second": 1.149, "step": 424 },
    { "epoch": 8.49056603773585,    "grad_norm": 3.50665020942688,   "learning_rate": 1.8040853274260137e-05, "loss": 0.7566, "step": 450 },
    { "epoch": 9.0,  "eval_loss": 0.8909644484519958, "eval_runtime": 1.9496, "eval_samples_per_second": 227.734, "eval_steps_per_second": 1.026, "step": 477 },
    { "epoch": 9.433962264150944,   "grad_norm": 3.0296542644500732, "learning_rate": 1.7341175697121273e-05, "loss": 0.7205, "step": 500 },
    { "epoch": 10.0, "eval_loss": 0.8723557591438293, "eval_runtime": 1.7422, "eval_samples_per_second": 254.844, "eval_steps_per_second": 1.148, "step": 530 },
    { "epoch": 10.377358490566039,  "grad_norm": 2.8000473976135254, "learning_rate": 1.655313114595666e-05,  "loss": 0.7056, "step": 550 },
    { "epoch": 11.0, "eval_loss": 0.8716644644737244, "eval_runtime": 1.7145, "eval_samples_per_second": 258.973, "eval_steps_per_second": 1.167, "step": 583 },
    { "epoch": 11.320754716981131,  "grad_norm": 3.1076793670654297, "learning_rate": 1.5686205446369293e-05, "loss": 0.6927, "step": 600 },
    { "epoch": 12.0, "eval_loss": 0.8567394614219666, "eval_runtime": 1.7089, "eval_samples_per_second": 259.818, "eval_steps_per_second": 1.17,  "step": 636 },
    { "epoch": 12.264150943396226,  "grad_norm": 3.407707929611206,  "learning_rate": 1.4750833929692785e-05, "loss": 0.6798, "step": 650 },
    { "epoch": 13.0, "eval_loss": 0.8519091606140137, "eval_runtime": 1.7527, "eval_samples_per_second": 253.324, "eval_steps_per_second": 1.141, "step": 689 },
    { "epoch": 13.20754716981132,   "grad_norm": 2.503713607788086,  "learning_rate": 1.3758275821142382e-05, "loss": 0.6689, "step": 700 },
    { "epoch": 14.0, "eval_loss": 0.8806237578392029, "eval_runtime": 1.7483, "eval_samples_per_second": 253.954, "eval_steps_per_second": 1.144, "step": 742 },
    { "epoch": 14.150943396226415,  "grad_norm": 2.356168270111084,  "learning_rate": 1.2720478710615944e-05, "loss": 0.6643, "step": 750 },
    { "epoch": 15.0, "eval_loss": 0.8405108451843262, "eval_runtime": 1.727,  "eval_samples_per_second": 257.097, "eval_steps_per_second": 1.158, "step": 795 }
  ],
  "logging_steps": 50,
  "max_steps": 1590,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 30,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": { "early_stopping_patience": 2, "early_stopping_threshold": 0.0 },
      "attributes": { "early_stopping_patience_counter": 0 }
    },
    "TrainerControl": {
      "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 80,
  "trial_name": null,
  "trial_params": null
}
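The stateful_callbacks entry records an EarlyStoppingCallback with patience 2, which matches the root README's training log ending at epoch 17: validation loss last improved at epoch 15, and two non-improving evaluations then stopped the run. A sketch of attaching such a callback, assuming the trainer setup sketched in the model card above:

```python
from transformers import EarlyStoppingCallback

# Stop after 2 consecutive evaluations without improvement in the best-model
# metric (the eval loss here); requires load_best_model_at_end=True.
early_stopping = EarlyStoppingCallback(
    early_stopping_patience=2,
    early_stopping_threshold=0.0,
)
# e.g. SentenceTransformerTrainer(..., callbacks=[early_stopping])
```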
checkpoint-795/training_args.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0d06e647210f13dbee2e3933b7061a89b32e1c16df743074b449e7c097a0ff51
size 6225
checkpoint-848/1_Pooling/config.json ADDED
@@ -0,0 +1,10 @@
{
  "word_embedding_dimension": 768,
  "pooling_mode_cls_token": false,
  "pooling_mode_mean_tokens": true,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false,
  "pooling_mode_weightedmean_tokens": false,
  "pooling_mode_lasttoken": false,
  "include_prompt": true
}
checkpoint-848/README.md ADDED
@@ -0,0 +1,428 @@
(Identical to the root README.md above in the portion shown, except that the usage example's similarity output reflects this checkpoint's weights: tensor([[1.0000, 0.5991, 0.0038], [0.5991, 1.0000, 0.1220], [0.0038, 0.1220, 1.0000]]). The rendered diff is cut off partway through the All Hyperparameters list.)
216
+ - `do_predict`: False
217
+ - `eval_strategy`: epoch
218
+ - `prediction_loss_only`: True
219
+ - `per_device_train_batch_size`: 16
220
+ - `per_device_eval_batch_size`: 64
221
+ - `per_gpu_train_batch_size`: None
222
+ - `per_gpu_eval_batch_size`: None
223
+ - `gradient_accumulation_steps`: 2
224
+ - `eval_accumulation_steps`: None
225
+ - `torch_empty_cache_steps`: None
226
+ - `learning_rate`: 2e-05
227
+ - `weight_decay`: 0.01
228
+ - `adam_beta1`: 0.9
229
+ - `adam_beta2`: 0.999
230
+ - `adam_epsilon`: 1e-08
231
+ - `max_grad_norm`: 1.0
232
+ - `num_train_epochs`: 30
233
+ - `max_steps`: -1
234
+ - `lr_scheduler_type`: cosine
235
+ - `lr_scheduler_kwargs`: {}
236
+ - `warmup_ratio`: 0.1
237
+ - `warmup_steps`: 0
238
+ - `log_level`: passive
239
+ - `log_level_replica`: warning
240
+ - `log_on_each_node`: True
241
+ - `logging_nan_inf_filter`: True
242
+ - `save_safetensors`: True
243
+ - `save_on_each_node`: False
244
+ - `save_only_model`: False
245
+ - `restore_callback_states_from_checkpoint`: False
246
+ - `no_cuda`: False
247
+ - `use_cpu`: False
248
+ - `use_mps_device`: False
249
+ - `seed`: 42
250
+ - `data_seed`: None
251
+ - `jit_mode_eval`: False
252
+ - `bf16`: False
253
+ - `fp16`: True
254
+ - `fp16_opt_level`: O1
255
+ - `half_precision_backend`: auto
256
+ - `bf16_full_eval`: False
257
+ - `fp16_full_eval`: False
258
+ - `tf32`: None
259
+ - `local_rank`: 0
260
+ - `ddp_backend`: None
261
+ - `tpu_num_cores`: None
262
+ - `tpu_metrics_debug`: False
263
+ - `debug`: []
264
+ - `dataloader_drop_last`: False
265
+ - `dataloader_num_workers`: 0
266
+ - `dataloader_prefetch_factor`: None
267
+ - `past_index`: -1
268
+ - `disable_tqdm`: False
269
+ - `remove_unused_columns`: True
270
+ - `label_names`: None
271
+ - `load_best_model_at_end`: True
272
+ - `ignore_data_skip`: False
273
+ - `fsdp`: []
274
+ - `fsdp_min_num_params`: 0
275
+ - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
276
+ - `fsdp_transformer_layer_cls_to_wrap`: None
277
+ - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
278
+ - `parallelism_config`: None
279
+ - `deepspeed`: None
280
+ - `label_smoothing_factor`: 0.0
281
+ - `optim`: adamw_torch_fused
282
+ - `optim_args`: None
283
+ - `adafactor`: False
284
+ - `group_by_length`: False
285
+ - `length_column_name`: length
286
+ - `project`: huggingface
287
+ - `trackio_space_id`: trackio
288
+ - `ddp_find_unused_parameters`: None
289
+ - `ddp_bucket_cap_mb`: None
290
+ - `ddp_broadcast_buffers`: False
291
+ - `dataloader_pin_memory`: True
292
+ - `dataloader_persistent_workers`: False
293
+ - `skip_memory_metrics`: True
294
+ - `use_legacy_prediction_loop`: False
295
+ - `push_to_hub`: False
296
+ - `resume_from_checkpoint`: None
297
+ - `hub_model_id`: None
298
+ - `hub_strategy`: every_save
299
+ - `hub_private_repo`: None
300
+ - `hub_always_push`: False
301
+ - `hub_revision`: None
302
+ - `gradient_checkpointing`: False
303
+ - `gradient_checkpointing_kwargs`: None
304
+ - `include_inputs_for_metrics`: False
305
+ - `include_for_metrics`: []
306
+ - `eval_do_concat_batches`: True
307
+ - `fp16_backend`: auto
308
+ - `push_to_hub_model_id`: None
309
+ - `push_to_hub_organization`: None
310
+ - `mp_parameters`:
311
+ - `auto_find_batch_size`: False
312
+ - `full_determinism`: False
313
+ - `torchdynamo`: None
314
+ - `ray_scope`: last
315
+ - `ddp_timeout`: 1800
316
+ - `torch_compile`: False
317
+ - `torch_compile_backend`: None
318
+ - `torch_compile_mode`: None
319
+ - `include_tokens_per_second`: False
320
+ - `include_num_input_tokens_seen`: no
321
+ - `neftune_noise_alpha`: None
322
+ - `optim_target_modules`: None
323
+ - `batch_eval_metrics`: False
324
+ - `eval_on_start`: False
325
+ - `use_liger_kernel`: False
326
+ - `liger_kernel_config`: None
327
+ - `eval_use_gather_object`: False
328
+ - `average_tokens_across_devices`: True
329
+ - `prompts`: None
330
+ - `batch_sampler`: no_duplicates
331
+ - `multi_dataset_batch_sampler`: proportional
332
+ - `router_mapping`: {}
333
+ - `learning_rate_mapping`: {}
334
+
335
+ </details>
336
+
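+ Taken together, the non-default hyperparameters above correspond roughly to the training setup sketched below. This is a reconstruction for illustration only, not the exact script used to produce these checkpoints; the toy `train_pairs`/`eval_pairs` datasets, the output directory, and the explicit `save_strategy` are placeholders/assumptions:
+
+ ```python
+ from datasets import Dataset
+ from sentence_transformers import (
+     SentenceTransformer,
+     SentenceTransformerTrainer,
+     SentenceTransformerTrainingArguments,
+ )
+ from sentence_transformers.losses import MultipleNegativesRankingLoss
+ from sentence_transformers.training_args import BatchSamplers
+
+ model = SentenceTransformer("almanach/camembert-bio-base")
+ loss = MultipleNegativesRankingLoss(model)  # in-batch negatives, cosine similarity, scale 20
+
+ # Toy pair datasets with the same column layout as described above.
+ train_pairs = Dataset.from_dict({
+     "sentence1": ["tumeur à petites cellules rondes"],
+     "sentence2": ["8806/3 Tumeur desmoplastique à petites cellules rondes"],
+ })
+ eval_pairs = Dataset.from_dict({
+     "sentence1": ["neuroblastome"],
+     "sentence2": ["9500/3 Neuroblastome, SAI"],
+ })
+
+ args = SentenceTransformerTrainingArguments(
+     output_dir="camembert-bio-morpho-bi-encoder",  # placeholder
+     eval_strategy="epoch",
+     save_strategy="epoch",  # assumed, so load_best_model_at_end can track the best epoch
+     per_device_train_batch_size=16,
+     per_device_eval_batch_size=64,
+     gradient_accumulation_steps=2,
+     learning_rate=2e-5,
+     weight_decay=0.01,
+     num_train_epochs=30,
+     lr_scheduler_type="cosine",
+     warmup_ratio=0.1,
+     fp16=True,
+     load_best_model_at_end=True,
+     batch_sampler=BatchSamplers.NO_DUPLICATES,
+ )
+
+ trainer = SentenceTransformerTrainer(
+     model=model,
+     args=args,
+     train_dataset=train_pairs,
+     eval_dataset=eval_pairs,
+     loss=loss,
+ )
+ trainer.train()
+ ```
+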
337
+ ### Training Logs
338
+ | Epoch | Step | Training Loss | Validation Loss |
339
+ |:-------:|:----:|:-------------:|:---------------:|
340
+ | 0.9434 | 50 | 3.4933 | - |
341
+ | 1.0 | 53 | - | 2.1337 |
342
+ | 1.8868 | 100 | 2.2572 | - |
343
+ | 2.0 | 106 | - | 1.7004 |
344
+ | 2.8302 | 150 | 1.6176 | - |
345
+ | 3.0 | 159 | - | 1.4333 |
346
+ | 3.7736 | 200 | 1.2587 | - |
347
+ | 4.0 | 212 | - | 1.2576 |
348
+ | 4.7170 | 250 | 1.0359 | - |
349
+ | 5.0 | 265 | - | 1.1235 |
350
+ | 5.6604 | 300 | 0.9043 | - |
351
+ | 6.0 | 318 | - | 1.0555 |
352
+ | 6.6038 | 350 | 0.8331 | - |
353
+ | 7.0 | 371 | - | 0.9720 |
354
+ | 7.5472 | 400 | 0.7715 | - |
355
+ | 8.0 | 424 | - | 0.9189 |
356
+ | 8.4906 | 450 | 0.7566 | - |
357
+ | 9.0 | 477 | - | 0.8910 |
358
+ | 9.4340 | 500 | 0.7205 | - |
359
+ | 10.0 | 530 | - | 0.8724 |
360
+ | 10.3774 | 550 | 0.7056 | - |
361
+ | 11.0 | 583 | - | 0.8717 |
362
+ | 11.3208 | 600 | 0.6927 | - |
363
+ | 12.0 | 636 | - | 0.8567 |
364
+ | 12.2642 | 650 | 0.6798 | - |
365
+ | 13.0 | 689 | - | 0.8519 |
366
+ | 13.2075 | 700 | 0.6689 | - |
367
+ | 14.0 | 742 | - | 0.8806 |
368
+ | 14.1509 | 750 | 0.6643 | - |
369
+ | 15.0 | 795 | - | 0.8405 |
370
+ | 15.0943 | 800 | 0.6622 | - |
371
+ | 16.0 | 848 | - | 0.8529 |
372
+
373
+
374
+ ### Framework Versions
375
+ - Python: 3.11.14
376
+ - Sentence Transformers: 5.1.2
377
+ - Transformers: 4.57.3
378
+ - PyTorch: 2.9.1+cu128
379
+ - Accelerate: 1.12.0
380
+ - Datasets: 4.4.1
381
+ - Tokenizers: 0.22.1
382
+
383
+ ## Citation
384
+
385
+ ### BibTeX
386
+
387
+ #### Sentence Transformers
388
+ ```bibtex
389
+ @inproceedings{reimers-2019-sentence-bert,
390
+ title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
391
+ author = "Reimers, Nils and Gurevych, Iryna",
392
+ booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
393
+ month = "11",
394
+ year = "2019",
395
+ publisher = "Association for Computational Linguistics",
396
+ url = "https://arxiv.org/abs/1908.10084",
397
+ }
398
+ ```
399
+
400
+ #### MultipleNegativesRankingLoss
401
+ ```bibtex
402
+ @misc{henderson2017efficient,
403
+ title={Efficient Natural Language Response Suggestion for Smart Reply},
404
+ author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
405
+ year={2017},
406
+ eprint={1705.00652},
407
+ archivePrefix={arXiv},
408
+ primaryClass={cs.CL}
409
+ }
410
+ ```
411
+
412
+ <!--
413
+ ## Glossary
414
+
415
+ *Clearly define terms in order to be accessible across audiences.*
416
+ -->
417
+
418
+ <!--
419
+ ## Model Card Authors
420
+
421
+ *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
422
+ -->
423
+
424
+ <!--
425
+ ## Model Card Contact
426
+
427
+ *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
428
+ -->
checkpoint-848/config.json ADDED
@@ -0,0 +1,27 @@
1
+ {
2
+ "architectures": [
3
+ "CamembertModel"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.1,
6
+ "bos_token_id": 5,
7
+ "classifier_dropout": null,
8
+ "dtype": "float32",
9
+ "eos_token_id": 6,
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 768,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 3072,
15
+ "layer_norm_eps": 1e-05,
16
+ "max_position_embeddings": 514,
17
+ "model_type": "camembert",
18
+ "num_attention_heads": 12,
19
+ "num_hidden_layers": 12,
20
+ "output_past": true,
21
+ "pad_token_id": 1,
22
+ "position_embedding_type": "absolute",
23
+ "transformers_version": "4.57.3",
24
+ "type_vocab_size": 1,
25
+ "use_cache": true,
26
+ "vocab_size": 32005
27
+ }
checkpoint-848/config_sentence_transformers.json ADDED
@@ -0,0 +1,14 @@
1
+ {
2
+ "model_type": "SentenceTransformer",
3
+ "__version__": {
4
+ "sentence_transformers": "5.1.2",
5
+ "transformers": "4.57.3",
6
+ "pytorch": "2.9.1+cu128"
7
+ },
8
+ "prompts": {
9
+ "query": "",
10
+ "document": ""
11
+ },
12
+ "default_prompt_name": null,
13
+ "similarity_fn_name": "cosine"
14
+ }
checkpoint-848/model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:94dec4d601dbb0adbd112a1f56fd1c3a9341eb1796a297751bfb130906070807
3
+ size 442510176
checkpoint-848/modules.json ADDED
@@ -0,0 +1,14 @@
1
+ [
2
+ {
3
+ "idx": 0,
4
+ "name": "0",
5
+ "path": "",
6
+ "type": "sentence_transformers.models.Transformer"
7
+ },
8
+ {
9
+ "idx": 1,
10
+ "name": "1",
11
+ "path": "1_Pooling",
12
+ "type": "sentence_transformers.models.Pooling"
13
+ }
14
+ ]
checkpoint-848/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b89036b34e88aafa36284199caed826447d58d0b75c9e1eab13350fa778e69a
3
+ size 885145355
checkpoint-848/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05214c57f5e98136df0672a64df7ea1011b106bda85220b6079cbc4528d8bd3d
3
+ size 14645
checkpoint-848/scaler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf2a9fe435dfbefaa92959adc3814f2239917841ebe3a9d6c745419e2a4d4b8a
3
+ size 1383
checkpoint-848/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9fee0b25cd332182f572e145678b9dba80ff084fef4453d2566ecc313783dacf
3
+ size 1465
checkpoint-848/sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
1
+ {
2
+ "max_seq_length": 512,
3
+ "do_lower_case": false
4
+ }
checkpoint-848/special_tokens_map.json ADDED
@@ -0,0 +1,56 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<s>NOTUSED",
4
+ "</s>NOTUSED",
5
+ "<unk>NOTUSED"
6
+ ],
7
+ "bos_token": {
8
+ "content": "<s>",
9
+ "lstrip": false,
10
+ "normalized": false,
11
+ "rstrip": false,
12
+ "single_word": false
13
+ },
14
+ "cls_token": {
15
+ "content": "<s>",
16
+ "lstrip": false,
17
+ "normalized": false,
18
+ "rstrip": false,
19
+ "single_word": false
20
+ },
21
+ "eos_token": {
22
+ "content": "</s>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false
27
+ },
28
+ "mask_token": {
29
+ "content": "<mask>",
30
+ "lstrip": true,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false
34
+ },
35
+ "pad_token": {
36
+ "content": "<pad>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false
41
+ },
42
+ "sep_token": {
43
+ "content": "</s>",
44
+ "lstrip": false,
45
+ "normalized": false,
46
+ "rstrip": false,
47
+ "single_word": false
48
+ },
49
+ "unk_token": {
50
+ "content": "<unk>",
51
+ "lstrip": false,
52
+ "normalized": false,
53
+ "rstrip": false,
54
+ "single_word": false
55
+ }
56
+ }
checkpoint-848/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-848/tokenizer_config.json ADDED
@@ -0,0 +1,84 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<s>NOTUSED",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "<pad>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "</s>NOTUSED",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "<unk>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "4": {
36
+ "content": "<unk>NOTUSED",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "5": {
44
+ "content": "<s>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "6": {
52
+ "content": "</s>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "32004": {
60
+ "content": "<mask>",
61
+ "lstrip": true,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ }
67
+ },
68
+ "additional_special_tokens": [
69
+ "<s>NOTUSED",
70
+ "</s>NOTUSED",
71
+ "<unk>NOTUSED"
72
+ ],
73
+ "bos_token": "<s>",
74
+ "clean_up_tokenization_spaces": true,
75
+ "cls_token": "<s>",
76
+ "eos_token": "</s>",
77
+ "extra_special_tokens": {},
78
+ "mask_token": "<mask>",
79
+ "model_max_length": 512,
80
+ "pad_token": "<pad>",
81
+ "sep_token": "</s>",
82
+ "tokenizer_class": "CamembertTokenizer",
83
+ "unk_token": "<unk>"
84
+ }
checkpoint-848/trainer_state.json ADDED
@@ -0,0 +1,283 @@
1
+ {
2
+ "best_global_step": 795,
3
+ "best_metric": 0.8405108451843262,
4
+ "best_model_checkpoint": "../models/camembert-bio-morpho-bi-encoder/checkpoint-795",
5
+ "epoch": 16.0,
6
+ "eval_steps": 500,
7
+ "global_step": 848,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.9433962264150944,
14
+ "grad_norm": 7.664644241333008,
15
+ "learning_rate": 6.163522012578617e-06,
16
+ "loss": 3.4933,
17
+ "step": 50
18
+ },
19
+ {
20
+ "epoch": 1.0,
21
+ "eval_loss": 2.1337265968322754,
22
+ "eval_runtime": 3.1424,
23
+ "eval_samples_per_second": 141.294,
24
+ "eval_steps_per_second": 0.636,
25
+ "step": 53
26
+ },
27
+ {
28
+ "epoch": 1.8867924528301887,
29
+ "grad_norm": 7.603148937225342,
30
+ "learning_rate": 1.2452830188679246e-05,
31
+ "loss": 2.2572,
32
+ "step": 100
33
+ },
34
+ {
35
+ "epoch": 2.0,
36
+ "eval_loss": 1.7004107236862183,
37
+ "eval_runtime": 2.0559,
38
+ "eval_samples_per_second": 215.959,
39
+ "eval_steps_per_second": 0.973,
40
+ "step": 106
41
+ },
42
+ {
43
+ "epoch": 2.830188679245283,
44
+ "grad_norm": 12.284380912780762,
45
+ "learning_rate": 1.8742138364779877e-05,
46
+ "loss": 1.6176,
47
+ "step": 150
48
+ },
49
+ {
50
+ "epoch": 3.0,
51
+ "eval_loss": 1.4332726001739502,
52
+ "eval_runtime": 2.2469,
53
+ "eval_samples_per_second": 197.609,
54
+ "eval_steps_per_second": 0.89,
55
+ "step": 159
56
+ },
57
+ {
58
+ "epoch": 3.7735849056603774,
59
+ "grad_norm": 5.0590691566467285,
60
+ "learning_rate": 1.996146712998892e-05,
61
+ "loss": 1.2587,
62
+ "step": 200
63
+ },
64
+ {
65
+ "epoch": 4.0,
66
+ "eval_loss": 1.2575932741165161,
67
+ "eval_runtime": 1.7396,
68
+ "eval_samples_per_second": 255.237,
69
+ "eval_steps_per_second": 1.15,
70
+ "step": 212
71
+ },
72
+ {
73
+ "epoch": 4.716981132075472,
74
+ "grad_norm": 14.913520812988281,
75
+ "learning_rate": 1.9805436150436352e-05,
76
+ "loss": 1.0359,
77
+ "step": 250
78
+ },
79
+ {
80
+ "epoch": 5.0,
81
+ "eval_loss": 1.1234651803970337,
82
+ "eval_runtime": 1.8234,
83
+ "eval_samples_per_second": 243.507,
84
+ "eval_steps_per_second": 1.097,
85
+ "step": 265
86
+ },
87
+ {
88
+ "epoch": 5.660377358490566,
89
+ "grad_norm": 4.878383636474609,
90
+ "learning_rate": 1.9531375476817667e-05,
91
+ "loss": 0.9043,
92
+ "step": 300
93
+ },
94
+ {
95
+ "epoch": 6.0,
96
+ "eval_loss": 1.055496096611023,
97
+ "eval_runtime": 1.7553,
98
+ "eval_samples_per_second": 252.945,
99
+ "eval_steps_per_second": 1.139,
100
+ "step": 318
101
+ },
102
+ {
103
+ "epoch": 6.60377358490566,
104
+ "grad_norm": 10.084212303161621,
105
+ "learning_rate": 1.9142584023833506e-05,
106
+ "loss": 0.8331,
107
+ "step": 350
108
+ },
109
+ {
110
+ "epoch": 7.0,
111
+ "eval_loss": 0.9720313549041748,
112
+ "eval_runtime": 1.979,
113
+ "eval_samples_per_second": 224.355,
114
+ "eval_steps_per_second": 1.011,
115
+ "step": 371
116
+ },
117
+ {
118
+ "epoch": 7.547169811320755,
119
+ "grad_norm": 3.5150604248046875,
120
+ "learning_rate": 1.8643741739988672e-05,
121
+ "loss": 0.7715,
122
+ "step": 400
123
+ },
124
+ {
125
+ "epoch": 8.0,
126
+ "eval_loss": 0.9189229607582092,
127
+ "eval_runtime": 1.7408,
128
+ "eval_samples_per_second": 255.054,
129
+ "eval_steps_per_second": 1.149,
130
+ "step": 424
131
+ },
132
+ {
133
+ "epoch": 8.49056603773585,
134
+ "grad_norm": 3.50665020942688,
135
+ "learning_rate": 1.8040853274260137e-05,
136
+ "loss": 0.7566,
137
+ "step": 450
138
+ },
139
+ {
140
+ "epoch": 9.0,
141
+ "eval_loss": 0.8909644484519958,
142
+ "eval_runtime": 1.9496,
143
+ "eval_samples_per_second": 227.734,
144
+ "eval_steps_per_second": 1.026,
145
+ "step": 477
146
+ },
147
+ {
148
+ "epoch": 9.433962264150944,
149
+ "grad_norm": 3.0296542644500732,
150
+ "learning_rate": 1.7341175697121273e-05,
151
+ "loss": 0.7205,
152
+ "step": 500
153
+ },
154
+ {
155
+ "epoch": 10.0,
156
+ "eval_loss": 0.8723557591438293,
157
+ "eval_runtime": 1.7422,
158
+ "eval_samples_per_second": 254.844,
159
+ "eval_steps_per_second": 1.148,
160
+ "step": 530
161
+ },
162
+ {
163
+ "epoch": 10.377358490566039,
164
+ "grad_norm": 2.8000473976135254,
165
+ "learning_rate": 1.655313114595666e-05,
166
+ "loss": 0.7056,
167
+ "step": 550
168
+ },
169
+ {
170
+ "epoch": 11.0,
171
+ "eval_loss": 0.8716644644737244,
172
+ "eval_runtime": 1.7145,
173
+ "eval_samples_per_second": 258.973,
174
+ "eval_steps_per_second": 1.167,
175
+ "step": 583
176
+ },
177
+ {
178
+ "epoch": 11.320754716981131,
179
+ "grad_norm": 3.1076793670654297,
180
+ "learning_rate": 1.5686205446369293e-05,
181
+ "loss": 0.6927,
182
+ "step": 600
183
+ },
184
+ {
185
+ "epoch": 12.0,
186
+ "eval_loss": 0.8567394614219666,
187
+ "eval_runtime": 1.7089,
188
+ "eval_samples_per_second": 259.818,
189
+ "eval_steps_per_second": 1.17,
190
+ "step": 636
191
+ },
192
+ {
193
+ "epoch": 12.264150943396226,
194
+ "grad_norm": 3.407707929611206,
195
+ "learning_rate": 1.4750833929692785e-05,
196
+ "loss": 0.6798,
197
+ "step": 650
198
+ },
199
+ {
200
+ "epoch": 13.0,
201
+ "eval_loss": 0.8519091606140137,
202
+ "eval_runtime": 1.7527,
203
+ "eval_samples_per_second": 253.324,
204
+ "eval_steps_per_second": 1.141,
205
+ "step": 689
206
+ },
207
+ {
208
+ "epoch": 13.20754716981132,
209
+ "grad_norm": 2.503713607788086,
210
+ "learning_rate": 1.3758275821142382e-05,
211
+ "loss": 0.6689,
212
+ "step": 700
213
+ },
214
+ {
215
+ "epoch": 14.0,
216
+ "eval_loss": 0.8806237578392029,
217
+ "eval_runtime": 1.7483,
218
+ "eval_samples_per_second": 253.954,
219
+ "eval_steps_per_second": 1.144,
220
+ "step": 742
221
+ },
222
+ {
223
+ "epoch": 14.150943396226415,
224
+ "grad_norm": 2.356168270111084,
225
+ "learning_rate": 1.2720478710615944e-05,
226
+ "loss": 0.6643,
227
+ "step": 750
228
+ },
229
+ {
230
+ "epoch": 15.0,
231
+ "eval_loss": 0.8405108451843262,
232
+ "eval_runtime": 1.727,
233
+ "eval_samples_per_second": 257.097,
234
+ "eval_steps_per_second": 1.158,
235
+ "step": 795
236
+ },
237
+ {
238
+ "epoch": 15.09433962264151,
239
+ "grad_norm": 3.0982022285461426,
240
+ "learning_rate": 1.164993473753275e-05,
241
+ "loss": 0.6622,
242
+ "step": 800
243
+ },
244
+ {
245
+ "epoch": 16.0,
246
+ "eval_loss": 0.8529258966445923,
247
+ "eval_runtime": 1.86,
248
+ "eval_samples_per_second": 238.708,
249
+ "eval_steps_per_second": 1.075,
250
+ "step": 848
251
+ }
252
+ ],
253
+ "logging_steps": 50,
254
+ "max_steps": 1590,
255
+ "num_input_tokens_seen": 0,
256
+ "num_train_epochs": 30,
257
+ "save_steps": 500,
258
+ "stateful_callbacks": {
259
+ "EarlyStoppingCallback": {
260
+ "args": {
261
+ "early_stopping_patience": 2,
262
+ "early_stopping_threshold": 0.0
263
+ },
264
+ "attributes": {
265
+ "early_stopping_patience_counter": 1
266
+ }
267
+ },
268
+ "TrainerControl": {
269
+ "args": {
270
+ "should_epoch_stop": false,
271
+ "should_evaluate": false,
272
+ "should_log": false,
273
+ "should_save": true,
274
+ "should_training_stop": false
275
+ },
276
+ "attributes": {}
277
+ }
278
+ },
279
+ "total_flos": 0.0,
280
+ "train_batch_size": 80,
281
+ "trial_name": null,
282
+ "trial_params": null
283
+ }
checkpoint-848/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d06e647210f13dbee2e3933b7061a89b32e1c16df743074b449e7c097a0ff51
3
+ size 6225
checkpoint-901/1_Pooling/config.json ADDED
@@ -0,0 +1,10 @@
1
+ {
2
+ "word_embedding_dimension": 768,
3
+ "pooling_mode_cls_token": false,
4
+ "pooling_mode_mean_tokens": true,
5
+ "pooling_mode_max_tokens": false,
6
+ "pooling_mode_mean_sqrt_len_tokens": false,
7
+ "pooling_mode_weightedmean_tokens": false,
8
+ "pooling_mode_lasttoken": false,
9
+ "include_prompt": true
10
+ }
checkpoint-901/README.md ADDED
@@ -0,0 +1,431 @@
1
+ ---
2
+ tags:
3
+ - sentence-transformers
4
+ - sentence-similarity
5
+ - feature-extraction
6
+ - dense
7
+ - generated_from_trainer
8
+ - dataset_size:8434
9
+ - loss:MultipleNegativesRankingLoss
10
+ base_model: almanach/camembert-bio-base
11
+ widget:
12
+ - source_sentence: tumeur maligne
13
+ sentences:
14
+ - 8082/3 Carcinome lymphoépithélial
15
+ - 8000/3 Tumeur maligne, SAI
16
+ - 8000/3 Cancer
17
+ - source_sentence: hépatocarcinome oncocytaire fibrolamellaire (fh
18
+ sentences:
19
+ - 8171/3 Carcinome hépatocellulaire fibrolamellaire (C22.0)
20
+ - 8560/3 Adénocarcinome et carcinome à cellules épidermoïdes
21
+ - 8052/3 Carcinome épidermoïde papillaire
22
+ - source_sentence: implant tumoral
23
+ sentences:
24
+ - 8000/6 Néoplasme métastatique
25
+ - 8480/3 Carcinome muqueux
26
+ - 8140/3 Adénocarcinome, SAI
27
+ - source_sentence: Anémie réfractaire sidéroblastique
28
+ sentences:
29
+ - 9982/3 RARS
30
+ - 9591/3 LMNH, SAI
31
+ - 8041/3 Carcinome à cellules de réserve
32
+ - source_sentence: carcinome
33
+ sentences:
34
+ - 9800/3 Leucémie subaiguë, SAI [obs]
35
+ - 8000/6 Métastase, SAI
36
+ - 8075/3 Carcinome malpighien pseudoglandulaire
37
+ pipeline_tag: sentence-similarity
38
+ library_name: sentence-transformers
39
+ ---
40
+
41
+ # SentenceTransformer based on almanach/camembert-bio-base
42
+
43
+ This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [almanach/camembert-bio-base](https://huggingface.co/almanach/camembert-bio-base). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.
44
+
45
+ ## Model Details
46
+
47
+ ### Model Description
48
+ - **Model Type:** Sentence Transformer
49
+ - **Base model:** [almanach/camembert-bio-base](https://huggingface.co/almanach/camembert-bio-base) <!-- at revision 5fbdc71cc8b8b59f9e40229ff1d5baa1808ffde6 -->
50
+ - **Maximum Sequence Length:** 512 tokens
51
+ - **Output Dimensionality:** 768 dimensions
52
+ - **Similarity Function:** Cosine Similarity
53
+ <!-- - **Training Dataset:** Unknown -->
54
+ <!-- - **Language:** Unknown -->
55
+ <!-- - **License:** Unknown -->
56
+
57
+ ### Model Sources
58
+
59
+ - **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
60
+ - **Repository:** [Sentence Transformers on GitHub](https://github.com/huggingface/sentence-transformers)
61
+ - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)
62
+
63
+ ### Full Model Architecture
64
+
65
+ ```
66
+ SentenceTransformer(
67
+ (0): Transformer({'max_seq_length': 512, 'do_lower_case': False, 'architecture': 'CamembertModel'})
68
+ (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
69
+ )
70
+ ```
71
+
72
+ ## Usage
73
+
74
+ ### Direct Usage (Sentence Transformers)
75
+
76
+ First install the Sentence Transformers library:
77
+
78
+ ```bash
79
+ pip install -U sentence-transformers
80
+ ```
81
+
82
+ Then you can load this model and run inference.
83
+ ```python
84
+ from sentence_transformers import SentenceTransformer
85
+
86
+ # Download from the 🤗 Hub
87
+ model = SentenceTransformer("sentence_transformers_model_id")
88
+ # Run inference
89
+ sentences = [
90
+ 'carcinome',
91
+ '8000/6 Métastase, SAI',
92
+ '9800/3 Leucémie subaiguë, SAI [obs]',
93
+ ]
94
+ embeddings = model.encode(sentences)
95
+ print(embeddings.shape)
96
+ # [3, 768]
97
+
98
+ # Get the similarity scores for the embeddings
99
+ similarities = model.similarity(embeddings, embeddings)
100
+ print(similarities)
101
+ # tensor([[ 1.0000, 0.5949, -0.0352],
102
+ # [ 0.5949, 1.0000, 0.0605],
103
+ # [-0.0352, 0.0605, 1.0000]])
104
+ ```
105
+
106
+ <!--
107
+ ### Direct Usage (Transformers)
108
+
109
+ <details><summary>Click to see the direct usage in Transformers</summary>
110
+
111
+ </details>
112
+ -->
113
+
114
+ <!--
115
+ ### Downstream Usage (Sentence Transformers)
116
+
117
+ You can finetune this model on your own dataset.
118
+
119
+ <details><summary>Click to expand</summary>
120
+
121
+ </details>
122
+ -->
123
+
124
+ <!--
125
+ ### Out-of-Scope Use
126
+
127
+ *List how the model may foreseeably be misused and address what users ought not to do with the model.*
128
+ -->
129
+
130
+ <!--
131
+ ## Bias, Risks and Limitations
132
+
133
+ *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
134
+ -->
135
+
136
+ <!--
137
+ ### Recommendations
138
+
139
+ *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
140
+ -->
141
+
142
+ ## Training Details
143
+
144
+ ### Training Dataset
145
+
146
+ #### Unnamed Dataset
147
+
148
+ * Size: 8,434 training samples
149
+ * Columns: <code>sentence1</code> and <code>sentence2</code>
150
+ * Approximate statistics based on the first 1000 samples:
151
+ | | sentence1 | sentence2 |
152
+ |:--------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|
153
+ | type | string | string |
154
+ | details | <ul><li>min: 3 tokens</li><li>mean: 11.31 tokens</li><li>max: 43 tokens</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 15.69 tokens</li><li>max: 34 tokens</li></ul> |
155
+ * Samples:
156
+ | sentence1 | sentence2 |
157
+ |:------------------------------------------------|:--------------------------------------------------------------------|
158
+ | <code>tumeur à petites cellules rondes</code> | <code>8806/3 Tumeur desmoplastique à petites cellules rondes</code> |
159
+ | <code>dissémination oligométastatique</code> | <code>8000/6 Néoplasme métastatique</code> |
160
+ | <code>processus néoprolifératif primaire</code> | <code>8000/3 Tumeur maligne non classée</code> |
161
+ * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:
162
+ ```json
163
+ {
164
+ "scale": 20.0,
165
+ "similarity_fct": "cos_sim",
166
+ "gather_across_devices": false
167
+ }
168
+ ```
169
+
170
+ ### Evaluation Dataset
171
+
172
+ #### Unnamed Dataset
173
+
174
+ * Size: 444 evaluation samples
175
+ * Columns: <code>sentence1</code> and <code>sentence2</code>
176
+ * Approximate statistics based on the first 444 samples:
177
+ | | sentence1 | sentence2 |
178
+ |:--------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|
179
+ | type | string | string |
180
+ | details | <ul><li>min: 3 tokens</li><li>mean: 11.68 tokens</li><li>max: 31 tokens</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 15.76 tokens</li><li>max: 34 tokens</li></ul> |
181
+ * Samples:
182
+ | sentence1 | sentence2 |
183
+ |:---------------------------------------------|:----------------------------------------------|
184
+ | <code>neuroblastome</code> | <code>9500/3 Neuroblastome, SAI</code> |
185
+ | <code>Sarcome méningothélial</code> | <code>9530/3 Sarcome méningothélial</code> |
186
+ | <code>Carcinome excréto-urinaire, SAI</code> | <code>8120/3 Carcinome urothélial, SAI</code> |
187
+ * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:
188
+ ```json
189
+ {
190
+ "scale": 20.0,
191
+ "similarity_fct": "cos_sim",
192
+ "gather_across_devices": false
193
+ }
194
+ ```
195
+
196
+ ### Training Hyperparameters
197
+ #### Non-Default Hyperparameters
198
+
199
+ - `eval_strategy`: epoch
200
+ - `per_device_train_batch_size`: 16
201
+ - `per_device_eval_batch_size`: 64
202
+ - `gradient_accumulation_steps`: 2
203
+ - `learning_rate`: 2e-05
204
+ - `weight_decay`: 0.01
205
+ - `num_train_epochs`: 30
206
+ - `lr_scheduler_type`: cosine
207
+ - `warmup_ratio`: 0.1
208
+ - `fp16`: True
209
+ - `load_best_model_at_end`: True
210
+ - `batch_sampler`: no_duplicates
211
+
212
+ #### All Hyperparameters
213
+ <details><summary>Click to expand</summary>
214
+
215
+ - `overwrite_output_dir`: False
216
+ - `do_predict`: False
217
+ - `eval_strategy`: epoch
218
+ - `prediction_loss_only`: True
219
+ - `per_device_train_batch_size`: 16
220
+ - `per_device_eval_batch_size`: 64
221
+ - `per_gpu_train_batch_size`: None
222
+ - `per_gpu_eval_batch_size`: None
223
+ - `gradient_accumulation_steps`: 2
224
+ - `eval_accumulation_steps`: None
225
+ - `torch_empty_cache_steps`: None
226
+ - `learning_rate`: 2e-05
227
+ - `weight_decay`: 0.01
228
+ - `adam_beta1`: 0.9
229
+ - `adam_beta2`: 0.999
230
+ - `adam_epsilon`: 1e-08
231
+ - `max_grad_norm`: 1.0
232
+ - `num_train_epochs`: 30
233
+ - `max_steps`: -1
234
+ - `lr_scheduler_type`: cosine
235
+ - `lr_scheduler_kwargs`: {}
236
+ - `warmup_ratio`: 0.1
237
+ - `warmup_steps`: 0
238
+ - `log_level`: passive
239
+ - `log_level_replica`: warning
240
+ - `log_on_each_node`: True
241
+ - `logging_nan_inf_filter`: True
242
+ - `save_safetensors`: True
243
+ - `save_on_each_node`: False
244
+ - `save_only_model`: False
245
+ - `restore_callback_states_from_checkpoint`: False
246
+ - `no_cuda`: False
247
+ - `use_cpu`: False
248
+ - `use_mps_device`: False
249
+ - `seed`: 42
250
+ - `data_seed`: None
251
+ - `jit_mode_eval`: False
252
+ - `bf16`: False
253
+ - `fp16`: True
254
+ - `fp16_opt_level`: O1
255
+ - `half_precision_backend`: auto
256
+ - `bf16_full_eval`: False
257
+ - `fp16_full_eval`: False
258
+ - `tf32`: None
259
+ - `local_rank`: 0
260
+ - `ddp_backend`: None
261
+ - `tpu_num_cores`: None
262
+ - `tpu_metrics_debug`: False
263
+ - `debug`: []
264
+ - `dataloader_drop_last`: False
265
+ - `dataloader_num_workers`: 0
266
+ - `dataloader_prefetch_factor`: None
267
+ - `past_index`: -1
268
+ - `disable_tqdm`: False
269
+ - `remove_unused_columns`: True
270
+ - `label_names`: None
271
+ - `load_best_model_at_end`: True
272
+ - `ignore_data_skip`: False
273
+ - `fsdp`: []
274
+ - `fsdp_min_num_params`: 0
275
+ - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
276
+ - `fsdp_transformer_layer_cls_to_wrap`: None
277
+ - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
278
+ - `parallelism_config`: None
279
+ - `deepspeed`: None
280
+ - `label_smoothing_factor`: 0.0
281
+ - `optim`: adamw_torch_fused
282
+ - `optim_args`: None
283
+ - `adafactor`: False
284
+ - `group_by_length`: False
285
+ - `length_column_name`: length
286
+ - `project`: huggingface
287
+ - `trackio_space_id`: trackio
288
+ - `ddp_find_unused_parameters`: None
289
+ - `ddp_bucket_cap_mb`: None
290
+ - `ddp_broadcast_buffers`: False
291
+ - `dataloader_pin_memory`: True
292
+ - `dataloader_persistent_workers`: False
293
+ - `skip_memory_metrics`: True
294
+ - `use_legacy_prediction_loop`: False
295
+ - `push_to_hub`: False
296
+ - `resume_from_checkpoint`: None
297
+ - `hub_model_id`: None
298
+ - `hub_strategy`: every_save
299
+ - `hub_private_repo`: None
300
+ - `hub_always_push`: False
301
+ - `hub_revision`: None
302
+ - `gradient_checkpointing`: False
303
+ - `gradient_checkpointing_kwargs`: None
304
+ - `include_inputs_for_metrics`: False
305
+ - `include_for_metrics`: []
306
+ - `eval_do_concat_batches`: True
307
+ - `fp16_backend`: auto
308
+ - `push_to_hub_model_id`: None
309
+ - `push_to_hub_organization`: None
310
+ - `mp_parameters`:
311
+ - `auto_find_batch_size`: False
312
+ - `full_determinism`: False
313
+ - `torchdynamo`: None
314
+ - `ray_scope`: last
315
+ - `ddp_timeout`: 1800
316
+ - `torch_compile`: False
317
+ - `torch_compile_backend`: None
318
+ - `torch_compile_mode`: None
319
+ - `include_tokens_per_second`: False
320
+ - `include_num_input_tokens_seen`: no
321
+ - `neftune_noise_alpha`: None
322
+ - `optim_target_modules`: None
323
+ - `batch_eval_metrics`: False
324
+ - `eval_on_start`: False
325
+ - `use_liger_kernel`: False
326
+ - `liger_kernel_config`: None
327
+ - `eval_use_gather_object`: False
328
+ - `average_tokens_across_devices`: True
329
+ - `prompts`: None
330
+ - `batch_sampler`: no_duplicates
331
+ - `multi_dataset_batch_sampler`: proportional
332
+ - `router_mapping`: {}
333
+ - `learning_rate_mapping`: {}
334
+
335
+ </details>
336
+
337
+ ### Training Logs
338
+ | Epoch | Step | Training Loss | Validation Loss |
339
+ |:-------:|:----:|:-------------:|:---------------:|
340
+ | 0.9434 | 50 | 3.4933 | - |
341
+ | 1.0 | 53 | - | 2.1337 |
342
+ | 1.8868 | 100 | 2.2572 | - |
343
+ | 2.0 | 106 | - | 1.7004 |
344
+ | 2.8302 | 150 | 1.6176 | - |
345
+ | 3.0 | 159 | - | 1.4333 |
346
+ | 3.7736 | 200 | 1.2587 | - |
347
+ | 4.0 | 212 | - | 1.2576 |
348
+ | 4.7170 | 250 | 1.0359 | - |
349
+ | 5.0 | 265 | - | 1.1235 |
350
+ | 5.6604 | 300 | 0.9043 | - |
351
+ | 6.0 | 318 | - | 1.0555 |
352
+ | 6.6038 | 350 | 0.8331 | - |
353
+ | 7.0 | 371 | - | 0.9720 |
354
+ | 7.5472 | 400 | 0.7715 | - |
355
+ | 8.0 | 424 | - | 0.9189 |
356
+ | 8.4906 | 450 | 0.7566 | - |
357
+ | 9.0 | 477 | - | 0.8910 |
358
+ | 9.4340 | 500 | 0.7205 | - |
359
+ | 10.0 | 530 | - | 0.8724 |
360
+ | 10.3774 | 550 | 0.7056 | - |
361
+ | 11.0 | 583 | - | 0.8717 |
362
+ | 11.3208 | 600 | 0.6927 | - |
363
+ | 12.0 | 636 | - | 0.8567 |
364
+ | 12.2642 | 650 | 0.6798 | - |
365
+ | 13.0 | 689 | - | 0.8519 |
366
+ | 13.2075 | 700 | 0.6689 | - |
367
+ | 14.0 | 742 | - | 0.8806 |
368
+ | 14.1509 | 750 | 0.6643 | - |
369
+ | 15.0 | 795 | - | 0.8405 |
370
+ | 15.0943 | 800 | 0.6622 | - |
371
+ | 16.0 | 848 | - | 0.8529 |
372
+ | 16.0377 | 850 | 0.6546 | - |
373
+ | 16.9811 | 900 | 0.6358 | - |
374
+ | 17.0 | 901 | - | 0.8474 |
375
+
376
+
377
+ ### Framework Versions
378
+ - Python: 3.11.14
379
+ - Sentence Transformers: 5.1.2
380
+ - Transformers: 4.57.3
381
+ - PyTorch: 2.9.1+cu128
382
+ - Accelerate: 1.12.0
383
+ - Datasets: 4.4.1
384
+ - Tokenizers: 0.22.1
385
+
386
+ ## Citation
387
+
388
+ ### BibTeX
389
+
390
+ #### Sentence Transformers
391
+ ```bibtex
392
+ @inproceedings{reimers-2019-sentence-bert,
393
+ title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
394
+ author = "Reimers, Nils and Gurevych, Iryna",
395
+ booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
396
+ month = "11",
397
+ year = "2019",
398
+ publisher = "Association for Computational Linguistics",
399
+ url = "https://arxiv.org/abs/1908.10084",
400
+ }
401
+ ```
402
+
403
+ #### MultipleNegativesRankingLoss
404
+ ```bibtex
405
+ @misc{henderson2017efficient,
406
+ title={Efficient Natural Language Response Suggestion for Smart Reply},
407
+ author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
408
+ year={2017},
409
+ eprint={1705.00652},
410
+ archivePrefix={arXiv},
411
+ primaryClass={cs.CL}
412
+ }
413
+ ```
414
+
415
+ <!--
416
+ ## Glossary
417
+
418
+ *Clearly define terms in order to be accessible across audiences.*
419
+ -->
420
+
421
+ <!--
422
+ ## Model Card Authors
423
+
424
+ *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
425
+ -->
426
+
427
+ <!--
428
+ ## Model Card Contact
429
+
430
+ *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
431
+ -->
checkpoint-901/config.json ADDED
@@ -0,0 +1,27 @@
1
+ {
2
+ "architectures": [
3
+ "CamembertModel"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.1,
6
+ "bos_token_id": 5,
7
+ "classifier_dropout": null,
8
+ "dtype": "float32",
9
+ "eos_token_id": 6,
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 768,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 3072,
15
+ "layer_norm_eps": 1e-05,
16
+ "max_position_embeddings": 514,
17
+ "model_type": "camembert",
18
+ "num_attention_heads": 12,
19
+ "num_hidden_layers": 12,
20
+ "output_past": true,
21
+ "pad_token_id": 1,
22
+ "position_embedding_type": "absolute",
23
+ "transformers_version": "4.57.3",
24
+ "type_vocab_size": 1,
25
+ "use_cache": true,
26
+ "vocab_size": 32005
27
+ }
checkpoint-901/config_sentence_transformers.json ADDED
@@ -0,0 +1,14 @@
1
+ {
2
+ "model_type": "SentenceTransformer",
3
+ "__version__": {
4
+ "sentence_transformers": "5.1.2",
5
+ "transformers": "4.57.3",
6
+ "pytorch": "2.9.1+cu128"
7
+ },
8
+ "prompts": {
9
+ "query": "",
10
+ "document": ""
11
+ },
12
+ "default_prompt_name": null,
13
+ "similarity_fn_name": "cosine"
14
+ }
checkpoint-901/model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d16a09c211175f59439e768d90d5dfb80b51124bb9571d8c3948d9a5b2c70e66
3
+ size 442510176
checkpoint-901/modules.json ADDED
@@ -0,0 +1,14 @@
1
+ [
2
+ {
3
+ "idx": 0,
4
+ "name": "0",
5
+ "path": "",
6
+ "type": "sentence_transformers.models.Transformer"
7
+ },
8
+ {
9
+ "idx": 1,
10
+ "name": "1",
11
+ "path": "1_Pooling",
12
+ "type": "sentence_transformers.models.Pooling"
13
+ }
14
+ ]
checkpoint-901/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0145d7920274fa6722f0e757b5c54784b7cf604d3e7bf871c4355c497621a714
3
+ size 885145355
checkpoint-901/rng_state.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:178424cc73e0090aef6cdcd30d15cd0995d0ec20ba72d908defc34620204e93d
3
+ size 14645
checkpoint-901/scaler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d01b7b0892b533347140f39660886ebfc517e7cce261bb35efdb01e06461a9d1
3
+ size 1383
checkpoint-901/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:10321e494353b1237e7db838d11bc883547d401648797f7ebbc6d6f56a3a0430
3
+ size 1465
checkpoint-901/sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
1
+ {
2
+ "max_seq_length": 512,
3
+ "do_lower_case": false
4
+ }
checkpoint-901/special_tokens_map.json ADDED
@@ -0,0 +1,56 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<s>NOTUSED",
4
+ "</s>NOTUSED",
5
+ "<unk>NOTUSED"
6
+ ],
7
+ "bos_token": {
8
+ "content": "<s>",
9
+ "lstrip": false,
10
+ "normalized": false,
11
+ "rstrip": false,
12
+ "single_word": false
13
+ },
14
+ "cls_token": {
15
+ "content": "<s>",
16
+ "lstrip": false,
17
+ "normalized": false,
18
+ "rstrip": false,
19
+ "single_word": false
20
+ },
21
+ "eos_token": {
22
+ "content": "</s>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false
27
+ },
28
+ "mask_token": {
29
+ "content": "<mask>",
30
+ "lstrip": true,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false
34
+ },
35
+ "pad_token": {
36
+ "content": "<pad>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false
41
+ },
42
+ "sep_token": {
43
+ "content": "</s>",
44
+ "lstrip": false,
45
+ "normalized": false,
46
+ "rstrip": false,
47
+ "single_word": false
48
+ },
49
+ "unk_token": {
50
+ "content": "<unk>",
51
+ "lstrip": false,
52
+ "normalized": false,
53
+ "rstrip": false,
54
+ "single_word": false
55
+ }
56
+ }
checkpoint-901/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-901/tokenizer_config.json ADDED
@@ -0,0 +1,84 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<s>NOTUSED",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "<pad>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "</s>NOTUSED",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "<unk>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "4": {
36
+ "content": "<unk>NOTUSED",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "5": {
44
+ "content": "<s>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "6": {
52
+ "content": "</s>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "32004": {
60
+ "content": "<mask>",
61
+ "lstrip": true,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ }
67
+ },
68
+ "additional_special_tokens": [
69
+ "<s>NOTUSED",
70
+ "</s>NOTUSED",
71
+ "<unk>NOTUSED"
72
+ ],
73
+ "bos_token": "<s>",
74
+ "clean_up_tokenization_spaces": true,
75
+ "cls_token": "<s>",
76
+ "eos_token": "</s>",
77
+ "extra_special_tokens": {},
78
+ "mask_token": "<mask>",
79
+ "model_max_length": 512,
80
+ "pad_token": "<pad>",
81
+ "sep_token": "</s>",
82
+ "tokenizer_class": "CamembertTokenizer",
83
+ "unk_token": "<unk>"
84
+ }
checkpoint-901/trainer_state.json ADDED
@@ -0,0 +1,305 @@
1
+ {
2
+ "best_global_step": 795,
3
+ "best_metric": 0.8405108451843262,
4
+ "best_model_checkpoint": "../models/camembert-bio-morpho-bi-encoder/checkpoint-795",
5
+ "epoch": 17.0,
6
+ "eval_steps": 500,
7
+ "global_step": 901,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.9433962264150944,
14
+ "grad_norm": 7.664644241333008,
15
+ "learning_rate": 6.163522012578617e-06,
16
+ "loss": 3.4933,
17
+ "step": 50
18
+ },
19
+ {
20
+ "epoch": 1.0,
21
+ "eval_loss": 2.1337265968322754,
22
+ "eval_runtime": 3.1424,
23
+ "eval_samples_per_second": 141.294,
24
+ "eval_steps_per_second": 0.636,
25
+ "step": 53
26
+ },
27
+ {
28
+ "epoch": 1.8867924528301887,
29
+ "grad_norm": 7.603148937225342,
30
+ "learning_rate": 1.2452830188679246e-05,
31
+ "loss": 2.2572,
32
+ "step": 100
33
+ },
34
+ {
35
+ "epoch": 2.0,
36
+ "eval_loss": 1.7004107236862183,
37
+ "eval_runtime": 2.0559,
38
+ "eval_samples_per_second": 215.959,
39
+ "eval_steps_per_second": 0.973,
40
+ "step": 106
41
+ },
42
+ {
43
+ "epoch": 2.830188679245283,
44
+ "grad_norm": 12.284380912780762,
45
+ "learning_rate": 1.8742138364779877e-05,
46
+ "loss": 1.6176,
47
+ "step": 150
48
+ },
49
+ {
50
+ "epoch": 3.0,
51
+ "eval_loss": 1.4332726001739502,
52
+ "eval_runtime": 2.2469,
53
+ "eval_samples_per_second": 197.609,
54
+ "eval_steps_per_second": 0.89,
55
+ "step": 159
56
+ },
57
+ {
58
+ "epoch": 3.7735849056603774,
59
+ "grad_norm": 5.0590691566467285,
60
+ "learning_rate": 1.996146712998892e-05,
61
+ "loss": 1.2587,
62
+ "step": 200
63
+ },
64
+ {
65
+ "epoch": 4.0,
66
+ "eval_loss": 1.2575932741165161,
67
+ "eval_runtime": 1.7396,
68
+ "eval_samples_per_second": 255.237,
69
+ "eval_steps_per_second": 1.15,
70
+ "step": 212
71
+ },
72
+ {
73
+ "epoch": 4.716981132075472,
74
+ "grad_norm": 14.913520812988281,
75
+ "learning_rate": 1.9805436150436352e-05,
76
+ "loss": 1.0359,
77
+ "step": 250
78
+ },
79
+ {
80
+ "epoch": 5.0,
81
+ "eval_loss": 1.1234651803970337,
82
+ "eval_runtime": 1.8234,
83
+ "eval_samples_per_second": 243.507,
84
+ "eval_steps_per_second": 1.097,
85
+ "step": 265
86
+ },
87
+ {
88
+ "epoch": 5.660377358490566,
89
+ "grad_norm": 4.878383636474609,
90
+ "learning_rate": 1.9531375476817667e-05,
91
+ "loss": 0.9043,
92
+ "step": 300
93
+ },
94
+ {
95
+ "epoch": 6.0,
96
+ "eval_loss": 1.055496096611023,
97
+ "eval_runtime": 1.7553,
98
+ "eval_samples_per_second": 252.945,
99
+ "eval_steps_per_second": 1.139,
100
+ "step": 318
101
+ },
102
+ {
103
+ "epoch": 6.60377358490566,
104
+ "grad_norm": 10.084212303161621,
105
+ "learning_rate": 1.9142584023833506e-05,
106
+ "loss": 0.8331,
107
+ "step": 350
108
+ },
109
+ {
110
+ "epoch": 7.0,
111
+ "eval_loss": 0.9720313549041748,
112
+ "eval_runtime": 1.979,
113
+ "eval_samples_per_second": 224.355,
114
+ "eval_steps_per_second": 1.011,
115
+ "step": 371
116
+ },
117
+ {
118
+ "epoch": 7.547169811320755,
119
+ "grad_norm": 3.5150604248046875,
120
+ "learning_rate": 1.8643741739988672e-05,
121
+ "loss": 0.7715,
122
+ "step": 400
123
+ },
124
+ {
125
+ "epoch": 8.0,
126
+ "eval_loss": 0.9189229607582092,
127
+ "eval_runtime": 1.7408,
128
+ "eval_samples_per_second": 255.054,
129
+ "eval_steps_per_second": 1.149,
130
+ "step": 424
131
+ },
132
+ {
133
+ "epoch": 8.49056603773585,
134
+ "grad_norm": 3.50665020942688,
135
+ "learning_rate": 1.8040853274260137e-05,
136
+ "loss": 0.7566,
137
+ "step": 450
138
+ },
139
+ {
140
+ "epoch": 9.0,
141
+ "eval_loss": 0.8909644484519958,
142
+ "eval_runtime": 1.9496,
143
+ "eval_samples_per_second": 227.734,
144
+ "eval_steps_per_second": 1.026,
145
+ "step": 477
146
+ },
147
+ {
148
+ "epoch": 9.433962264150944,
149
+ "grad_norm": 3.0296542644500732,
150
+ "learning_rate": 1.7341175697121273e-05,
151
+ "loss": 0.7205,
152
+ "step": 500
153
+ },
154
+ {
155
+ "epoch": 10.0,
156
+ "eval_loss": 0.8723557591438293,
157
+ "eval_runtime": 1.7422,
158
+ "eval_samples_per_second": 254.844,
159
+ "eval_steps_per_second": 1.148,
160
+ "step": 530
161
+ },
162
+ {
163
+ "epoch": 10.377358490566039,
164
+ "grad_norm": 2.8000473976135254,
165
+ "learning_rate": 1.655313114595666e-05,
166
+ "loss": 0.7056,
167
+ "step": 550
168
+ },
169
+ {
170
+ "epoch": 11.0,
171
+ "eval_loss": 0.8716644644737244,
172
+ "eval_runtime": 1.7145,
173
+ "eval_samples_per_second": 258.973,
174
+ "eval_steps_per_second": 1.167,
175
+ "step": 583
176
+ },
177
+ {
178
+ "epoch": 11.320754716981131,
179
+ "grad_norm": 3.1076793670654297,
180
+ "learning_rate": 1.5686205446369293e-05,
181
+ "loss": 0.6927,
182
+ "step": 600
183
+ },
184
+ {
185
+ "epoch": 12.0,
186
+ "eval_loss": 0.8567394614219666,
187
+ "eval_runtime": 1.7089,
188
+ "eval_samples_per_second": 259.818,
189
+ "eval_steps_per_second": 1.17,
190
+ "step": 636
191
+ },
192
+ {
193
+ "epoch": 12.264150943396226,
194
+ "grad_norm": 3.407707929611206,
195
+ "learning_rate": 1.4750833929692785e-05,
196
+ "loss": 0.6798,
197
+ "step": 650
198
+ },
199
+ {
200
+ "epoch": 13.0,
201
+ "eval_loss": 0.8519091606140137,
202
+ "eval_runtime": 1.7527,
203
+ "eval_samples_per_second": 253.324,
204
+ "eval_steps_per_second": 1.141,
205
+ "step": 689
206
+ },
207
+ {
208
+ "epoch": 13.20754716981132,
209
+ "grad_norm": 2.503713607788086,
210
+ "learning_rate": 1.3758275821142382e-05,
211
+ "loss": 0.6689,
212
+ "step": 700
213
+ },
214
+ {
215
+ "epoch": 14.0,
216
+ "eval_loss": 0.8806237578392029,
217
+ "eval_runtime": 1.7483,
218
+ "eval_samples_per_second": 253.954,
219
+ "eval_steps_per_second": 1.144,
220
+ "step": 742
221
+ },
222
+ {
223
+ "epoch": 14.150943396226415,
224
+ "grad_norm": 2.356168270111084,
225
+ "learning_rate": 1.2720478710615944e-05,
226
+ "loss": 0.6643,
227
+ "step": 750
228
+ },
229
+ {
230
+ "epoch": 15.0,
231
+ "eval_loss": 0.8405108451843262,
232
+ "eval_runtime": 1.727,
233
+ "eval_samples_per_second": 257.097,
234
+ "eval_steps_per_second": 1.158,
235
+ "step": 795
236
+ },
237
+ {
238
+ "epoch": 15.09433962264151,
239
+ "grad_norm": 3.0982022285461426,
240
+ "learning_rate": 1.164993473753275e-05,
241
+ "loss": 0.6622,
242
+ "step": 800
243
+ },
244
+ {
245
+ "epoch": 16.0,
246
+ "eval_loss": 0.8529258966445923,
247
+ "eval_runtime": 1.86,
248
+ "eval_samples_per_second": 238.708,
249
+ "eval_steps_per_second": 1.075,
250
+ "step": 848
251
+ },
252
+ {
253
+ "epoch": 16.037735849056602,
254
+ "grad_norm": 2.3962793350219727,
255
+ "learning_rate": 1.0559530220837593e-05,
256
+ "loss": 0.6546,
257
+ "step": 850
258
+ },
259
+ {
260
+ "epoch": 16.9811320754717,
261
+ "grad_norm": 2.350217580795288,
262
+ "learning_rate": 9.462390544199221e-06,
263
+ "loss": 0.6358,
264
+ "step": 900
265
+ },
266
+ {
267
+ "epoch": 17.0,
268
+ "eval_loss": 0.8473597168922424,
269
+ "eval_runtime": 1.7403,
270
+ "eval_samples_per_second": 255.135,
271
+ "eval_steps_per_second": 1.149,
272
+ "step": 901
273
+ }
274
+ ],
275
+ "logging_steps": 50,
276
+ "max_steps": 1590,
277
+ "num_input_tokens_seen": 0,
278
+ "num_train_epochs": 30,
279
+ "save_steps": 500,
280
+ "stateful_callbacks": {
281
+ "EarlyStoppingCallback": {
282
+ "args": {
283
+ "early_stopping_patience": 2,
284
+ "early_stopping_threshold": 0.0
285
+ },
286
+ "attributes": {
287
+ "early_stopping_patience_counter": 2
288
+ }
289
+ },
290
+ "TrainerControl": {
291
+ "args": {
292
+ "should_epoch_stop": false,
293
+ "should_evaluate": false,
294
+ "should_log": false,
295
+ "should_save": true,
296
+ "should_training_stop": true
297
+ },
298
+ "attributes": {}
299
+ }
300
+ },
301
+ "total_flos": 0.0,
302
+ "train_batch_size": 80,
303
+ "trial_name": null,
304
+ "trial_params": null
305
+ }
checkpoint-901/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d06e647210f13dbee2e3933b7061a89b32e1c16df743074b449e7c097a0ff51
3
+ size 6225