radoslavralev committed · Commit 130ec6a · verified · 1 Parent(s): 04f4ceb

Add new SentenceTransformer model

1_Pooling/config.json CHANGED
@@ -1,7 +1,7 @@
 {
-    "word_embedding_dimension": 512,
-    "pooling_mode_cls_token": true,
-    "pooling_mode_mean_tokens": false,
+    "word_embedding_dimension": 768,
+    "pooling_mode_cls_token": false,
+    "pooling_mode_mean_tokens": true,
     "pooling_mode_max_tokens": false,
     "pooling_mode_mean_sqrt_len_tokens": false,
     "pooling_mode_weightedmean_tokens": false,
2_Dense/config.json ADDED
@@ -0,0 +1,6 @@
+{
+    "in_features": 768,
+    "out_features": 3072,
+    "bias": false,
+    "activation_function": "torch.nn.modules.linear.Identity"
+}
2_Dense/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d2aa8f88c6a0e99a33e1795c480e6cedd06cfca13afd579a3eba0c79371db12
+size 9437272
3_Dense/config.json ADDED
@@ -0,0 +1,6 @@
+{
+    "in_features": 3072,
+    "out_features": 768,
+    "bias": false,
+    "activation_function": "torch.nn.modules.linear.Identity"
+}
3_Dense/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db522a8aa84c6b3d2bb1fb8bd7d8828e0f58bf897fcebcd331065e233a3b545c
+size 9437272
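
The two added Dense modules form a bias-free 768 → 3072 → 768 bottleneck with identity activations, i.e. two plain linear projections. A sketch with `torch.nn.Linear` standing in for `sentence_transformers.models.Dense` (assuming fp32 weights), which also accounts for the ~9.4 MB safetensors files:

```python
import torch
from torch import nn

up = nn.Linear(768, 3072, bias=False)    # mirrors 2_Dense/config.json
down = nn.Linear(3072, 768, bias=False)  # mirrors 3_Dense/config.json

x = torch.randn(4, 768)
print(down(up(x)).shape)  # torch.Size([4, 768])

# Each weight matrix holds 768 * 3072 values; at 4 bytes each that is
# 768 * 3072 * 4 = 9,437,184 bytes, consistent with the 9,437,272-byte
# files above once the small safetensors header is included.
print(768 * 3072 * 4)
```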
README.md CHANGED
@@ -5,51 +5,123 @@ tags:
 - feature-extraction
 - dense
 - generated_from_trainer
-- dataset_size:100000
+- dataset_size:713743
 - loss:MultipleNegativesRankingLoss
-base_model: prajjwal1/bert-small
+base_model: google/embeddinggemma-300m
 widget:
-- source_sentence: How do I calculate IQ?
+- source_sentence: 'Abraham Lincoln: Why is the Gettysburg Address so memorable?'
   sentences:
-  - What is the easiest way to know my IQ?
-  - How do I calculate not IQ ?
-  - What are some creative and innovative business ideas with less investment in India?
-- source_sentence: How can I learn martial arts in my home?
+  - 'Abraham Lincoln: Why is the Gettysburg Address so memorable?'
+  - What does the Gettysburg Address really mean?
+  - What is eatalo.com?
+- source_sentence: Has the influence of Ancient Carthage in science, math, and society
+    been underestimated?
   sentences:
-  - How can I learn martial arts by myself?
-  - What are the advantages and disadvantages of investing in gold?
-  - Can people see that I have looked at their pictures on instagram if I am not following
-    them?
-- source_sentence: When Enterprise picks you up do you have to take them back?
+  - How does one earn money online without an investment from home?
+  - Has the influence of Ancient Carthage in science, math, and society been underestimated?
+  - Has the influence of the Ancient Etruscans in science and math been underestimated?
+- source_sentence: Is there any app that shares charging to others like share it how
+    we transfer files?
   sentences:
-  - Are there any software Training institute in Tuticorin?
-  - When Enterprise picks you up do you have to take them back?
-  - When Enterprise picks you up do them have to take youback?
-- source_sentence: What are some non-capital goods?
+  - How do you think of Chinese claims that the present Private Arbitration is illegal,
+    its verdict violates the UNCLOS and is illegal?
+  - Is there any app that shares charging to others like share it how we transfer
+    files?
+  - Are there any platforms that provides end-to-end encryption for file transfer/
+    sharing?
+- source_sentence: Why AAP’s MLA Dinesh Mohaniya has been arrested?
   sentences:
-  - What are capital goods?
-  - How is the value of [math]\pi[/math] calculated?
-  - What are some non-capital goods?
-- source_sentence: What is the QuickBooks technical support phone number in New York?
+  - What are your views on the latest sex scandal by AAP MLA Sandeep Kumar?
+  - What is a dc current? What are some examples?
+  - Why AAP’s MLA Dinesh Mohaniya has been arrested?
+- source_sentence: What is the difference between economic growth and economic development?
   sentences:
-  - What caused the Great Depression?
-  - Can I apply for PR in Canada?
-  - Which is the best QuickBooks Hosting Support Number in New York?
+  - How cold can the Gobi Desert get, and how do its average temperatures compare
+    to the ones in the Simpson Desert?
+  - the difference between economic growth and economic development is What?
+  - What is the difference between economic growth and economic development?
 pipeline_tag: sentence-similarity
 library_name: sentence-transformers
+metrics:
+- cosine_accuracy@1
+- cosine_accuracy@3
+- cosine_accuracy@5
+- cosine_precision@1
+- cosine_precision@3
+- cosine_precision@5
+- cosine_recall@1
+- cosine_recall@3
+- cosine_recall@5
+- cosine_ndcg@10
+- cosine_mrr@1
+- cosine_mrr@5
+- cosine_mrr@10
+- cosine_map@100
+model-index:
+- name: SentenceTransformer based on google/embeddinggemma-300m
+  results:
+  - task:
+      type: information-retrieval
+      name: Information Retrieval
+    dataset:
+      name: val
+      type: val
+    metrics:
+    - type: cosine_accuracy@1
+      value: 0.0
+      name: Cosine Accuracy@1
+    - type: cosine_accuracy@3
+      value: 0.0
+      name: Cosine Accuracy@3
+    - type: cosine_accuracy@5
+      value: 2.5e-05
+      name: Cosine Accuracy@5
+    - type: cosine_precision@1
+      value: 0.0
+      name: Cosine Precision@1
+    - type: cosine_precision@3
+      value: 0.0
+      name: Cosine Precision@3
+    - type: cosine_precision@5
+      value: 5.0e-06
+      name: Cosine Precision@5
+    - type: cosine_recall@1
+      value: 0.0
+      name: Cosine Recall@1
+    - type: cosine_recall@3
+      value: 0.0
+      name: Cosine Recall@3
+    - type: cosine_recall@5
+      value: 2.5e-05
+      name: Cosine Recall@5
+    - type: cosine_ndcg@10
+      value: 4.0643645983386815e-05
+      name: Cosine Ndcg@10
+    - type: cosine_mrr@1
+      value: 0.0
+      name: Cosine Mrr@1
+    - type: cosine_mrr@5
+      value: 5.0e-06
+      name: Cosine Mrr@5
+    - type: cosine_mrr@10
+      value: 1.697420634920635e-05
+      name: Cosine Mrr@10
+    - type: cosine_map@100
+      value: 5.219463554638405e-05
+      name: Cosine Map@100
 ---
 
-# SentenceTransformer based on prajjwal1/bert-small
+# SentenceTransformer based on google/embeddinggemma-300m
 
-This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [prajjwal1/bert-small](https://huggingface.co/prajjwal1/bert-small). It maps sentences & paragraphs to a 512-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.
+This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [google/embeddinggemma-300m](https://huggingface.co/google/embeddinggemma-300m). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.
 
 ## Model Details
 
 ### Model Description
 - **Model Type:** Sentence Transformer
-- **Base model:** [prajjwal1/bert-small](https://huggingface.co/prajjwal1/bert-small) <!-- at revision 0ec5f86f27c1a77d704439db5e01c307ea11b9d4 -->
+- **Base model:** [google/embeddinggemma-300m](https://huggingface.co/google/embeddinggemma-300m) <!-- at revision 57c266a740f537b4dc058e1b0cda161fd15afa75 -->
 - **Maximum Sequence Length:** 128 tokens
-- **Output Dimensionality:** 512 dimensions
+- **Output Dimensionality:** 768 dimensions
 - **Similarity Function:** Cosine Similarity
 <!-- - **Training Dataset:** Unknown -->
 <!-- - **Language:** Unknown -->
@@ -65,8 +137,11 @@ This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [p
 
 ```
 SentenceTransformer(
-  (0): Transformer({'max_seq_length': 128, 'do_lower_case': False, 'architecture': 'BertModel'})
-  (1): Pooling({'word_embedding_dimension': 512, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
+  (0): Transformer({'max_seq_length': 128, 'do_lower_case': False, 'architecture': 'Gemma3TextModel'})
+  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
+  (2): Dense({'in_features': 768, 'out_features': 3072, 'bias': False, 'activation_function': 'torch.nn.modules.linear.Identity'})
+  (3): Dense({'in_features': 3072, 'out_features': 768, 'bias': False, 'activation_function': 'torch.nn.modules.linear.Identity'})
+  (4): Normalize()
 )
 ```
 
@@ -85,23 +160,25 @@ Then you can load this model and run inference.
 from sentence_transformers import SentenceTransformer
 
 # Download from the 🤗 Hub
-model = SentenceTransformer("sentence_transformers_model_id")
+model = SentenceTransformer("redis/model-b-structured")
 # Run inference
-sentences = [
-    'What is the QuickBooks technical support phone number in New York?',
-    'Which is the best QuickBooks Hosting Support Number in New York?',
-    'Can I apply for PR in Canada?',
+queries = [
+    "What is the difference between economic growth and economic development?",
 ]
-embeddings = model.encode(sentences)
-print(embeddings.shape)
-# [3, 512]
+documents = [
+    'What is the difference between economic growth and economic development?',
+    'the difference between economic growth and economic development is What?',
+    'How cold can the Gobi Desert get, and how do its average temperatures compare to the ones in the Simpson Desert?',
+]
+query_embeddings = model.encode_query(queries)
+document_embeddings = model.encode_document(documents)
+print(query_embeddings.shape, document_embeddings.shape)
+# [1, 768] [3, 768]
 
 # Get the similarity scores for the embeddings
-similarities = model.similarity(embeddings, embeddings)
+similarities = model.similarity(query_embeddings, document_embeddings)
 print(similarities)
-# tensor([[1.0000, 0.8563, 0.0594],
-#         [0.8563, 1.0000, 0.1245],
-#         [0.0594, 0.1245, 1.0000]])
+# tensor([[nan, nan, nan]])
 ```
 
 <!--
@@ -128,6 +205,32 @@ You can finetune this model on your own dataset.
 *List how the model may foreseeably be misused and address what users ought not to do with the model.*
 -->
 
+## Evaluation
+
+### Metrics
+
+#### Information Retrieval
+
+* Dataset: `val`
+* Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)
+
+| Metric             | Value   |
+|:-------------------|:--------|
+| cosine_accuracy@1  | 0.0     |
+| cosine_accuracy@3  | 0.0     |
+| cosine_accuracy@5  | 0.0     |
+| cosine_precision@1 | 0.0     |
+| cosine_precision@3 | 0.0     |
+| cosine_precision@5 | 0.0     |
+| cosine_recall@1    | 0.0     |
+| cosine_recall@3    | 0.0     |
+| cosine_recall@5    | 0.0     |
+| **cosine_ndcg@10** | **0.0** |
+| cosine_mrr@1       | 0.0     |
+| cosine_mrr@5       | 0.0     |
+| cosine_mrr@10      | 0.0     |
+| cosine_map@100     | 0.0001  |
+
 <!--
 ## Bias, Risks and Limitations
 
@@ -146,23 +249,49 @@ You can finetune this model on your own dataset.
 
 #### Unnamed Dataset
 
-* Size: 100,000 training samples
-* Columns: <code>sentence_0</code>, <code>sentence_1</code>, and <code>sentence_2</code>
+* Size: 713,743 training samples
+* Columns: <code>anchor</code>, <code>positive</code>, and <code>negative</code>
+* Approximate statistics based on the first 1000 samples:
+  |         | anchor | positive | negative |
+  |:--------|:---------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|
+  | type    | string | string | string |
+  | details | <ul><li>min: 6 tokens</li><li>mean: 15.9 tokens</li><li>max: 63 tokens</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 15.86 tokens</li><li>max: 63 tokens</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 16.63 tokens</li><li>max: 61 tokens</li></ul> |
+* Samples:
+  | anchor | positive | negative |
+  |:-------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------------------------------------|
+  | <code>Which one is better Linux OS? Ubuntu or Mint?</code> | <code>Why do you use Linux Mint?</code> | <code>Which one is not better Linux OS ? Ubuntu or Mint ?</code> |
+  | <code>What is flow?</code> | <code>What is flow?</code> | <code>What are flow lines?</code> |
+  | <code>How is Trump planning to get Mexico to pay for his supposed wall?</code> | <code>How is it possible for Donald Trump to force Mexico to pay for the wall?</code> | <code>Why do we connect the positive terminal before the negative terminal to ground in a vehicle battery?</code> |
+* Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:
+  ```json
+  {
+      "scale": 7.0,
+      "similarity_fct": "cos_sim",
+      "gather_across_devices": false
+  }
+  ```
+
+### Evaluation Dataset
+
+#### Unnamed Dataset
+
+* Size: 40,000 evaluation samples
+* Columns: <code>anchor</code>, <code>positive</code>, and <code>negative</code>
 * Approximate statistics based on the first 1000 samples:
-  |         | sentence_0 | sentence_1 | sentence_2 |
+  |         | anchor | positive | negative |
   |:--------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|
   | type    | string | string | string |
-  | details | <ul><li>min: 6 tokens</li><li>mean: 15.79 tokens</li><li>max: 66 tokens</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 15.68 tokens</li><li>max: 66 tokens</li></ul> | <ul><li>min: 7 tokens</li><li>mean: 16.37 tokens</li><li>max: 67 tokens</li></ul> |
+  | details | <ul><li>min: 6 tokens</li><li>mean: 15.38 tokens</li><li>max: 75 tokens</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 15.39 tokens</li><li>max: 75 tokens</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 16.58 tokens</li><li>max: 68 tokens</li></ul> |
 * Samples:
-  | sentence_0 | sentence_1 | sentence_2 |
-  |:-----------------------------------------------------------------|:-----------------------------------------------------------------|:----------------------------------------------------------------------------------|
-  | <code>Is masturbating bad for boys?</code> | <code>Is masturbating bad for boys?</code> | <code>How harmful or unhealthy is masturbation?</code> |
-  | <code>Does a train engine move in reverse?</code> | <code>Does a train engine move in reverse?</code> | <code>Time moves forward, not in reverse. Doesn't that make time a vector?</code> |
-  | <code>What is the most badass thing anyone has ever done?</code> | <code>What is the most badass thing anyone has ever done?</code> | <code>anyone is the most badass thing Whathas ever done?</code> |
+  | anchor | positive | negative |
+  |:-------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+  | <code>Why are all my questions on Quora marked needing improvement?</code> | <code>Why are all my questions immediately being marked as needing improvement?</code> | <code>For a post-graduate student in IIT, is it allowed to take an external scholarship as a top-up to his/her MHRD assistantship?</code> |
+  | <code>Can blue butter fly needle with vaccum tube be reused? Is it HIV risk? . Heard the needle is too small to be reused . Had blood draw at clinic?</code> | <code>Can blue butter fly needle with vaccum tube be reused? Is it HIV risk? . Heard the needle is too small to be reused . Had blood draw at clinic?</code> | <code>Can blue butter fly needle with vaccum tube be reused not ? Is it HIV risk ? . Heard the needle is too small to be reused . Had blood draw at clinic ?</code> |
+  | <code>Why do people still believe the world is flat?</code> | <code>Why are there still people who believe the world is flat?</code> | <code>I'm not able to buy Udemy course .it is not accepting mine and my friends debit card.my card can be used for Flipkart .how to purchase now?</code> |
 * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:
   ```json
   {
-      "scale": 20.0,
+      "scale": 7.0,
       "similarity_fct": "cos_sim",
       "gather_across_devices": false
   }
@@ -171,17 +300,30 @@ You can finetune this model on your own dataset.
 
 ### Training Hyperparameters
 #### Non-Default Hyperparameters
 
+- `eval_strategy`: steps
 - `per_device_train_batch_size`: 64
 - `per_device_eval_batch_size`: 64
+- `learning_rate`: 2e-05
+- `weight_decay`: 0.0001
+- `max_steps`: 5000
+- `warmup_ratio`: 0.1
 - `fp16`: True
-- `multi_dataset_batch_sampler`: round_robin
+- `dataloader_drop_last`: True
+- `dataloader_num_workers`: 1
+- `dataloader_prefetch_factor`: 1
+- `load_best_model_at_end`: True
+- `optim`: adamw_torch
+- `ddp_find_unused_parameters`: False
+- `push_to_hub`: True
+- `hub_model_id`: redis/model-b-structured
+- `eval_on_start`: True
 
 #### All Hyperparameters
 <details><summary>Click to expand</summary>
 
 - `overwrite_output_dir`: False
 - `do_predict`: False
-- `eval_strategy`: no
+- `eval_strategy`: steps
 - `prediction_loss_only`: True
 - `per_device_train_batch_size`: 64
 - `per_device_eval_batch_size`: 64
@@ -190,17 +332,17 @@ You can finetune this model on your own dataset.
 - `gradient_accumulation_steps`: 1
 - `eval_accumulation_steps`: None
 - `torch_empty_cache_steps`: None
-- `learning_rate`: 5e-05
-- `weight_decay`: 0.0
+- `learning_rate`: 2e-05
+- `weight_decay`: 0.0001
 - `adam_beta1`: 0.9
 - `adam_beta2`: 0.999
 - `adam_epsilon`: 1e-08
-- `max_grad_norm`: 1
-- `num_train_epochs`: 3
-- `max_steps`: -1
+- `max_grad_norm`: 1.0
+- `num_train_epochs`: 3.0
+- `max_steps`: 5000
 - `lr_scheduler_type`: linear
 - `lr_scheduler_kwargs`: {}
-- `warmup_ratio`: 0.0
+- `warmup_ratio`: 0.1
 - `warmup_steps`: 0
 - `log_level`: passive
 - `log_level_replica`: warning
@@ -228,14 +370,14 @@ You can finetune this model on your own dataset.
 - `tpu_num_cores`: None
 - `tpu_metrics_debug`: False
 - `debug`: []
-- `dataloader_drop_last`: False
-- `dataloader_num_workers`: 0
-- `dataloader_prefetch_factor`: None
+- `dataloader_drop_last`: True
+- `dataloader_num_workers`: 1
+- `dataloader_prefetch_factor`: 1
 - `past_index`: -1
- `disable_tqdm`: False
 - `remove_unused_columns`: True
 - `label_names`: None
-- `load_best_model_at_end`: False
+- `load_best_model_at_end`: True
 - `ignore_data_skip`: False
 - `fsdp`: []
 - `fsdp_min_num_params`: 0
@@ -245,23 +387,23 @@ You can finetune this model on your own dataset.
 - `parallelism_config`: None
 - `deepspeed`: None
 - `label_smoothing_factor`: 0.0
-- `optim`: adamw_torch_fused
+- `optim`: adamw_torch
 - `optim_args`: None
 - `adafactor`: False
 - `group_by_length`: False
 - `length_column_name`: length
 - `project`: huggingface
 - `trackio_space_id`: trackio
-- `ddp_find_unused_parameters`: None
+- `ddp_find_unused_parameters`: False
 - `ddp_bucket_cap_mb`: None
 - `ddp_broadcast_buffers`: False
 - `dataloader_pin_memory`: True
 - `dataloader_persistent_workers`: False
 - `skip_memory_metrics`: True
 - `use_legacy_prediction_loop`: False
-- `push_to_hub`: False
+- `push_to_hub`: True
 - `resume_from_checkpoint`: None
-- `hub_model_id`: None
+- `hub_model_id`: redis/model-b-structured
 - `hub_strategy`: every_save
 - `hub_private_repo`: None
 - `hub_always_push`: False
@@ -288,31 +430,43 @@ You can finetune this model on your own dataset.
 - `neftune_noise_alpha`: None
 - `optim_target_modules`: None
 - `batch_eval_metrics`: False
-- `eval_on_start`: False
+- `eval_on_start`: True
 - `use_liger_kernel`: False
 - `liger_kernel_config`: None
 - `eval_use_gather_object`: False
 - `average_tokens_across_devices`: True
 - `prompts`: None
 - `batch_sampler`: batch_sampler
-- `multi_dataset_batch_sampler`: round_robin
+- `multi_dataset_batch_sampler`: proportional
 - `router_mapping`: {}
 - `learning_rate_mapping`: {}
 
 </details>
 
 ### Training Logs
-| Epoch  | Step | Training Loss |
-|:------:|:----:|:-------------:|
-| 0.3199 | 500  | 0.4294        |
-| 0.6398 | 1000 | 0.1268        |
-| 0.9597 | 1500 | 0.1           |
-| 1.2796 | 2000 | 0.0792        |
-| 1.5995 | 2500 | 0.0706        |
-| 1.9194 | 3000 | 0.0687        |
-| 2.2393 | 3500 | 0.0584        |
-| 2.5592 | 4000 | 0.057         |
-| 2.8791 | 4500 | 0.0581        |
+| Epoch  | Step | Training Loss | Validation Loss | val_cosine_ndcg@10 |
+|:------:|:----:|:-------------:|:---------------:|:------------------:|
+| 0      | 0    | -             | 1.1196          | 0.8084             |
+| 0.0224 | 250  | 0.3432        | 0.2247          | 0.8873             |
+| 0.0448 | 500  | 0.2279        | 0.2306          | 0.8821             |
+| 0.0673 | 750  | 0.2253        | 0.2339          | 0.8802             |
+| 0.0897 | 1000 | 0.2231        | 0.2147          | 0.8882             |
+| 0.1121 | 1250 | 0.2091        | 0.2115          | 0.8881             |
+| 0.1345 | 1500 | 0.2145        | 0.2059          | 0.8886             |
+| 0.1569 | 1750 | 0.2049        | 0.2025          | 0.8910             |
+| 0.1793 | 2000 | 0.2066        | nan             | 0.0346             |
+| 0.2018 | 2250 | 0.2012        | 0.2087          | 0.8929             |
+| 0.2242 | 2500 | 0.2175        | 0.2187          | 0.8960             |
+| 0.2466 | 2750 | 0.2311        | nan             | 0.2274             |
+| 0.2690 | 3000 | 0.2477        | nan             | 0.1686             |
+| 0.2914 | 3250 | 0.2484        | nan             | 0.1686             |
+| 0.3138 | 3500 | 0.253         | nan             | 0.1686             |
+| 0.3363 | 3750 | 0.249         | nan             | 0.1686             |
+| 0.3587 | 4000 | 0.2524        | nan             | 0.1686             |
+| 0.3811 | 4250 | 0.2502        | nan             | 0.0000             |
+| 0.4035 | 4500 | 0.0           | nan             | 0.0000             |
+| 0.4259 | 4750 | 0.0           | nan             | 0.0000             |
+| 0.4484 | 5000 | 0.0           | nan             | 0.0000             |
 
 
 ### Framework Versions
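
The card's loss block changes `scale` from 20.0 to 7.0. For reference, a hedged sketch of how that configuration maps onto the sentence-transformers API (this is not the author's training script, and the dataset wiring is omitted):

```python
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.util import cos_sim

model = SentenceTransformer("google/embeddinggemma-300m")
# scale multiplies the cosine similarities before the cross-entropy over
# in-batch negatives; the (anchor, positive, negative) columns above add
# one explicit hard negative per anchor on top of the in-batch ones.
loss = losses.MultipleNegativesRankingLoss(model, scale=7.0, similarity_fct=cos_sim)
```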
config_sentence_transformers.json CHANGED
@@ -6,8 +6,20 @@
         "pytorch": "2.9.1+cu128"
     },
     "prompts": {
-        "query": "",
-        "document": ""
+        "query": "task: search result | query: ",
+        "document": "title: none | text: ",
+        "BitextMining": "task: search result | query: ",
+        "Clustering": "task: clustering | query: ",
+        "Classification": "task: classification | query: ",
+        "InstructionRetrieval": "task: code retrieval | query: ",
+        "MultilabelClassification": "task: classification | query: ",
+        "PairClassification": "task: sentence similarity | query: ",
+        "Reranking": "task: search result | query: ",
+        "Retrieval": "task: search result | query: ",
+        "Retrieval-query": "task: search result | query: ",
+        "Retrieval-document": "title: none | text: ",
+        "STS": "task: sentence similarity | query: ",
+        "Summarization": "task: summarization | query: "
     },
     "default_prompt_name": null,
     "similarity_fn_name": "cosine"
modules.json CHANGED
@@ -10,5 +10,23 @@
         "name": "1",
         "path": "1_Pooling",
         "type": "sentence_transformers.models.Pooling"
+    },
+    {
+        "idx": 2,
+        "name": "2",
+        "path": "2_Dense",
+        "type": "sentence_transformers.models.Dense"
+    },
+    {
+        "idx": 3,
+        "name": "3",
+        "path": "3_Dense",
+        "type": "sentence_transformers.models.Dense"
+    },
+    {
+        "idx": 4,
+        "name": "4",
+        "path": "4_Normalize",
+        "type": "sentence_transformers.models.Normalize"
     }
 ]
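
With these entries, loading the model instantiates all five modules in index order and applies them as a pipeline, matching the architecture printed in the README. A quick way to confirm (`SentenceTransformer` subclasses `nn.Sequential`, so it can be iterated):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("redis/model-b-structured")
for idx, module in enumerate(model):
    print(idx, type(module).__name__)
# Expected:
# 0 Transformer
# 1 Pooling
# 2 Dense
# 3 Dense
# 4 Normalize
```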