lealdaniel committed
Commit caaadd4 · verified · 1 Parent(s): 5e06627

Delete discipline-model-v3

discipline-model-v3/1_Pooling/config.json DELETED
@@ -1,10 +0,0 @@
- {
- "word_embedding_dimension": 768,
- "pooling_mode_cls_token": false,
- "pooling_mode_mean_tokens": true,
- "pooling_mode_max_tokens": false,
- "pooling_mode_mean_sqrt_len_tokens": false,
- "pooling_mode_weightedmean_tokens": false,
- "pooling_mode_lasttoken": false,
- "include_prompt": true
- }
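
The deleted pooling config enables only `pooling_mode_mean_tokens`; every other mode is off. For reference, a minimal sketch of what mean pooling computes under the standard sentence-transformers semantics (an illustration, not code from this repo):

```python
import torch

def mean_pooling(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Average token embeddings over real (non-padding) tokens.

    Mirrors the deleted config: word_embedding_dimension=768,
    pooling_mode_mean_tokens=true, all other pooling modes disabled.
    """
    # Broadcast the attention mask across the embedding dimension so
    # padded positions contribute nothing to the sum.
    mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    summed = (token_embeddings * mask).sum(dim=1)
    counts = mask.sum(dim=1).clamp(min=1e-9)  # guard against all-padding rows
    return summed / counts  # shape: (batch_size, 768)
```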
discipline-model-v3/README.md DELETED
@@ -1,561 +0,0 @@
- ---
- tags:
- - sentence-transformers
- - sentence-similarity
- - feature-extraction
- - generated_from_trainer
- - dataset_size:5005
- - loss:MultipleNegativesRankingLoss
- base_model: sentence-transformers/all-mpnet-base-v2
- widget:
- - source_sentence: especialista de risco e prevenção a fraudes​
- sentences:
- - risk & compliance
- - internal communication
- - accounting
- - source_sentence: coord integracao do cliente ii
- sentences:
- - strategic planning
- - customer experience
- - não encontrado (adicione nas observações)
- - source_sentence: gerente sr. marketing e performance
- sentences:
- - business operations
- - d&i
- - performance marketing
- - source_sentence: gerente executivo de operacoes
- sentences:
- - business operations
- - sdr
- - product management
- - source_sentence: sr designer
- sentences:
- - product design
- - talent acquisition
- - lawyer
- pipeline_tag: sentence-similarity
- library_name: sentence-transformers
- metrics:
- - cosine_accuracy@1
- - cosine_accuracy@3
- - cosine_accuracy@5
- - cosine_accuracy@10
- - cosine_precision@1
- - cosine_precision@3
- - cosine_precision@5
- - cosine_precision@10
- - cosine_recall@1
- - cosine_recall@3
- - cosine_recall@5
- - cosine_recall@10
- - cosine_ndcg@10
- - cosine_mrr@10
- - cosine_map@100
- - dot_accuracy@1
- - dot_accuracy@3
- - dot_accuracy@5
- - dot_accuracy@10
- - dot_precision@1
- - dot_precision@3
- - dot_precision@5
- - dot_precision@10
- - dot_recall@1
- - dot_recall@3
- - dot_recall@5
- - dot_recall@10
- - dot_ndcg@10
- - dot_mrr@10
- - dot_map@100
- model-index:
- - name: SentenceTransformer based on sentence-transformers/all-mpnet-base-v2
- results:
- - task:
- type: information-retrieval
- name: Information Retrieval
- dataset:
- name: Unknown
- type: unknown
- metrics:
- - type: cosine_accuracy@1
- value: 0.6245583038869258
- name: Cosine Accuracy@1
- - type: cosine_accuracy@3
- value: 0.8206713780918727
- name: Cosine Accuracy@3
- - type: cosine_accuracy@5
- value: 0.8754416961130742
- name: Cosine Accuracy@5
- - type: cosine_accuracy@10
- value: 0.926678445229682
- name: Cosine Accuracy@10
- - type: cosine_precision@1
- value: 0.6245583038869258
- name: Cosine Precision@1
- - type: cosine_precision@3
- value: 0.2735571260306242
- name: Cosine Precision@3
- - type: cosine_precision@5
- value: 0.17508833922261482
- name: Cosine Precision@5
- - type: cosine_precision@10
- value: 0.0926678445229682
- name: Cosine Precision@10
- - type: cosine_recall@1
- value: 0.6245583038869258
- name: Cosine Recall@1
- - type: cosine_recall@3
- value: 0.8206713780918727
- name: Cosine Recall@3
- - type: cosine_recall@5
- value: 0.8754416961130742
- name: Cosine Recall@5
- - type: cosine_recall@10
- value: 0.926678445229682
- name: Cosine Recall@10
- - type: cosine_ndcg@10
- value: 0.7790196193570564
- name: Cosine Ndcg@10
- - type: cosine_mrr@10
- value: 0.7312496494475299
- name: Cosine Mrr@10
- - type: cosine_map@100
- value: 0.7347864977321262
- name: Cosine Map@100
- - type: dot_accuracy@1
- value: 0.6245583038869258
- name: Dot Accuracy@1
- - type: dot_accuracy@3
- value: 0.8206713780918727
- name: Dot Accuracy@3
- - type: dot_accuracy@5
- value: 0.8754416961130742
- name: Dot Accuracy@5
- - type: dot_accuracy@10
- value: 0.926678445229682
- name: Dot Accuracy@10
- - type: dot_precision@1
- value: 0.6245583038869258
- name: Dot Precision@1
- - type: dot_precision@3
- value: 0.2735571260306242
- name: Dot Precision@3
- - type: dot_precision@5
- value: 0.17508833922261482
- name: Dot Precision@5
- - type: dot_precision@10
- value: 0.0926678445229682
- name: Dot Precision@10
- - type: dot_recall@1
- value: 0.6245583038869258
- name: Dot Recall@1
- - type: dot_recall@3
- value: 0.8206713780918727
- name: Dot Recall@3
- - type: dot_recall@5
- value: 0.8754416961130742
- name: Dot Recall@5
- - type: dot_recall@10
- value: 0.926678445229682
- name: Dot Recall@10
- - type: dot_ndcg@10
- value: 0.7790196193570564
- name: Dot Ndcg@10
- - type: dot_mrr@10
- value: 0.7312496494475299
- name: Dot Mrr@10
- - type: dot_map@100
- value: 0.7347864977321262
- name: Dot Map@100
- ---
-
- # SentenceTransformer based on sentence-transformers/all-mpnet-base-v2
-
- This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.
-
- ## Model Details
-
- ### Model Description
- - **Model Type:** Sentence Transformer
- - **Base model:** [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) <!-- at revision 9a3225965996d404b775526de6dbfe85d3368642 -->
- - **Maximum Sequence Length:** 384 tokens
- - **Output Dimensionality:** 768 dimensions
- - **Similarity Function:** Cosine Similarity
- <!-- - **Training Dataset:** Unknown -->
- <!-- - **Language:** Unknown -->
- <!-- - **License:** Unknown -->
-
- ### Model Sources
-
- - **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)
-
- ### Full Model Architecture
-
- ```
- SentenceTransformer(
- (0): Transformer({'max_seq_length': 384, 'do_lower_case': False}) with Transformer model: MPNetModel
- (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
- (2): Normalize()
- )
- ```
-
- ## Usage
-
- ### Direct Usage (Sentence Transformers)
-
- First install the Sentence Transformers library:
-
- ```bash
- pip install -U sentence-transformers
- ```
-
- Then you can load this model and run inference.
- ```python
- from sentence_transformers import SentenceTransformer
-
- # Download from the 🤗 Hub
- model = SentenceTransformer("sentence_transformers_model_id")
- # Run inference
- sentences = [
- 'sr designer',
- 'product design',
- 'talent acquisition',
- ]
- embeddings = model.encode(sentences)
- print(embeddings.shape)
- # [3, 768]
-
- # Get the similarity scores for the embeddings
- similarities = model.similarity(embeddings, embeddings)
- print(similarities.shape)
- # [3, 3]
- ```
-
- <!--
- ### Direct Usage (Transformers)
-
- <details><summary>Click to see the direct usage in Transformers</summary>
-
- </details>
- -->
-
- <!--
- ### Downstream Usage (Sentence Transformers)
-
- You can finetune this model on your own dataset.
-
- <details><summary>Click to expand</summary>
-
- </details>
- -->
-
- <!--
- ### Out-of-Scope Use
-
- *List how the model may foreseeably be misused and address what users ought not to do with the model.*
- -->
-
- ## Evaluation
-
- ### Metrics
-
- #### Information Retrieval
-
- * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)
-
- | Metric | Value |
- |:--------------------|:-----------|
- | cosine_accuracy@1 | 0.6246 |
- | cosine_accuracy@3 | 0.8207 |
- | cosine_accuracy@5 | 0.8754 |
- | cosine_accuracy@10 | 0.9267 |
- | cosine_precision@1 | 0.6246 |
- | cosine_precision@3 | 0.2736 |
- | cosine_precision@5 | 0.1751 |
- | cosine_precision@10 | 0.0927 |
- | cosine_recall@1 | 0.6246 |
- | cosine_recall@3 | 0.8207 |
- | cosine_recall@5 | 0.8754 |
- | cosine_recall@10 | 0.9267 |
- | cosine_ndcg@10 | 0.779 |
- | cosine_mrr@10 | 0.7312 |
- | **cosine_map@100** | **0.7348** |
- | dot_accuracy@1 | 0.6246 |
- | dot_accuracy@3 | 0.8207 |
- | dot_accuracy@5 | 0.8754 |
- | dot_accuracy@10 | 0.9267 |
- | dot_precision@1 | 0.6246 |
- | dot_precision@3 | 0.2736 |
- | dot_precision@5 | 0.1751 |
- | dot_precision@10 | 0.0927 |
- | dot_recall@1 | 0.6246 |
- | dot_recall@3 | 0.8207 |
- | dot_recall@5 | 0.8754 |
- | dot_recall@10 | 0.9267 |
- | dot_ndcg@10 | 0.779 |
- | dot_mrr@10 | 0.7312 |
- | dot_map@100 | 0.7348 |
-
- <!--
- ## Bias, Risks and Limitations
-
- *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
- -->
-
- <!--
- ### Recommendations
-
- *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
- -->
-
- ## Training Details
-
- ### Training Dataset
-
- #### Unnamed Dataset
-
-
- * Size: 5,005 training samples
- * Columns: <code>input</code> and <code>output</code>
- * Approximate statistics based on the first 1000 samples:
- | | input | output |
- |:--------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|
- | type | string | string |
- | details | <ul><li>min: 3 tokens</li><li>mean: 8.83 tokens</li><li>max: 21 tokens</li></ul> | <ul><li>min: 3 tokens</li><li>mean: 7.21 tokens</li><li>max: 18 tokens</li></ul> |
- * Samples:
- | input | output |
- |:--------------------------------------------|:-------------------------------------------------------|
- | <code>fresador mecanico ii</code> | <code>não encontrado (adicione nas observações)</code> |
- | <code>analista de sistemas ui ux iii</code> | <code>product design</code> |
- | <code>devops</code> | <code>devops engineering</code> |
- * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:
- ```json
- {
- "scale": 20.0,
- "similarity_fct": "cos_sim"
- }
- ```
-
- ### Evaluation Dataset
-
- #### Unnamed Dataset
-
-
- * Size: 1,132 evaluation samples
- * Columns: <code>input</code> and <code>output</code>
- * Approximate statistics based on the first 1000 samples:
- | | input | output |
- |:--------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|
- | type | string | string |
- | details | <ul><li>min: 3 tokens</li><li>mean: 8.76 tokens</li><li>max: 20 tokens</li></ul> | <ul><li>min: 3 tokens</li><li>mean: 7.08 tokens</li><li>max: 18 tokens</li></ul> |
- * Samples:
- | input | output |
- |:-----------------------------------------|:-------------------------------------------------------|
- | <code>produtor (a) de video pleno</code> | <code>não encontrado (adicione nas observações)</code> |
- | <code>ai staff software engineer</code> | <code>software engineering</code> |
- | <code>montador digital i</code> | <code>não encontrado (adicione nas observações)</code> |
- * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:
- ```json
- {
- "scale": 20.0,
- "similarity_fct": "cos_sim"
- }
- ```
-
- ### Training Hyperparameters
- #### Non-Default Hyperparameters
-
- - `eval_strategy`: steps
- - `warmup_ratio`: 0.1
-
- #### All Hyperparameters
- <details><summary>Click to expand</summary>
-
- - `overwrite_output_dir`: False
- - `do_predict`: False
- - `eval_strategy`: steps
- - `prediction_loss_only`: True
- - `per_device_train_batch_size`: 8
- - `per_device_eval_batch_size`: 8
- - `per_gpu_train_batch_size`: None
- - `per_gpu_eval_batch_size`: None
- - `gradient_accumulation_steps`: 1
- - `eval_accumulation_steps`: None
- - `torch_empty_cache_steps`: None
- - `learning_rate`: 5e-05
- - `weight_decay`: 0.0
- - `adam_beta1`: 0.9
- - `adam_beta2`: 0.999
- - `adam_epsilon`: 1e-08
- - `max_grad_norm`: 1.0
- - `num_train_epochs`: 3.0
- - `max_steps`: -1
- - `lr_scheduler_type`: linear
- - `lr_scheduler_kwargs`: {}
- - `warmup_ratio`: 0.1
- - `warmup_steps`: 0
- - `log_level`: passive
- - `log_level_replica`: warning
- - `log_on_each_node`: True
- - `logging_nan_inf_filter`: True
- - `save_safetensors`: True
- - `save_on_each_node`: False
- - `save_only_model`: False
- - `restore_callback_states_from_checkpoint`: False
- - `no_cuda`: False
- - `use_cpu`: False
- - `use_mps_device`: False
- - `seed`: 42
- - `data_seed`: None
- - `jit_mode_eval`: False
- - `use_ipex`: False
- - `bf16`: False
- - `fp16`: False
- - `fp16_opt_level`: O1
- - `half_precision_backend`: auto
- - `bf16_full_eval`: False
- - `fp16_full_eval`: False
- - `tf32`: None
- - `local_rank`: 0
- - `ddp_backend`: None
- - `tpu_num_cores`: None
- - `tpu_metrics_debug`: False
- - `debug`: []
- - `dataloader_drop_last`: False
- - `dataloader_num_workers`: 0
- - `dataloader_prefetch_factor`: None
- - `past_index`: -1
- - `disable_tqdm`: False
- - `remove_unused_columns`: True
- - `label_names`: None
- - `load_best_model_at_end`: False
- - `ignore_data_skip`: False
- - `fsdp`: []
- - `fsdp_min_num_params`: 0
- - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- - `fsdp_transformer_layer_cls_to_wrap`: None
- - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- - `deepspeed`: None
- - `label_smoothing_factor`: 0.0
- - `optim`: adamw_torch
- - `optim_args`: None
- - `adafactor`: False
- - `group_by_length`: False
- - `length_column_name`: length
- - `ddp_find_unused_parameters`: None
- - `ddp_bucket_cap_mb`: None
- - `ddp_broadcast_buffers`: False
- - `dataloader_pin_memory`: True
- - `dataloader_persistent_workers`: False
- - `skip_memory_metrics`: True
- - `use_legacy_prediction_loop`: False
- - `push_to_hub`: False
- - `resume_from_checkpoint`: None
- - `hub_model_id`: None
- - `hub_strategy`: every_save
- - `hub_private_repo`: False
- - `hub_always_push`: False
- - `gradient_checkpointing`: False
- - `gradient_checkpointing_kwargs`: None
- - `include_inputs_for_metrics`: False
- - `eval_do_concat_batches`: True
- - `fp16_backend`: auto
- - `push_to_hub_model_id`: None
- - `push_to_hub_organization`: None
- - `mp_parameters`:
- - `auto_find_batch_size`: False
- - `full_determinism`: False
- - `torchdynamo`: None
- - `ray_scope`: last
- - `ddp_timeout`: 1800
- - `torch_compile`: False
- - `torch_compile_backend`: None
- - `torch_compile_mode`: None
- - `dispatch_batches`: None
- - `split_batches`: None
- - `include_tokens_per_second`: False
- - `include_num_input_tokens_seen`: False
- - `neftune_noise_alpha`: None
- - `optim_target_modules`: None
- - `batch_eval_metrics`: False
- - `eval_on_start`: False
- - `use_liger_kernel`: False
- - `eval_use_gather_object`: False
- - `batch_sampler`: batch_sampler
- - `multi_dataset_batch_sampler`: proportional
-
- </details>
-
- ### Training Logs
- | Epoch | Step | Training Loss | loss | cosine_map@100 |
- |:------:|:----:|:-------------:|:------:|:--------------:|
- | 0 | 0 | - | - | 0.3578 |
- | 0.3195 | 200 | - | 0.9975 | 0.5035 |
- | 0.6390 | 400 | - | 0.8471 | 0.5845 |
- | 0.7987 | 500 | 1.0355 | - | - |
- | 0.9585 | 600 | - | 0.7569 | 0.6157 |
- | 1.2780 | 800 | - | 0.7542 | 0.6565 |
- | 1.5974 | 1000 | 0.648 | 0.6835 | 0.6786 |
- | 1.9169 | 1200 | - | 0.6569 | 0.6851 |
- | 2.2364 | 1400 | - | 0.6480 | 0.7167 |
- | 2.3962 | 1500 | 0.5253 | - | - |
- | 2.5559 | 1600 | - | 0.6506 | 0.7110 |
- | 2.8754 | 1800 | - | 0.6391 | 0.7348 |
-
-
- ### Framework Versions
- - Python: 3.11.6
- - Sentence Transformers: 3.1.1
- - Transformers: 4.45.2
- - PyTorch: 2.5.1+cu124
- - Accelerate: 1.1.1
- - Datasets: 2.14.4
- - Tokenizers: 0.20.3
-
- ## Citation
-
- ### BibTeX
-
- #### Sentence Transformers
- ```bibtex
- @inproceedings{reimers-2019-sentence-bert,
- title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
- author = "Reimers, Nils and Gurevych, Iryna",
- booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
- month = "11",
- year = "2019",
- publisher = "Association for Computational Linguistics",
- url = "https://arxiv.org/abs/1908.10084",
- }
- ```
-
- #### MultipleNegativesRankingLoss
- ```bibtex
- @misc{henderson2017efficient,
- title={Efficient Natural Language Response Suggestion for Smart Reply},
- author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
- year={2017},
- eprint={1705.00652},
- archivePrefix={arXiv},
- primaryClass={cs.CL}
- }
- ```
-
- <!--
- ## Glossary
-
- *Clearly define terms in order to be accessible across audiences.*
- -->
-
- <!--
- ## Model Card Authors
-
- *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
- -->
-
- <!--
- ## Model Card Contact
-
- *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
- -->
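
The deleted card fine-tunes with `MultipleNegativesRankingLoss` (`scale=20.0`, `similarity_fct="cos_sim"`) but only lists the parameters. As a hedged sketch of the standard in-batch-negatives formulation (not this repo's code): each `input` embedding is scored against every `output` embedding in the batch, and cross-entropy pushes the matching pair above the rest:

```python
import torch
import torch.nn.functional as F

def mnr_loss(anchors: torch.Tensor, positives: torch.Tensor, scale: float = 20.0) -> torch.Tensor:
    """Multiple-negatives ranking loss with in-batch negatives.

    anchors / positives: (batch_size, dim) embeddings of the `input` and
    `output` columns; row i of `positives` is the positive for row i of
    `anchors`, and every other row serves as a negative.
    """
    # Scaled pairwise cosine similarities (scale=20.0, as in the card).
    scores = F.normalize(anchors, dim=-1) @ F.normalize(positives, dim=-1).T * scale
    labels = torch.arange(scores.size(0), device=scores.device)  # matching index
    return F.cross_entropy(scores, labels)
```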
discipline-model-v3/config.json DELETED
@@ -1,24 +0,0 @@
- {
- "_name_or_path": "sentence-transformers/all-mpnet-base-v2",
- "architectures": [
- "MPNetModel"
- ],
- "attention_probs_dropout_prob": 0.1,
- "bos_token_id": 0,
- "eos_token_id": 2,
- "hidden_act": "gelu",
- "hidden_dropout_prob": 0.1,
- "hidden_size": 768,
- "initializer_range": 0.02,
- "intermediate_size": 3072,
- "layer_norm_eps": 1e-05,
- "max_position_embeddings": 514,
- "model_type": "mpnet",
- "num_attention_heads": 12,
- "num_hidden_layers": 12,
- "pad_token_id": 1,
- "relative_attention_num_buckets": 32,
- "torch_dtype": "float32",
- "transformers_version": "4.45.2",
- "vocab_size": 30527
- }
discipline-model-v3/config_sentence_transformers.json DELETED
@@ -1,10 +0,0 @@
- {
- "__version__": {
- "sentence_transformers": "3.1.1",
- "transformers": "4.45.2",
- "pytorch": "2.5.1+cu124"
- },
- "prompts": {},
- "default_prompt_name": null,
- "similarity_fn_name": null
- }
discipline-model-v3/model.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:b5855a55cd3835eec991b1c6b1d902581ed783c5a6ac097472f3296a3e642cc6
- size 437967672
discipline-model-v3/modules.json DELETED
@@ -1,20 +0,0 @@
- [
- {
- "idx": 0,
- "name": "0",
- "path": "",
- "type": "sentence_transformers.models.Transformer"
- },
- {
- "idx": 1,
- "name": "1",
- "path": "1_Pooling",
- "type": "sentence_transformers.models.Pooling"
- },
- {
- "idx": 2,
- "name": "2",
- "path": "2_Normalize",
- "type": "sentence_transformers.models.Normalize"
- }
- ]
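
This modules.json chains three stages: a Transformer encoder, the 1_Pooling module above, and a final Normalize step. A minimal sketch of rebuilding the same pipeline with the public sentence-transformers modules API (assuming the base model and settings listed in the card):

```python
from sentence_transformers import SentenceTransformer, models

# Stage 0: MPNet encoder, truncating at the card's 384-token limit.
transformer = models.Transformer("sentence-transformers/all-mpnet-base-v2", max_seq_length=384)
# Stage 1: mean pooling over token embeddings (per 1_Pooling/config.json).
pooling = models.Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
# Stage 2: L2-normalization, so cosine similarity equals dot product.
normalize = models.Normalize()

model = SentenceTransformer(modules=[transformer, pooling, normalize])
```

The final Normalize stage also explains why the card's cosine and dot-product retrieval metrics are identical: on unit-length vectors the two similarities coincide.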
discipline-model-v3/sentence_bert_config.json DELETED
@@ -1,4 +0,0 @@
- {
- "max_seq_length": 384,
- "do_lower_case": false
- }
discipline-model-v3/special_tokens_map.json DELETED
@@ -1,51 +0,0 @@
- {
- "bos_token": {
- "content": "<s>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "cls_token": {
- "content": "<s>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "eos_token": {
- "content": "</s>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "mask_token": {
- "content": "<mask>",
- "lstrip": true,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "pad_token": {
- "content": "<pad>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "sep_token": {
- "content": "</s>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "unk_token": {
- "content": "[UNK]",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- }
- }
discipline-model-v3/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
discipline-model-v3/tokenizer_config.json DELETED
@@ -1,72 +0,0 @@
- {
- "added_tokens_decoder": {
- "0": {
- "content": "<s>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "1": {
- "content": "<pad>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "2": {
- "content": "</s>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "3": {
- "content": "<unk>",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "104": {
- "content": "[UNK]",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "30526": {
- "content": "<mask>",
- "lstrip": true,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- }
- },
- "bos_token": "<s>",
- "clean_up_tokenization_spaces": false,
- "cls_token": "<s>",
- "do_lower_case": true,
- "eos_token": "</s>",
- "mask_token": "<mask>",
- "max_length": 128,
- "model_max_length": 384,
- "pad_to_multiple_of": null,
- "pad_token": "<pad>",
- "pad_token_type_id": 0,
- "padding_side": "right",
- "sep_token": "</s>",
- "stride": 0,
- "strip_accents": null,
- "tokenize_chinese_chars": true,
- "tokenizer_class": "MPNetTokenizer",
- "truncation_side": "right",
- "truncation_strategy": "longest_first",
- "unk_token": "[UNK]"
- }
discipline-model-v3/vocab.txt DELETED
The diff for this file is too large to render. See raw diff