Add SetFit model
Files changed:

- 1_Pooling/config.json (+10, -0)
- README.md (+229, -0)
- config.json (+23, -0)
- config_sentence_transformers.json (+10, -0)
- config_setfit.json (+8, -0)
- model.safetensors (+3, -0)
- model_head.pkl (+3, -0)
- modules.json (+14, -0)
- sentence_bert_config.json (+4, -0)
- special_tokens_map.json (+51, -0)
- tokenizer.json (+0, -0)
- tokenizer_config.json (+60, -0)
- vocab.txt (+0, -0)
1_Pooling/config.json
ADDED
```json
{
    "word_embedding_dimension": 768,
    "pooling_mode_cls_token": false,
    "pooling_mode_mean_tokens": true,
    "pooling_mode_max_tokens": false,
    "pooling_mode_mean_sqrt_len_tokens": false,
    "pooling_mode_weightedmean_tokens": false,
    "pooling_mode_lasttoken": false,
    "include_prompt": true
}
```
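This pooling config enables only `pooling_mode_mean_tokens`, so the sentence embedding is the attention-masked mean of the 768-dimensional token embeddings. A minimal sketch of what that computes, assuming `token_embeddings` and `attention_mask` tensors coming out of the transformer body:

```python
import torch

def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Masked mean pooling, as configured in 1_Pooling/config.json (a sketch)."""
    # token_embeddings: (batch, seq_len, 768); attention_mask: (batch, seq_len)
    mask = attention_mask.unsqueeze(-1).float()       # (batch, seq_len, 1)
    summed = (token_embeddings * mask).sum(dim=1)     # sum over real (non-padding) tokens
    counts = mask.sum(dim=1).clamp(min=1e-9)          # number of real tokens per sentence
    return summed / counts                            # (batch, 768) sentence embeddings
```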
README.md
ADDED
````markdown
---
tags:
- setfit
- sentence-transformers
- text-classification
- generated_from_setfit_trainer
widget:
- text: has a similar tone to the raining example, acknowledges that it's right but
    then asserts from his own perspective that it's not.
- text: it contradicted itself
- text: it doesn't make sense because it a repetition of statement in different form
    that is to say answering a question and not been sure of the answer.
- text: because the fact and opinion were different
- text: the first part of the sentence stated that cyber bullying was wrong but then
    the second part said they didn’t believe it was wrong and that didn’t make sense.
metrics:
- accuracy
- precision
- recall
- f1
pipeline_tag: text-classification
library_name: setfit
inference: true
base_model: sentence-transformers/paraphrase-mpnet-base-v2
model-index:
- name: SetFit with sentence-transformers/paraphrase-mpnet-base-v2
  results:
  - task:
      type: text-classification
      name: Text Classification
    dataset:
      name: Unknown
      type: unknown
      split: test
    metrics:
    - type: accuracy
      value: 0.8421052631578947
      name: Accuracy
    - type: precision
      value: 0.8238095238095239
      name: Precision
    - type: recall
      value: 0.6436781609195402
      name: Recall
    - type: f1
      value: 0.6768796175575836
      name: F1
---

# SetFit with sentence-transformers/paraphrase-mpnet-base-v2

This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification.

The model has been trained using an efficient few-shot learning technique that involves:

1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.
2. Training a classification head with features from the fine-tuned Sentence Transformer.

## Model Details

### Model Description
- **Model Type:** SetFit
- **Sentence Transformer body:** [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2)
- **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance
- **Maximum Sequence Length:** 512 tokens
- **Number of Classes:** 3 classes
<!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) -->
<!-- - **Language:** Unknown -->
<!-- - **License:** Unknown -->

### Model Sources

- **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit)
- **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055)
- **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit)

### Model Labels
| Label | Examples |
|:------|:---------|
| Enrichment / reinterpretation | <ul><li>"it made sense because it is tom's opinion that cyberbullying is not wrong."</li><li>'general opnion versues personal opion, both can be correct'</li><li>"the second part of the statement contradicted the first part, i.e. the first part made a statement of fact and the second part stated the opinion that it wasn't a fact."</li></ul> |
| Linguistic (in)felicity | <ul><li>"if the person speaking states helping the homeless is right how can they also have the opinion that it's wrong? it doesn't make sense. surely they should of said others may think helping the homeless is right but i think it is wrong."</li><li>'the second part which say it is not compassionate contradicts the first part which say it is'</li><li>"the first half of the sentence makes a statement, but the second half contradicts what the statement says, as it is an opinion saying the opposite. if the same person is speaking, it doesn't make sense to make a statement and then contradict it in the same sentence"</li></ul> |
| Lack of understanding / clear misunderstanding | <ul><li>'sentence makes sense but the message is wrong'</li><li>'it statement didnt make any sense, for us to better understand, tom needs to further explain his reason for stating why its not cruel after first saying it is'</li><li>'he is saying somehhing with opposing meaning'</li></ul> |

## Evaluation

### Metrics
| Label   | Accuracy | Precision | Recall | F1     |
|:--------|:---------|:----------|:-------|:-------|
| **all** | 0.8421   | 0.8238    | 0.6437 | 0.6769 |

## Uses

### Direct Use for Inference

First install the SetFit library:

```bash
pip install setfit
```

Then you can load this model and run inference.

```python
from setfit import SetFitModel

# Download from the 🤗 Hub
model = SetFitModel.from_pretrained("setfit_model_id")
# Run inference
preds = model("it contradicted itself")
```

<!--
### Downstream Use

*List how someone could finetune this model on their own dataset.*
-->

<!--
### Out-of-Scope Use

*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->

<!--
## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!--
### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Set Metrics
| Training set | Min | Median | Max |
|:-------------|:----|:-------|:----|
| Word count   | 2   | 16.625 | 92  |

| Label | Training Sample Count |
|:------|:----------------------|
| Enrichment / reinterpretation | 36 |
| Lack of understanding / clear misunderstanding | 8 |
| Linguistic (in)felicity | 108 |

### Training Hyperparameters
- batch_size: (16, 16)
- num_epochs: (2, 2)
- max_steps: -1
- sampling_strategy: oversampling
- num_iterations: 20
- body_learning_rate: (2e-05, 2e-05)
- head_learning_rate: 2e-05
- loss: CosineSimilarityLoss
- distance_metric: cosine_distance
- margin: 0.25
- end_to_end: False
- use_amp: False
- warmup_proportion: 0.1
- l2_weight: 0.01
- seed: 3786
- eval_max_steps: -1
- load_best_model_at_end: False

### Training Results
| Epoch  | Step | Training Loss | Validation Loss |
|:------:|:----:|:-------------:|:---------------:|
| 0.0026 | 1    | 0.2637        | -               |
| 0.1316 | 50   | 0.2039        | -               |
| 0.2632 | 100  | 0.0495        | -               |
| 0.3947 | 150  | 0.0032        | -               |
| 0.5263 | 200  | 0.0022        | -               |
| 0.6579 | 250  | 0.0005        | -               |
| 0.7895 | 300  | 0.0005        | -               |
| 0.9211 | 350  | 0.002         | -               |
| 1.0526 | 400  | 0.0003        | -               |
| 1.1842 | 450  | 0.0012        | -               |
| 1.3158 | 500  | 0.0007        | -               |
| 1.4474 | 550  | 0.001         | -               |
| 1.5789 | 600  | 0.0004        | -               |
| 1.7105 | 650  | 0.0003        | -               |
| 1.8421 | 700  | 0.0002        | -               |
| 1.9737 | 750  | 0.0002        | -               |

### Framework Versions
- Python: 3.11.9
- SetFit: 1.1.2
- Sentence Transformers: 4.1.0
- Transformers: 4.52.4
- PyTorch: 2.7.1
- Datasets: 3.6.0
- Tokenizers: 0.21.1

## Citation

### BibTeX
```bibtex
@article{https://doi.org/10.48550/arxiv.2209.11055,
    doi = {10.48550/ARXIV.2209.11055},
    url = {https://arxiv.org/abs/2209.11055},
    author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},
    keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
    title = {Efficient Few-Shot Learning Without Prompts},
    publisher = {arXiv},
    year = {2022},
    copyright = {Creative Commons Attribution 4.0 International}
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
````
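The hyperparameters listed in the card map directly onto `setfit.TrainingArguments`. A rough sketch of how a model like this could be reproduced; the two-example dataset below is a placeholder, since the actual 152-sample training set is not part of this repository:

```python
from datasets import Dataset
from setfit import SetFitModel, Trainer, TrainingArguments

# Placeholder few-shot data; the real training set is not published with the model.
train_dataset = Dataset.from_dict({
    "text": ["it contradicted itself", "general opnion versues personal opion, both can be correct"],
    "label": ["Linguistic (in)felicity", "Enrichment / reinterpretation"],
})

model = SetFitModel.from_pretrained(
    "sentence-transformers/paraphrase-mpnet-base-v2",
    labels=[
        "Enrichment / reinterpretation",
        "Lack of understanding / clear misunderstanding",
        "Linguistic (in)felicity",
    ],
)

args = TrainingArguments(
    batch_size=16,
    num_epochs=2,
    num_iterations=20,                 # contrastive pairs generated per training sample
    body_learning_rate=2e-5,
    head_learning_rate=2e-5,
    sampling_strategy="oversampling",
    seed=3786,
)

trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
trainer.train()
metrics = trainer.evaluate(train_dataset)  # would normally use a held-out test split
```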
config.json
ADDED
```json
{
  "architectures": [
    "MPNetModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "mpnet",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "relative_attention_num_buckets": 32,
  "torch_dtype": "float32",
  "transformers_version": "4.52.4",
  "vocab_size": 30527
}
```
config_sentence_transformers.json
ADDED
```json
{
  "__version__": {
    "sentence_transformers": "4.1.0",
    "transformers": "4.52.4",
    "pytorch": "2.7.1"
  },
  "prompts": {},
  "default_prompt_name": null,
  "similarity_fn_name": "cosine"
}
```
config_setfit.json
ADDED
```json
{
  "normalize_embeddings": false,
  "labels": [
    "Enrichment / reinterpretation",
    "Lack of understanding / clear misunderstanding",
    "Linguistic (in)felicity"
  ]
}
```
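Because the label strings are stored in `config_setfit.json`, `SetFitModel.predict` returns these names directly instead of integer class ids. A small usage sketch ("setfit_model_id" is the same placeholder the card itself uses):

```python
from setfit import SetFitModel

model = SetFitModel.from_pretrained("setfit_model_id")  # placeholder id, as in the card
preds = model.predict([
    "it contradicted itself",
    "because the fact and opinion were different",
])
print(preds)  # e.g. ['Linguistic (in)felicity', 'Enrichment / reinterpretation']

# Per-class probabilities from the LogisticRegression head, in the label order above
probs = model.predict_proba(["it contradicted itself"])
```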
model.safetensors
ADDED
Git LFS pointer:

```
version https://git-lfs.github.com/spec/v1
oid sha256:4e434945caf6376493fe52ba65083ac22a0357d77d74c343dc08965155598f3c
size 437967672
```
model_head.pkl
ADDED
Git LFS pointer:

```
version https://git-lfs.github.com/spec/v1
oid sha256:6275f644da652b1528ce79f4ce30114a7650049e97202bd80acc3a6873c746ef
size 19855
```
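The head is the pickled scikit-learn LogisticRegression; `SetFitModel.from_pretrained` loads it automatically, so unpickling it by hand is only useful for direct inspection. A sketch, assuming the file has been downloaded locally (only unpickle files you trust):

```python
import pickle

with open("model_head.pkl", "rb") as f:
    head = pickle.load(f)  # a sklearn.linear_model.LogisticRegression

print(type(head).__name__)  # LogisticRegression
print(head.coef_.shape)     # expected (3, 768): one weight vector per class over the embedding
print(head.classes_)        # the three classes (string labels or integer ids, depending on encoding)
```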
modules.json
ADDED
```json
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  }
]
```
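`modules.json` tells sentence-transformers to chain module 0 (the MPNet encoder, stored at the repository root) into module 1 (the mean-pooling layer in `1_Pooling/`). The equivalent manual construction, sketched with the `sentence_transformers.models` API:

```python
from sentence_transformers import SentenceTransformer, models

# Module 0: the transformer body (path "" = repository root in this checkpoint)
word = models.Transformer("sentence-transformers/paraphrase-mpnet-base-v2", max_seq_length=512)
# Module 1: mean pooling over token embeddings (path "1_Pooling")
pool = models.Pooling(word.get_word_embedding_dimension(), pooling_mode="mean")

encoder = SentenceTransformer(modules=[word, pool])
embeddings = encoder.encode(["it contradicted itself"])  # shape (1, 768)
```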
sentence_bert_config.json
ADDED
```json
{
  "max_seq_length": 512,
  "do_lower_case": false
}
```
special_tokens_map.json
ADDED
```json
{
  "bos_token":  {"content": "<s>",    "lstrip": false, "normalized": false, "rstrip": false, "single_word": false},
  "cls_token":  {"content": "<s>",    "lstrip": false, "normalized": false, "rstrip": false, "single_word": false},
  "eos_token":  {"content": "</s>",   "lstrip": false, "normalized": false, "rstrip": false, "single_word": false},
  "mask_token": {"content": "<mask>", "lstrip": true,  "normalized": false, "rstrip": false, "single_word": false},
  "pad_token":  {"content": "<pad>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false},
  "sep_token":  {"content": "</s>",   "lstrip": false, "normalized": false, "rstrip": false, "single_word": false},
  "unk_token":  {"content": "[UNK]",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false}
}
```
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json
ADDED
```json
{
  "added_tokens_decoder": {
    "0":     {"content": "<s>",    "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "1":     {"content": "<pad>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "2":     {"content": "</s>",   "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "104":   {"content": "[UNK]",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "30526": {"content": "<mask>", "lstrip": true,  "normalized": false, "rstrip": false, "single_word": false, "special": true}
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "cls_token": "<s>",
  "do_basic_tokenize": true,
  "do_lower_case": true,
  "eos_token": "</s>",
  "extra_special_tokens": {},
  "mask_token": "<mask>",
  "model_max_length": 512,
  "never_split": null,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "MPNetTokenizer",
  "unk_token": "[UNK]"
}
```
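The tokenizer lowercases input (`do_lower_case: true`) and caps sequences at `model_max_length: 512`. A quick sanity check, reusing the placeholder model id from the card:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("setfit_model_id")  # placeholder id, as in the card
enc = tok("It Contradicted Itself", truncation=True, max_length=512)
print(tok.convert_ids_to_tokens(enc["input_ids"]))
# roughly: ['<s>', 'it', 'contradicted', 'itself', '</s>'], lowercased and wrapped in <s> ... </s>
```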
vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.