Ryenhails committed on
Commit
b532237
·
verified ·
1 Parent(s): cf8084c

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ banner.png filter=lfs diff=lfs merge=lfs -text
0_Transformer/config.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation": "gelu",
3
+ "architectures": [
4
+ "DistilBertModel"
5
+ ],
6
+ "attention_dropout": 0.1,
7
+ "dim": 768,
8
+ "dropout": 0.1,
9
+ "dtype": "float32",
10
+ "hidden_dim": 3072,
11
+ "initializer_range": 0.02,
12
+ "max_position_embeddings": 512,
13
+ "model_type": "distilbert",
14
+ "n_heads": 12,
15
+ "n_layers": 6,
16
+ "pad_token_id": 0,
17
+ "qa_dropout": 0.1,
18
+ "seq_classif_dropout": 0.2,
19
+ "sinusoidal_pos_embds": false,
20
+ "tie_weights_": true,
21
+ "transformers_version": "4.57.6",
22
+ "vocab_size": 30522
23
+ }
0_Transformer/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e13a257b8515a108d1bd687e409d8531577a73f7cad72c8342f48b90992922b6
3
+ size 265462608
0_Transformer/sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "max_seq_length": 512,
3
+ "do_lower_case": true
4
+ }
0_Transformer/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
0_Transformer/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
0_Transformer/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "100": {
12
+ "content": "[UNK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "101": {
20
+ "content": "[CLS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "102": {
28
+ "content": "[SEP]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "103": {
36
+ "content": "[MASK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": false,
45
+ "cls_token": "[CLS]",
46
+ "do_lower_case": true,
47
+ "extra_special_tokens": {},
48
+ "mask_token": "[MASK]",
49
+ "model_max_length": 512,
50
+ "pad_token": "[PAD]",
51
+ "sep_token": "[SEP]",
52
+ "strip_accents": null,
53
+ "tokenize_chinese_chars": true,
54
+ "tokenizer_class": "DistilBertTokenizer",
55
+ "unk_token": "[UNK]"
56
+ }
0_Transformer/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
1_Pooling/config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "word_embedding_dimension": 768,
3
+ "pooling_mode_cls_token": false,
4
+ "pooling_mode_mean_tokens": true,
5
+ "pooling_mode_max_tokens": false,
6
+ "pooling_mode_mean_sqrt_len_tokens": false,
7
+ "pooling_mode_weightedmean_tokens": false,
8
+ "pooling_mode_lasttoken": false
9
+ }
2_Dense/config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "in_features": 768,
3
+ "out_features": 768,
4
+ "bias": true,
5
+ "activation_function": "torch.nn.modules.activation.GELU"
6
+ }
2_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:96bce0986fa92c8845038b022e5928204a5dab7b730304154d38989a9563c716
3
+ size 2364309
3_Dense/config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "in_features": 768,
3
+ "out_features": 2048,
4
+ "bias": true,
5
+ "activation_function": "torch.nn.modules.linear.Identity"
6
+ }
3_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:93b4e4ddf2abd0b96bb573f55fb57559d28a056dc7a3658e7ddd1a6eca000dc4
3
+ size 6301589
README.md ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: sentence-transformers
3
+ pipeline_tag: sentence-similarity
4
+ tags:
5
+ - sentence-transformers
6
+ - feature-extraction
7
+ - sentence-similarity
8
+ - visual-document-retrieval
9
+ - cross-modal-distillation
10
+ - knowledge-distillation
11
+ - document-retrieval
12
+ - multilingual
13
+ - nanovdr
14
+ base_model: distilbert/distilbert-base-uncased
15
+ language:
16
+ - en
17
+ - de
18
+ - fr
19
+ - es
20
+ - it
21
+ - pt
22
+ license: apache-2.0
23
+ datasets:
24
+ - openbmb/VisRAG-Ret-Train-Synthetic-data
25
+ - openbmb/VisRAG-Ret-Train-In-domain-data
26
+ - vidore/colpali_train_set
27
+ - llamaindex/vdr-multilingual-train
28
+ model-index:
29
+ - name: NanoVDR-S-Multi
30
+ results:
31
+ - task:
32
+ type: retrieval
33
+ dataset:
34
+ name: ViDoRe v1
35
+ type: vidore/vidore-benchmark-667173f98e70a1c0fa4d
36
+ metrics:
37
+ - name: NDCG@5
38
+ type: ndcg_at_5
39
+ value: 82.2
40
+ - task:
41
+ type: retrieval
42
+ dataset:
43
+ name: ViDoRe v2
44
+ type: vidore/vidore-benchmark-v2
45
+ metrics:
46
+ - name: NDCG@5
47
+ type: ndcg_at_5
48
+ value: 61.9
49
+ ---
50
+
51
+ <p align="center">
52
+ <img width="560" src="banner.png" alt="NanoVDR"/>
53
+ </p>
54
+
55
+ <p align="center">
56
+ <a href="https://arxiv.org/abs/2502.XXXXX">Paper</a> |
57
+ <a href="https://github.com/nanovdr/nanovdr">GitHub</a> |
58
+ <a href="https://huggingface.co/collections/nanovdr/nanovdr">All Models</a>
59
+ </p>
60
+
61
+ # NanoVDR-S-Multi
62
+
63
+ **The recommended NanoVDR model for production use.**
64
+
65
+ NanoVDR-S-Multi is a **69M-parameter multilingual text-only** query encoder for visual document retrieval. It encodes text queries into the same embedding space as a frozen 2B VLM teacher ([Qwen3-VL-Embedding-2B](https://huggingface.co/Qwen/Qwen3-VL-Embedding-2B)), so you can retrieve document page images using **only a DistilBERT forward pass** — no vision model at query time.
66
+
67
+ ### Highlights
68
+
69
+ - **95.1% teacher retention** — a 69M text-only model recovers 95% of a 2B VLM teacher across 22 ViDoRe datasets
70
+ - **Outperforms DSE-Qwen2 (2B)** on multilingual v2 (+6.2) and v3 (+4.1) with **32x fewer parameters**
71
+ - **Outperforms ColPali (~3B)** on all three benchmarks with **single-vector cosine** retrieval (no MaxSim)
72
+ - **51 ms CPU query latency** — 50x faster than DSE-Qwen2, 143x faster than ColPali
73
+ - **6 languages**: English, German, French, Spanish, Italian, Portuguese — all >92% teacher retention
74
+
75
+ ---
76
+
77
+ ## Results
78
+
79
+ | Model | Type | Params | ViDoRe v1 | ViDoRe v2 | ViDoRe v3 | Avg Retention |
80
+ |-------|------|--------|-----------|-----------|-----------|---------------|
81
+ | Tomoro-8B | VLM | 8.0B | 90.6 | 65.0 | 59.0 | — |
82
+ | Qwen3-VL-Emb (Teacher) | VLM | 2.0B | 84.3 | 65.3 | 50.0 | — |
83
+ | DSE-Qwen2 | VLM | 2.2B | 85.1 | 55.7 | 42.4 | — |
84
+ | ColPali | VLM | ~3B | 84.2 | 54.7 | 42.0 | — |
85
+ | **NanoVDR-S-Multi** | **Text-only** | **69M** | **82.2** | **61.9** | **46.5** | **95.1%** |
86
+
87
+ <sub>NDCG@5 (×100). v1 = 10 English datasets, v2 = 4 multilingual datasets, v3 = 8 multilingual datasets.</sub>
88
+
89
+ ### Per-Language Retention (v2 + v3, 19,537 queries)
90
+
91
+ | Language | #Queries | Teacher | NanoVDR-S-Multi | Retention |
92
+ |----------|----------|---------|-----------------|-----------|
93
+ | English | 6,237 | 64.0 | 60.3 | 94.3% |
94
+ | French | 2,694 | 51.0 | 47.8 | 93.6% |
95
+ | Portuguese | 2,419 | 48.7 | 46.1 | 94.6% |
96
+ | Spanish | 2,694 | 51.4 | 47.8 | 93.1% |
97
+ | Italian | 2,419 | 49.0 | 45.7 | 93.3% |
98
+ | German | 2,694 | 49.3 | 45.4 | 92.0% |
99
+
100
+ All 6 languages achieve **>92%** of the 2B teacher's performance.
101
+
102
+ ---
103
+
104
+ ## Quick Start
105
+
106
+ ```python
107
+ from sentence_transformers import SentenceTransformer
108
+
109
+ model = SentenceTransformer("nanovdr/NanoVDR-S-Multi")
110
+
111
+ queries = [
112
+ "What was the revenue growth in Q3 2024?", # English
113
+ "Quel est le chiffre d'affaires du trimestre?", # French
114
+ "Wie hoch war das Umsatzwachstum im dritten Quartal?", # German
115
+ "¿Cuál fue el crecimiento de ingresos en el Q3?", # Spanish
116
+ "Qual foi o crescimento da receita no terceiro trimestre?", # Portuguese
117
+ "Qual è stata la crescita dei ricavi nel terzo trimestre?", # Italian
118
+ ]
119
+ query_embeddings = model.encode(queries)
120
+ print(query_embeddings.shape) # (6, 2048)
121
+
122
+ # Cosine similarity against pre-indexed document embeddings
123
+ # scores = query_embeddings @ doc_embeddings.T
124
+ ```
125
+
126
+ ### Prerequisites: Document Indexing with Teacher Model
127
+
128
+ NanoVDR is a **query encoder only**. Documents must be indexed offline using the teacher VLM ([Qwen3-VL-Embedding-2B](https://huggingface.co/Qwen/Qwen3-VL-Embedding-2B)), which encodes page images into 2048-d embeddings. This is a one-time cost.
129
+
130
+ ```python
131
+ # pip install transformers qwen-vl-utils torch
132
+ from scripts.qwen3_vl_embedding import Qwen3VLEmbedder
133
+
134
+ teacher = Qwen3VLEmbedder(model_name_or_path="Qwen/Qwen3-VL-Embedding-2B")
135
+
136
+ # Encode document page images
137
+ documents = [
138
+ {"image": "page_001.png"},
139
+ {"image": "page_002.png"},
140
+ # ... all document pages in your corpus
141
+ ]
142
+ doc_embeddings = teacher.process(documents) # (N, 2048), L2-normalized
143
+ ```
144
+
145
+ > **Note:** The `Qwen3VLEmbedder` class and full usage guide (including vLLM/SGLang acceleration) are available at the [Qwen3-VL-Embedding-2B model page](https://huggingface.co/Qwen/Qwen3-VL-Embedding-2B). Document indexing requires a GPU; once indexed, retrieval uses only CPU.
146
+
147
+ ### Full Retrieval Pipeline
148
+
149
+ ```python
150
+ import numpy as np
151
+ from sentence_transformers import SentenceTransformer
152
+
153
+ # doc_embeddings: (N, 2048) numpy array from teacher indexing above
154
+
155
+ # Step 1: Encode text queries with NanoVDR (CPU, ~51ms per query)
156
+ student = SentenceTransformer("nanovdr/NanoVDR-S-Multi")
157
+ query_emb = student.encode("Quel est le chiffre d'affaires?") # shape: (2048,)
158
+
159
+ # Step 2: Retrieve via cosine similarity
160
+ scores = query_emb @ doc_embeddings.T
161
+ top_k_indices = np.argsort(scores)[-5:][::-1]
162
+ ```
163
+
164
+ ---
165
+
166
+ ## How It Works
167
+
168
+ NanoVDR uses **asymmetric cross-modal distillation** to decouple query and document encoding:
169
+
170
+ | | Document Encoding (offline) | Query Encoding (online) |
171
+ |-|----------------------------|------------------------|
172
+ | **Model** | Qwen3-VL-Embedding-2B (frozen) | NanoVDR-S-Multi (69M) |
173
+ | **Input** | Page images | Text queries (6 languages) |
174
+ | **Output** | 2048-d embedding | 2048-d embedding |
175
+ | **Hardware** | GPU (one-time indexing) | CPU (real-time serving) |
176
+
177
+ The student is trained to **align query embeddings** with the teacher's query embeddings via pointwise cosine loss — no document embeddings or hard negatives are needed during training. At inference, student query embeddings are directly compatible with teacher document embeddings.
178
+
179
+ ---
180
+
181
+ ## Training
182
+
183
+ | | Value |
184
+ |--|-------|
185
+ | Base model | `distilbert/distilbert-base-uncased` (66M) |
186
+ | Projector | 2-layer MLP: 768 → 768 → 2048 (2.4M params) |
187
+ | Total params | 69M |
188
+ | Objective | Pointwise cosine alignment with teacher query embeddings |
189
+ | Training data | 1.49M pairs — 711K original + 778K translated queries |
190
+ | Languages | EN (original) + DE, FR, ES, IT, PT (translated via [Helsinki-NLP Opus-MT](https://huggingface.co/Helsinki-NLP)) |
191
+ | Epochs | 10 |
192
+ | Batch size | 1,024 (effective) |
193
+ | Learning rate | 3e-4 (OneCycleLR, 3% warmup) |
194
+ | Hardware | 1× H200 GPU |
195
+ | Training time | ~10 GPU-hours |
196
+ | Embedding caching | ~1 GPU-hour (teacher encodes all queries in text mode) |
197
+
198
+ ### Multilingual Augmentation Pipeline
199
+
200
+ 1. Extract 489K English queries from the 711K training set
201
+ 2. Translate each to 5 languages using Helsinki-NLP Opus-MT → 778K translated queries
202
+ 3. Re-encode translated queries with the frozen teacher in text mode (15 min on H200)
203
+ 4. Combine: 711K original + 778K translated = **1.49M training pairs**
204
+ 5. Train with halved epochs (10 vs 20) and slightly higher lr (3e-4 vs 2e-4) to match total steps
205
+
206
+ ---
207
+
208
+ ## Efficiency
209
+
210
+ | | NanoVDR-S-Multi | DSE-Qwen2 | ColPali | Tomoro-8B |
211
+ |--|-----------------|-----------|---------|-----------|
212
+ | Parameters | **69M** | 2,209M | ~3B | 8,000M |
213
+ | Query latency (CPU, B=1) | **51 ms** | 2,539 ms | 7,300 ms | GPU only |
214
+ | Checkpoint size | **274 MB** | 8.8 GB | 11.9 GB | 35.1 GB |
215
+ | Index type | Single-vector | Single-vector | Multi-vector | Multi-vector |
216
+ | Scoring | Cosine | Cosine | MaxSim | MaxSim |
217
+ | Index storage (500K pages) | **4.1 GB** | 3.1 GB | 128 GB | 128 GB |
218
+
219
+ ---
220
+
221
+ ## Model Variants
222
+
223
+ NanoVDR-S-Multi is the **recommended model**. The other variants are provided for research and ablation purposes.
224
+
225
+ | Model | Backbone | Params | v1 | v2 | v3 | Retention | Latency | Recommended |
226
+ |-------|----------|--------|----|----|----|-----------|---------|-------------|
227
+ | **[NanoVDR-S-Multi](https://huggingface.co/nanovdr/NanoVDR-S-Multi)** | **DistilBERT** | **69M** | **82.2** | **61.9** | **46.5** | **95.1%** | **51 ms** | **Yes** |
228
+ | [NanoVDR-S](https://huggingface.co/nanovdr/NanoVDR-S) | DistilBERT | 69M | 82.2 | 60.5 | 43.5 | 92.4% | 51 ms | EN-only |
229
+ | [NanoVDR-M](https://huggingface.co/nanovdr/NanoVDR-M) | BERT-base | 112M | 82.1 | 62.2 | 44.7 | 94.0% | 101 ms | Ablation |
230
+ | [NanoVDR-L](https://huggingface.co/nanovdr/NanoVDR-L) | ModernBERT | 151M | 82.4 | 61.5 | 44.2 | 93.4% | 109 ms | Ablation |
231
+
232
+ ## Key Properties
233
+
234
+ | Property | Value |
235
+ |----------|-------|
236
+ | Output dimension | 2048 (aligned with Qwen3-VL-Embedding-2B) |
237
+ | Max sequence length | 512 tokens |
238
+ | Supported languages | EN, DE, FR, ES, IT, PT |
239
+ | Similarity function | Cosine similarity |
240
+ | Pooling | Mean pooling |
241
+ | Normalization | L2-normalized |
242
+
243
+ ## Citation
244
+
245
+ ```bibtex
246
+ @article{nanovdr2026,
247
+ title={NanoVDR: Distilling a 2B Vision-Language Retriever into a 70M Text-Only Encoder for Visual Document Retrieval},
248
+ author={Liu, Zhuchenyang and Zhang, Yao and Xiao, Yu},
249
+ journal={arXiv preprint arXiv:2502.XXXXX},
250
+ year={2026}
251
+ }
252
+ ```
253
+
254
+ ## License
255
+
256
+ Apache 2.0
banner.png ADDED

Git LFS Details

  • SHA256: caec09fa91e56deeb2d53e7ca2613573fc67c4e5af50817437f67a606d8fe200
  • Pointer size: 132 Bytes
  • Size of remote file: 7.47 MB
config.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation": "gelu",
3
+ "architectures": [
4
+ "DistilBertModel"
5
+ ],
6
+ "attention_dropout": 0.1,
7
+ "dim": 768,
8
+ "dropout": 0.1,
9
+ "dtype": "float32",
10
+ "hidden_dim": 3072,
11
+ "initializer_range": 0.02,
12
+ "max_position_embeddings": 512,
13
+ "model_type": "distilbert",
14
+ "n_heads": 12,
15
+ "n_layers": 6,
16
+ "pad_token_id": 0,
17
+ "qa_dropout": 0.1,
18
+ "seq_classif_dropout": 0.2,
19
+ "sinusoidal_pos_embds": false,
20
+ "tie_weights_": true,
21
+ "transformers_version": "4.57.6",
22
+ "vocab_size": 30522
23
+ }
config_sentence_transformers.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "prompts": {},
3
+ "default_prompt_name": null,
4
+ "similarity_fn_name": "cosine"
5
+ }
modules.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "idx": 0,
4
+ "name": "0_Transformer",
5
+ "path": "0_Transformer",
6
+ "type": "sentence_transformers.models.Transformer"
7
+ },
8
+ {
9
+ "idx": 1,
10
+ "name": "1_Pooling",
11
+ "path": "1_Pooling",
12
+ "type": "sentence_transformers.models.Pooling"
13
+ },
14
+ {
15
+ "idx": 2,
16
+ "name": "2_Dense",
17
+ "path": "2_Dense",
18
+ "type": "sentence_transformers.models.Dense"
19
+ },
20
+ {
21
+ "idx": 3,
22
+ "name": "3_Dense",
23
+ "path": "3_Dense",
24
+ "type": "sentence_transformers.models.Dense"
25
+ },
26
+ {
27
+ "idx": 4,
28
+ "name": "4_Normalize",
29
+ "path": "4_Normalize",
30
+ "type": "sentence_transformers.models.Normalize"
31
+ }
32
+ ]
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "100": {
12
+ "content": "[UNK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "101": {
20
+ "content": "[CLS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "102": {
28
+ "content": "[SEP]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "103": {
36
+ "content": "[MASK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": false,
45
+ "cls_token": "[CLS]",
46
+ "do_lower_case": true,
47
+ "extra_special_tokens": {},
48
+ "mask_token": "[MASK]",
49
+ "model_max_length": 512,
50
+ "pad_token": "[PAD]",
51
+ "sep_token": "[SEP]",
52
+ "strip_accents": null,
53
+ "tokenize_chinese_chars": true,
54
+ "tokenizer_class": "DistilBertTokenizer",
55
+ "unk_token": "[UNK]"
56
+ }
vocab.txt ADDED
The diff for this file is too large to render. See raw diff