Training done
- added_tokens.json +29 -0
- preprocessor_config.json +26 -0
- special_tokens_map.json +8 -0
- tokenizer.json +0 -0
- tokenizer_config.json +18 -0
- vocab.txt +0 -0
added_tokens.json
ADDED
@@ -0,0 +1,29 @@
{
  "</s>": 32000,
  "</s_address_bn>": 32025,
  "</s_birth_place>": 32023,
  "</s_blood_group>": 32021,
  "</s_date_of_birth>": 32019,
  "</s_fathers_name_bn>": 32017,
  "</s_id_no>": 32015,
  "</s_mothers_name_bn>": 32013,
  "</s_mrz_code>": 32011,
  "</s_name_bn>": 32009,
  "</s_name_en>": 32007,
  "</s_post_code>": 32005,
  "<s_address_bn>": 32024,
  "<s_birth_place>": 32022,
  "<s_blood_group>": 32020,
  "<s_cord-v2>": 32026,
  "<s_date_of_birth>": 32018,
  "<s_fathers_name_bn>": 32016,
  "<s_id_no>": 32014,
  "<s_mothers_name_bn>": 32012,
  "<s_mrz_code>": 32010,
  "<s_name_bn>": 32008,
  "<s_name_en>": 32006,
  "<s_post_code>": 32004,
  "কুষ্টিয়া": 32001,
  "নারায়ণগঞ্জ": 32003,
  "ময়মনসিংহ": 32002
}
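The map above defines a Donut-style start/end tag for each NID field (English and Bengali name, parents' names, date of birth, ID number, blood group, birth place, address, post code, MRZ code), the <s_cord-v2> task prompt, the </s> end-of-sequence token, and three Bengali district names (কুষ্টিয়া/Kushtia, ময়মনসিংহ/Mymensingh, নারায়ণগঞ্জ/Narayanganj) kept as whole tokens. As a rough sketch only (this is not the training script behind this commit, and exact ids depend on insertion order), such a file is typically produced by extending the base tokenizer and then resizing the decoder embeddings:

# Rough sketch (assumption): extending the BanglaBERT tokenizer with the tokens
# listed in added_tokens.json. Exact ids depend on the order tokens are added.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("csebuetnlp/banglabert_large")

# End-of-sequence token used by the Donut-style decoder.
tokenizer.add_special_tokens({"eos_token": "</s>"})

# District names kept as single tokens instead of WordPiece pieces.
tokenizer.add_tokens(["কুষ্টিয়া", "ময়মনসিংহ", "নারায়ণগঞ্জ"])

# One start/end tag per NID field, plus the task prompt.
fields = ["post_code", "name_en", "name_bn", "mrz_code", "mothers_name_bn", "id_no",
          "fathers_name_bn", "date_of_birth", "blood_group", "birth_place", "address_bn"]
tags = [t for f in fields for t in (f"<s_{f}>", f"</s_{f}>")] + ["<s_cord-v2>"]
tokenizer.add_special_tokens({"additional_special_tokens": tags})

# The decoder's embedding matrix must grow to match the new vocabulary size, e.g.:
# model.decoder.resize_token_embeddings(len(tokenizer))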
preprocessor_config.json
ADDED
@@ -0,0 +1,26 @@
{
  "do_align_long_axis": false,
  "do_normalize": true,
  "do_pad": true,
  "do_rescale": true,
  "do_resize": true,
  "do_thumbnail": true,
  "image_mean": [
    0.5,
    0.5,
    0.5
  ],
  "image_processor_type": "DonutImageProcessor",
  "image_std": [
    0.5,
    0.5,
    0.5
  ],
  "processor_class": "DonutProcessor",
  "resample": 2,
  "rescale_factor": 0.00392156862745098,
  "size": [
    640,
    320
  ]
}
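These are standard Donut preprocessing settings: resize/thumbnail to a 640×320 (width × height) canvas with bilinear resampling (resample 2), pad to that fixed size, rescale pixel values by 1/255 (0.00392…), and normalize each channel with mean and std 0.5. A minimal sketch, assuming the stock DonutImageProcessor API and an illustrative sample image path:

# Minimal sketch (assumption): an image processor equivalent to the
# preprocessor_config.json above, applied to one sample scan.
from PIL import Image
from transformers import DonutImageProcessor

image_processor = DonutImageProcessor(
    do_align_long_axis=False,
    do_resize=True,
    do_thumbnail=True,
    do_pad=True,
    do_rescale=True,
    rescale_factor=1 / 255,
    do_normalize=True,
    image_mean=[0.5, 0.5, 0.5],
    image_std=[0.5, 0.5, 0.5],
    size=[640, 320],  # interpreted as [width, height]
)

image = Image.open("sample_nid.jpg").convert("RGB")  # illustrative path
pixel_values = image_processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 320, 640])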
special_tokens_map.json
ADDED
@@ -0,0 +1,8 @@
{
  "cls_token": "[CLS]",
  "eos_token": "</s>",
  "mask_token": "[MASK]",
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "unk_token": "[UNK]"
}
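This map keeps BanglaBERT's WordPiece special tokens ([CLS], [SEP], [MASK], [PAD], [UNK]) and adds </s> as the end-of-sequence token the Donut-style decoder stops on. A quick, illustrative check against the base tokenizer (not taken from this repo's code):

# Illustrative check (assumption): reproducing the special-token mapping on the
# base BanglaBERT tokenizer.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("csebuetnlp/banglabert_large")
tokenizer.add_special_tokens({"eos_token": "</s>"})  # mirrors special_tokens_map.json
print(tokenizer.cls_token, tokenizer.sep_token, tokenizer.pad_token, tokenizer.unk_token)
print(tokenizer.eos_token_id)  # 32000 here, matching "</s>" in added_tokens.json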
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,18 @@
{
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
  "do_lower_case": false,
  "full_tokenizer_file": null,
  "mask_token": "[MASK]",
  "model_max_length": 1000000000000000019884624838656,
  "name_or_path": "csebuetnlp/banglabert_large",
  "never_split": null,
  "pad_token": "[PAD]",
  "processor_class": "DonutProcessor",
  "sep_token": "[SEP]",
  "special_tokens_map_file": null,
  "strip_accents": null,
  "tokenize_chinese_chars": false,
  "tokenizer_class": "ElectraTokenizer",
  "unk_token": "[UNK]"
}
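Per this config, the text side is BanglaBERT's ElectraTokenizer (name_or_path csebuetnlp/banglabert_large) registered under a DonutProcessor, pairing it with the image processor above. A minimal sketch of assembling and saving that pairing, with an illustrative output path:

# Minimal sketch (assumption): pairing the BanglaBERT tokenizer with a Donut
# image processor and saving the combined processor; the output path is illustrative.
from transformers import AutoTokenizer, DonutImageProcessor, DonutProcessor

tokenizer = AutoTokenizer.from_pretrained("csebuetnlp/banglabert_large")
image_processor = DonutImageProcessor(size=[640, 320], do_align_long_axis=False)
processor = DonutProcessor(image_processor=image_processor, tokenizer=tokenizer)
processor.save_pretrained("./donut-bangla-nid")  # writes processor/tokenizer files like those in this commit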
vocab.txt
ADDED
The diff for this file is too large to render.