Commit 3cb23bd (verified) by Miruzen · 1 parent: 2ed769e

Upload folder using huggingface_hub
Readme.md ADDED
@@ -0,0 +1,51 @@
+ # Longformer Classification Model
+
+ ## Model Overview
+ - **Model Type**: Longformer (sequence classification)
+ - **Task**: Text classification (3 labels: negative, neutral, positive)
+ - **Framework**: Hugging Face Transformers
+ - **Selected Checkpoint**: checkpoint-714
+
+ ## Model Details
+ - **Base Model**: allenai/longformer-base-4096
+ - **Max Sequence Length**: 4096 tokens
+ - **Model Size**: ~149M parameters
+ - **Training Data**: LF_Labelled.csv
+
+ ## Training Information
+ - **Training Date**: October 17-18, 2025
+ - **Hardware Used**: GPU (CUDA)
+ - **Training Steps**: 714 (3 of up to 6 epochs; best checkpoint selected by eval F1 ≈ 0.78)
+ - **Optimizer**: AdamW (see the configuration sketch below)
+ - **Learning Rate**: 1e-5 (peak, linear decay; per `trainer_state.json`)
+
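+ The exact `TrainingArguments` are serialized in `training_args.bin` and are not shown here. The snippet below is a rough, unverified reconstruction based on the values recorded in `trainer_state.json` (batch size 3, up to 6 epochs, logging every 50 steps, evaluation and checkpointing per epoch, early stopping with patience 4, best model by F1); the CSV column names, the train/test split, and the macro-averaged F1 are assumptions.
+
+ ```python
+ import numpy as np
+ from datasets import load_dataset
+ from sklearn.metrics import f1_score
+ from transformers import (
+     AutoModelForSequenceClassification,
+     AutoTokenizer,
+     EarlyStoppingCallback,
+     Trainer,
+     TrainingArguments,
+ )
+
+ base = "allenai/longformer-base-4096"
+ tokenizer = AutoTokenizer.from_pretrained(base)
+ model = AutoModelForSequenceClassification.from_pretrained(
+     base,
+     num_labels=3,
+     id2label={0: "negative", 1: "neutral", 2: "positive"},
+     label2id={"negative": 0, "neutral": 1, "positive": 2},
+ )
+
+ # LF_Labelled.csv is not included in this upload; "text" and integer "label" columns are assumptions.
+ dataset = load_dataset("csv", data_files="LF_Labelled.csv")["train"].train_test_split(test_size=0.2)
+ dataset = dataset.map(
+     lambda batch: tokenizer(batch["text"], truncation=True, max_length=4096),
+     batched=True,
+ )
+
+ def compute_metrics(eval_pred):
+     logits, labels = eval_pred
+     preds = np.argmax(logits, axis=-1)
+     return {"f1": f1_score(labels, preds, average="macro")}  # averaging mode assumed
+
+ args = TrainingArguments(
+     output_dir="output/LongFormer",
+     learning_rate=1e-5,              # matches the linear decay recorded in trainer_state.json
+     per_device_train_batch_size=3,   # train_batch_size in trainer_state.json
+     num_train_epochs=6,              # num_train_epochs in trainer_state.json
+     eval_strategy="epoch",           # evaluations fall on epoch boundaries (steps 238/476/714)
+     save_strategy="epoch",
+     logging_steps=50,
+     load_best_model_at_end=True,
+     metric_for_best_model="f1",      # best_metric equals eval_f1 at step 714
+ )
+
+ trainer = Trainer(
+     model=model,
+     args=args,
+     train_dataset=dataset["train"],
+     eval_dataset=dataset["test"],
+     processing_class=tokenizer,
+     callbacks=[EarlyStoppingCallback(early_stopping_patience=4)],
+     compute_metrics=compute_metrics,
+ )
+ trainer.train()
+ ```
+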
+ ## Model Files
+ - `model.safetensors`: Model weights
+ - `config.json`: Model architecture configuration
+ - `tokenizer.json`: Serialized fast tokenizer
+ - `tokenizer_config.json` / `special_tokens_map.json`: Tokenizer configuration and special tokens
+ - `vocab.json`: Vocabulary file
+ - `merges.txt`: BPE merges
+ - `optimizer.pt`: Optimizer state
+ - `trainer_state.json`: Training state and metrics
+ - `training_args.bin`: Serialized training arguments
+
+ ## Usage
+ ```python
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+ model_path = "best_model/"  # local path to the downloaded files (or to checkpoint-714/)
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
+ model = AutoModelForSequenceClassification.from_pretrained(model_path)
+ ```
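+
+ Continuing from the snippet above, a single document can be classified as follows; `LongformerForSequenceClassification` assigns global attention to the leading `<s>` token automatically, so no `global_attention_mask` is needed here (the input text is a placeholder):
+
+ ```python
+ import torch
+
+ text = "..."  # any document up to 4096 tokens
+ inputs = tokenizer(text, truncation=True, max_length=4096, return_tensors="pt")
+ with torch.no_grad():
+     logits = model(**inputs).logits
+ label = model.config.id2label[logits.argmax(dim=-1).item()]
+ print(label)  # "negative", "neutral", or "positive"
+ ```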
+
+ ## Citation
+ ```bibtex
+ @misc{longformer_classification_2025,
+   author = {[Miruzen]},
+   title = {Longformer Text Classification Model},
+   year = {2025},
+   publisher = {GitHub},
+   journal = {GitHub repository},
+ }
+ ```
+
+ Note: Replace the placeholder information in brackets with your specific details.
checkpoint-714/config.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "architectures": [
+     "LongformerForSequenceClassification"
+   ],
+   "attention_mode": "longformer",
+   "attention_probs_dropout_prob": 0.1,
+   "attention_window": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "bos_token_id": 0,
+   "dtype": "float32",
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "negative",
+     "1": "neutral",
+     "2": "positive"
+   },
+   "ignore_attention_mask": false,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "negative": 0,
+     "neutral": 1,
+     "positive": 2
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 4098,
+   "model_type": "longformer",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "onnx_export": false,
+   "pad_token_id": 1,
+   "problem_type": "single_label_classification",
+   "sep_token_id": 2,
+   "transformers_version": "4.57.1",
+   "type_vocab_size": 1,
+   "vocab_size": 50265
+ }
checkpoint-714/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-714/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ee03f55926fe9d1ea2e1fbf6953d2a25a6a59146ff2dbc435cb3a0630f7a332
+ size 14645
checkpoint-714/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a99e1c04333b112c3dead49d9a1e5cb9b108be4d2ec0b265b66336f5d39f01d
+ size 1383
checkpoint-714/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:767beb689495c50074030315a3e85a069553f68b8d6fc18ada19ae0d4ff25011
+ size 1465
checkpoint-714/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
checkpoint-714/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-714/tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50264": {
+       "content": "<mask>",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "mask_token": "<mask>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "LongformerTokenizer",
+   "trim_offsets": true,
+   "unk_token": "<unk>"
+ }
checkpoint-714/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8497a935ae5b53de8200aba72d0ee850572929ea833a1b47df109a3c6e0dd8bd
+ size 5905
checkpoint-714/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
config.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "architectures": [
+     "LongformerForSequenceClassification"
+   ],
+   "attention_mode": "longformer",
+   "attention_probs_dropout_prob": 0.1,
+   "attention_window": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "bos_token_id": 0,
+   "dtype": "float32",
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "negative",
+     "1": "neutral",
+     "2": "positive"
+   },
+   "ignore_attention_mask": false,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "negative": 0,
+     "neutral": 1,
+     "positive": 2
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 4098,
+   "model_type": "longformer",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "onnx_export": false,
+   "pad_token_id": 1,
+   "problem_type": "single_label_classification",
+   "sep_token_id": 2,
+   "transformers_version": "4.57.1",
+   "type_vocab_size": 1,
+   "vocab_size": 50265
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f4855974f95d343dbd03f5595e14bece833b7061fb94bc1372febc4fab72b17
+ size 594681260
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68854ebfbf33e117faf74236126816804dd9932f3111132ea3d6f401322d9dde
+ size 1189526731
runs/Oct17_19-10-37_6bcb8a5f9bf6/events.out.tfevents.1760728242.6bcb8a5f9bf6.23580.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41ae85016f94e72b28bf74ca11e1b7937c7769012dc715635b8b41289a5c27d0
+ size 5416
runs/Oct17_19-38-41_6bcb8a5f9bf6/events.out.tfevents.1760729924.6bcb8a5f9bf6.30330.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e21aa3bf22de4fc2901bd5fac3d68cf86e22d8e94b3c570f0719e95ba6d7f2d7
+ size 5415
runs/Oct17_19-53-25_6bcb8a5f9bf6/events.out.tfevents.1760730809.6bcb8a5f9bf6.34317.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba25b369f1f5922e35ff5662ab72f4a06f4749d520ad55a6ea203056e0106dde
+ size 5415
runs/Oct17_20-20-10_6bcb8a5f9bf6/events.out.tfevents.1760732413.6bcb8a5f9bf6.37927.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ea5ea49d49f0f1f11a82e94f38a35df24684350840a3933423421a2ba6dee2c
+ size 5415
runs/Oct18_01-04-56_9ce0259bbb4f/events.out.tfevents.1760749501.9ce0259bbb4f.498.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:111140f7ddcd2ca64d59d588358162fa4daada44e227df6b4ce538595ea4f6fe
+ size 5415
runs/Oct18_10-56-57_191877b48d26/events.out.tfevents.1760785021.191877b48d26.1449.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:845413291b00316a64021683e2f5e6c104f7f731c9986a81de0ed05f179ecfec
+ size 5415
runs/Oct18_11-17-17_191877b48d26/events.out.tfevents.1760786240.191877b48d26.5233.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b78c2cdf76e41002ac6743f38a5cbe8fde3331ec9a86f599efcd866c75e5f53a
+ size 5415
runs/Oct18_11-38-21_191877b48d26/events.out.tfevents.1760787504.191877b48d26.10887.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34a97e190e00ed0c8b0a0d3443dc8549aa0f3054a99410779f53a355bfe5953f
+ size 5415
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50264": {
+       "content": "<mask>",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "mask_token": "<mask>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "LongformerTokenizer",
+   "trim_offsets": true,
+   "unk_token": "<unk>"
+ }
trainer_state.json ADDED
@@ -0,0 +1,177 @@
+ {
+   "best_global_step": 714,
+   "best_metric": 0.7832178763628642,
+   "best_model_checkpoint": "/content/drive/MyDrive/Skripsi/output/LongFormer/best_model/checkpoint-714",
+   "epoch": 3.0,
+   "eval_steps": 500,
+   "global_step": 714,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.21019442984760903,
+       "grad_norm": 4.426321983337402,
+       "learning_rate": 9.65686274509804e-06,
+       "loss": 0.8342,
+       "step": 50
+     },
+     {
+       "epoch": 0.42038885969521805,
+       "grad_norm": 8.081218719482422,
+       "learning_rate": 9.306722689075631e-06,
+       "loss": 0.6552,
+       "step": 100
+     },
+     {
+       "epoch": 0.6305832895428272,
+       "grad_norm": 13.813941955566406,
+       "learning_rate": 8.956582633053222e-06,
+       "loss": 0.5327,
+       "step": 150
+     },
+     {
+       "epoch": 0.8407777193904361,
+       "grad_norm": 15.198319435119629,
+       "learning_rate": 8.606442577030813e-06,
+       "loss": 0.4968,
+       "step": 200
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.8308823529411765,
+       "eval_f1": 0.7542367470980773,
+       "eval_loss": 0.4402936100959778,
+       "eval_precision": 0.7549875294509866,
+       "eval_recall": 0.7534970480092084,
+       "eval_runtime": 267.13,
+       "eval_samples_per_second": 9.164,
+       "eval_steps_per_second": 3.055,
+       "step": 238
+     },
+     {
+       "epoch": 1.0504466631634262,
+       "grad_norm": 9.528948783874512,
+       "learning_rate": 8.256302521008404e-06,
+       "loss": 0.4899,
+       "step": 250
+     },
+     {
+       "epoch": 1.2606410930110352,
+       "grad_norm": 13.074601173400879,
+       "learning_rate": 7.906162464985995e-06,
+       "loss": 0.4595,
+       "step": 300
+     },
+     {
+       "epoch": 1.4708355228586443,
+       "grad_norm": 9.981877326965332,
+       "learning_rate": 7.556022408963586e-06,
+       "loss": 0.4191,
+       "step": 350
+     },
+     {
+       "epoch": 1.6810299527062533,
+       "grad_norm": 9.738601684570312,
+       "learning_rate": 7.205882352941177e-06,
+       "loss": 0.3928,
+       "step": 400
+     },
+     {
+       "epoch": 1.8912243825538622,
+       "grad_norm": 10.563243865966797,
+       "learning_rate": 6.855742296918768e-06,
+       "loss": 0.3938,
+       "step": 450
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.8378267973856209,
+       "eval_f1": 0.7525604819272612,
+       "eval_loss": 0.4178110957145691,
+       "eval_precision": 0.7918661592956496,
+       "eval_recall": 0.7238836613451252,
+       "eval_runtime": 266.9708,
+       "eval_samples_per_second": 9.17,
+       "eval_steps_per_second": 3.057,
+       "step": 476
+     },
+     {
+       "epoch": 2.1008933263268523,
+       "grad_norm": 12.036543846130371,
+       "learning_rate": 6.50560224089636e-06,
+       "loss": 0.3576,
+       "step": 500
+     },
+     {
+       "epoch": 2.3110877561744614,
+       "grad_norm": 14.761561393737793,
+       "learning_rate": 6.155462184873951e-06,
+       "loss": 0.3408,
+       "step": 550
+     },
+     {
+       "epoch": 2.5212821860220704,
+       "grad_norm": 13.06762409210205,
+       "learning_rate": 5.805322128851542e-06,
+       "loss": 0.3219,
+       "step": 600
+     },
+     {
+       "epoch": 2.7314766158696795,
+       "grad_norm": 6.505790710449219,
+       "learning_rate": 5.455182072829132e-06,
+       "loss": 0.3129,
+       "step": 650
+     },
+     {
+       "epoch": 2.9416710457172885,
+       "grad_norm": 14.373762130737305,
+       "learning_rate": 5.105042016806723e-06,
+       "loss": 0.3312,
+       "step": 700
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.8443627450980392,
+       "eval_f1": 0.7832178763628642,
+       "eval_loss": 0.407488614320755,
+       "eval_precision": 0.7723397049441368,
+       "eval_recall": 0.7975281041748842,
+       "eval_runtime": 266.6187,
+       "eval_samples_per_second": 9.182,
+       "eval_steps_per_second": 3.061,
+       "step": 714
+     }
+   ],
+   "logging_steps": 50,
+   "max_steps": 1428,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 6,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "EarlyStoppingCallback": {
+       "args": {
+         "early_stopping_patience": 4,
+         "early_stopping_threshold": 0.0
+       },
+       "attributes": {
+         "early_stopping_patience_counter": 0
+       }
+     },
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 9219670817855454.0,
+   "train_batch_size": 3,
+   "trial_name": null,
+   "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8497a935ae5b53de8200aba72d0ee850572929ea833a1b47df109a3c6e0dd8bd
+ size 5905
vocab.json ADDED
The diff for this file is too large to render. See raw diff