batoulnn committed (verified)
Commit 7a1519a · 1 Parent(s): 2b6028b

Upload folder using huggingface_hub

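The commit message says the folder was pushed with the huggingface_hub client. As a rough sketch only (the repo id below is a placeholder, not part of this commit; the folder path is the Trainer output directory named in trainer_state.json), such an upload typically looks like:

# Hedged sketch of a folder upload with huggingface_hub.
# "batoulnn/camelbert-author" is a placeholder repo id (assumption);
# "./camelbert-ner-author" is the Trainer output_dir recorded in trainer_state.json.
from huggingface_hub import upload_folder

upload_folder(
    repo_id="batoulnn/camelbert-author",   # placeholder, not the real repo id
    folder_path="./camelbert-ner-author",
    commit_message="Upload folder using huggingface_hub",
)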
checkpoint-6177/config.json ADDED
@@ -0,0 +1,72 @@
+ {
+ "architectures": [
+ "BertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2",
+ "3": "LABEL_3",
+ "4": "LABEL_4",
+ "5": "LABEL_5",
+ "6": "LABEL_6",
+ "7": "LABEL_7",
+ "8": "LABEL_8",
+ "9": "LABEL_9",
+ "10": "LABEL_10",
+ "11": "LABEL_11",
+ "12": "LABEL_12",
+ "13": "LABEL_13",
+ "14": "LABEL_14",
+ "15": "LABEL_15",
+ "16": "LABEL_16",
+ "17": "LABEL_17",
+ "18": "LABEL_18",
+ "19": "LABEL_19",
+ "20": "LABEL_20"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_10": 10,
+ "LABEL_11": 11,
+ "LABEL_12": 12,
+ "LABEL_13": 13,
+ "LABEL_14": 14,
+ "LABEL_15": 15,
+ "LABEL_16": 16,
+ "LABEL_17": 17,
+ "LABEL_18": 18,
+ "LABEL_19": 19,
+ "LABEL_2": 2,
+ "LABEL_20": 20,
+ "LABEL_3": 3,
+ "LABEL_4": 4,
+ "LABEL_5": 5,
+ "LABEL_6": 6,
+ "LABEL_7": 7,
+ "LABEL_8": 8,
+ "LABEL_9": 9
+ },
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.52.4",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 30000
+ }
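This config describes a 12-layer, 768-hidden BERT sequence classifier with 21 generic labels (LABEL_0 through LABEL_20) and a 30,000-token vocabulary. A minimal loading sketch, assuming the checkpoint directory has been downloaded locally under the path recorded in trainer_state.json below:

# Sketch only: load this checkpoint from a local copy.
from transformers import AutoModelForSequenceClassification, AutoTokenizer

ckpt = "./camelbert-ner-author/checkpoint-6177"
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForSequenceClassification.from_pretrained(ckpt)

print(model.config.num_labels)    # 21
print(model.config.id2label[0])   # "LABEL_0" (labels are generic in this config)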
checkpoint-6177/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:761d8fbfbbf03b0fad3f1f1bb479f5a1b7e6545f2897c772e5446c8c7ea091e3
+ size 436413500
checkpoint-6177/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55d0520ec59c52e6688c5b01627a544c59d01c35cf0966eaf554bc67ad44e7e4
+ size 872948026
checkpoint-6177/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d7baddc67c6abe7e68aba72c0c98ae0e893195479dda062921953c89984a7b9
+ size 14244
checkpoint-6177/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56d60e61073e934f0f6e7b2955a0924c3969cf56dadc2a6d6f37b83d7620244f
+ size 1064
checkpoint-6177/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
checkpoint-6177/tokenizer.json ADDED
The diff for this file is too large to render.
checkpoint-6177/tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "4": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
+ "do_lower_case": false,
+ "extra_special_tokens": {},
+ "full_tokenizer_file": null,
+ "mask_token": "[MASK]",
+ "model_max_length": 1000000000000000019884624838656,
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
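Note that model_max_length is left at the library's "not set" sentinel (a very large integer), so the tokenizer itself does not cap inputs at the 512 positions the model supports. A small sketch, assuming a local copy of the checkpoint, of truncating explicitly at encode time:

# Sketch only: cap inputs to the model's 512 position embeddings explicitly,
# since model_max_length is the "unset" sentinel in this config.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./camelbert-ner-author/checkpoint-6177")
enc = tok("example input text", truncation=True, max_length=512, return_tensors="pt")
print(enc["input_ids"].shape)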
checkpoint-6177/trainer_state.json ADDED
@@ -0,0 +1,100 @@
+ {
+ "best_global_step": 6177,
+ "best_metric": 0.8350373600980577,
+ "best_model_checkpoint": "./camelbert-ner-author/checkpoint-6177",
+ "epoch": 3.0,
+ "eval_steps": 500,
+ "global_step": 6177,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.9559553861618042,
+ "learning_rate": 1.5002428363283148e-05,
+ "loss": 0.6744,
+ "step": 2059
+ },
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.830406543180178,
+ "eval_f1_macro": 0.791267966245864,
+ "eval_f1_micro": 0.830406543180178,
+ "eval_loss": 0.5986002683639526,
+ "eval_precision_macro": 0.7902411997408786,
+ "eval_precision_micro": 0.830406543180178,
+ "eval_recall_macro": 0.8145205534902739,
+ "eval_recall_micro": 0.830406543180178,
+ "eval_runtime": 75.6826,
+ "eval_samples_per_second": 54.927,
+ "eval_steps_per_second": 3.435,
+ "step": 2059
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 60.31794357299805,
+ "learning_rate": 1.0002428363283147e-05,
+ "loss": 0.1155,
+ "step": 4118
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.8448400288669714,
+ "eval_f1_macro": 0.7972921054481455,
+ "eval_f1_micro": 0.8448400288669714,
+ "eval_loss": 0.7185700535774231,
+ "eval_precision_macro": 0.8081525378723841,
+ "eval_precision_micro": 0.8448400288669714,
+ "eval_recall_macro": 0.8163042932269564,
+ "eval_recall_micro": 0.8448400288669714,
+ "eval_runtime": 75.1555,
+ "eval_samples_per_second": 55.312,
+ "eval_steps_per_second": 3.459,
+ "step": 4118
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 0.02701091766357422,
+ "learning_rate": 5.002428363283147e-06,
+ "loss": 0.0355,
+ "step": 6177
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.8857349049795525,
+ "eval_f1_macro": 0.8350373600980577,
+ "eval_f1_micro": 0.8857349049795525,
+ "eval_loss": 0.5690950155258179,
+ "eval_precision_macro": 0.8303194562290555,
+ "eval_precision_micro": 0.8857349049795525,
+ "eval_recall_macro": 0.8522223907532204,
+ "eval_recall_micro": 0.8857349049795525,
+ "eval_runtime": 75.1931,
+ "eval_samples_per_second": 55.284,
+ "eval_steps_per_second": 3.458,
+ "step": 6177
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 8236,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 2.6005859493405696e+16,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+ }
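trainer_state.json is plain JSON, so the per-epoch metrics logged above can be read back directly. A small sketch of recovering the best metric and the macro-F1 trajectory from this file:

# Sketch: inspect the trainer state saved with this checkpoint.
import json

with open("checkpoint-6177/trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"], state["best_model_checkpoint"])
for entry in state["log_history"]:
    if "eval_f1_macro" in entry:
        print(entry["epoch"], entry["eval_f1_macro"], entry["eval_accuracy"])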
checkpoint-6177/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f8b28bd51ea74ebd4ce8c5c0c75365a5b6a1d8d082ce92438ccf4291d4ef2fd
+ size 5304
checkpoint-6177/vocab.txt ADDED
The diff for this file is too large to render.
checkpoint-8236/config.json ADDED
@@ -0,0 +1,72 @@
+ {
+ "architectures": [
+ "BertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2",
+ "3": "LABEL_3",
+ "4": "LABEL_4",
+ "5": "LABEL_5",
+ "6": "LABEL_6",
+ "7": "LABEL_7",
+ "8": "LABEL_8",
+ "9": "LABEL_9",
+ "10": "LABEL_10",
+ "11": "LABEL_11",
+ "12": "LABEL_12",
+ "13": "LABEL_13",
+ "14": "LABEL_14",
+ "15": "LABEL_15",
+ "16": "LABEL_16",
+ "17": "LABEL_17",
+ "18": "LABEL_18",
+ "19": "LABEL_19",
+ "20": "LABEL_20"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_10": 10,
+ "LABEL_11": 11,
+ "LABEL_12": 12,
+ "LABEL_13": 13,
+ "LABEL_14": 14,
+ "LABEL_15": 15,
+ "LABEL_16": 16,
+ "LABEL_17": 17,
+ "LABEL_18": 18,
+ "LABEL_19": 19,
+ "LABEL_2": 2,
+ "LABEL_20": 20,
+ "LABEL_3": 3,
+ "LABEL_4": 4,
+ "LABEL_5": 5,
+ "LABEL_6": 6,
+ "LABEL_7": 7,
+ "LABEL_8": 8,
+ "LABEL_9": 9
+ },
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.52.4",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 30000
+ }
checkpoint-8236/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7929e9ec8307921584fdea61e827780239f175a0b03ae2aa1f6230eefde4da17
+ size 436413500
checkpoint-8236/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5023c5319971f6ad3e84a6b003221f984fc2238c8187f9a318fdade46748df64
+ size 872948026
checkpoint-8236/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c960a7d4d06152a066c3b8e175345bb9d9705cbf6d6ff24ba20ee6af4fc44299
+ size 14244
checkpoint-8236/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c49c4c3f9280587c8ffc7eba095e0030369b0c09f7ff72035d62a66e48c3f2d2
+ size 1064
checkpoint-8236/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
checkpoint-8236/tokenizer.json ADDED
The diff for this file is too large to render.
checkpoint-8236/tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "4": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
+ "do_lower_case": false,
+ "extra_special_tokens": {},
+ "full_tokenizer_file": null,
+ "mask_token": "[MASK]",
+ "model_max_length": 1000000000000000019884624838656,
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
checkpoint-8236/trainer_state.json ADDED
@@ -0,0 +1,122 @@
+ {
+ "best_global_step": 8236,
+ "best_metric": 0.8473409433764713,
+ "best_model_checkpoint": "./camelbert-ner-author/checkpoint-8236",
+ "epoch": 4.0,
+ "eval_steps": 500,
+ "global_step": 8236,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.9559553861618042,
+ "learning_rate": 1.5002428363283148e-05,
+ "loss": 0.6744,
+ "step": 2059
+ },
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.830406543180178,
+ "eval_f1_macro": 0.791267966245864,
+ "eval_f1_micro": 0.830406543180178,
+ "eval_loss": 0.5986002683639526,
+ "eval_precision_macro": 0.7902411997408786,
+ "eval_precision_micro": 0.830406543180178,
+ "eval_recall_macro": 0.8145205534902739,
+ "eval_recall_micro": 0.830406543180178,
+ "eval_runtime": 75.6826,
+ "eval_samples_per_second": 54.927,
+ "eval_steps_per_second": 3.435,
+ "step": 2059
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 60.31794357299805,
+ "learning_rate": 1.0002428363283147e-05,
+ "loss": 0.1155,
+ "step": 4118
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.8448400288669714,
+ "eval_f1_macro": 0.7972921054481455,
+ "eval_f1_micro": 0.8448400288669714,
+ "eval_loss": 0.7185700535774231,
+ "eval_precision_macro": 0.8081525378723841,
+ "eval_precision_micro": 0.8448400288669714,
+ "eval_recall_macro": 0.8163042932269564,
+ "eval_recall_micro": 0.8448400288669714,
+ "eval_runtime": 75.1555,
+ "eval_samples_per_second": 55.312,
+ "eval_steps_per_second": 3.459,
+ "step": 4118
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 0.02701091766357422,
+ "learning_rate": 5.002428363283147e-06,
+ "loss": 0.0355,
+ "step": 6177
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.8857349049795525,
+ "eval_f1_macro": 0.8350373600980577,
+ "eval_f1_micro": 0.8857349049795525,
+ "eval_loss": 0.5690950155258179,
+ "eval_precision_macro": 0.8303194562290555,
+ "eval_precision_micro": 0.8857349049795525,
+ "eval_recall_macro": 0.8522223907532204,
+ "eval_recall_micro": 0.8857349049795525,
+ "eval_runtime": 75.1931,
+ "eval_samples_per_second": 55.284,
+ "eval_steps_per_second": 3.458,
+ "step": 6177
+ },
+ {
+ "epoch": 4.0,
+ "grad_norm": 0.015318239107728004,
+ "learning_rate": 2.428363283147159e-09,
+ "loss": 0.0091,
+ "step": 8236
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.8951166706759682,
+ "eval_f1_macro": 0.8473409433764713,
+ "eval_f1_micro": 0.8951166706759682,
+ "eval_loss": 0.5703061819076538,
+ "eval_precision_macro": 0.8399460951777158,
+ "eval_precision_micro": 0.8951166706759682,
+ "eval_recall_macro": 0.8710852747128797,
+ "eval_recall_micro": 0.8951166706759682,
+ "eval_runtime": 75.5896,
+ "eval_samples_per_second": 54.994,
+ "eval_steps_per_second": 3.44,
+ "step": 8236
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 8236,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 3.467447932454093e+16,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+ }
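Relative to checkpoint-6177, the fourth epoch lifts macro F1 from about 0.835 to 0.847 and accuracy from about 0.886 to 0.895, and should_training_stop is now true because max_steps (8236) has been reached. A quick sketch for comparing the two uploaded checkpoints from their state files:

# Sketch: compare the best metric recorded by each uploaded checkpoint.
import json

def best(path):
    with open(path) as f:
        s = json.load(f)
    return s["best_global_step"], s["best_metric"]

for ckpt in ("checkpoint-6177", "checkpoint-8236"):
    step, f1 = best(f"{ckpt}/trainer_state.json")
    print(ckpt, "best step:", step, "best eval_f1_macro:", f1)
# Expected from the files in this commit: 6177 -> 0.8350..., 8236 -> 0.8473...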
checkpoint-8236/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f8b28bd51ea74ebd4ce8c5c0c75365a5b6a1d8d082ce92438ccf4291d4ef2fd
+ size 5304
checkpoint-8236/vocab.txt ADDED
The diff for this file is too large to render.
config.json ADDED
@@ -0,0 +1,72 @@
+ {
+ "architectures": [
+ "BertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2",
+ "3": "LABEL_3",
+ "4": "LABEL_4",
+ "5": "LABEL_5",
+ "6": "LABEL_6",
+ "7": "LABEL_7",
+ "8": "LABEL_8",
+ "9": "LABEL_9",
+ "10": "LABEL_10",
+ "11": "LABEL_11",
+ "12": "LABEL_12",
+ "13": "LABEL_13",
+ "14": "LABEL_14",
+ "15": "LABEL_15",
+ "16": "LABEL_16",
+ "17": "LABEL_17",
+ "18": "LABEL_18",
+ "19": "LABEL_19",
+ "20": "LABEL_20"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_10": 10,
+ "LABEL_11": 11,
+ "LABEL_12": 12,
+ "LABEL_13": 13,
+ "LABEL_14": 14,
+ "LABEL_15": 15,
+ "LABEL_16": 16,
+ "LABEL_17": 17,
+ "LABEL_18": 18,
+ "LABEL_19": 19,
+ "LABEL_2": 2,
+ "LABEL_20": 20,
+ "LABEL_3": 3,
+ "LABEL_4": 4,
+ "LABEL_5": 5,
+ "LABEL_6": 6,
+ "LABEL_7": 7,
+ "LABEL_8": 8,
+ "LABEL_9": 9
+ },
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.52.4",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 30000
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7929e9ec8307921584fdea61e827780239f175a0b03ae2aa1f6230eefde4da17
+ size 436413500
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "4": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
+ "do_lower_case": false,
+ "extra_special_tokens": {},
+ "full_tokenizer_file": null,
+ "mask_token": "[MASK]",
+ "model_max_length": 1000000000000000019884624838656,
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f8b28bd51ea74ebd4ce8c5c0c75365a5b6a1d8d082ce92438ccf4291d4ef2fd
+ size 5304
vocab.txt ADDED
The diff for this file is too large to render.
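The root-level model.safetensors shares its sha256 with checkpoint-8236, so the repository root holds the final (epoch-4) model alongside its tokenizer files. A minimal inference sketch, assuming the repository has been cloned or downloaded locally (the path below is a placeholder, since the repo id is not shown in this view):

# Sketch: text classification with the root-level files of this repo.
# "./camelbert-author-repo" is a placeholder local path (assumption);
# replace it with the actual repo id on the Hub or a local clone directory.
from transformers import pipeline

clf = pipeline("text-classification", model="./camelbert-author-repo")
print(clf("example input text"))  # e.g. [{'label': 'LABEL_3', 'score': ...}]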