DIACDE committed on
Commit
71a3448
·
verified ·
1 Parent(s): f741b3e

Upload 11 files

Browse files

atualização do modelo, com um novo label "RUIDO"

config.json CHANGED
@@ -1,41 +1,41 @@
1
- {
2
- "_name_or_path": "neuralmind/bert-base-portuguese-cased",
3
- "architectures": [
4
- "BertForSequenceClassification"
5
- ],
6
- "attention_probs_dropout_prob": 0.1,
7
- "classifier_dropout": null,
8
- "directionality": "bidi",
9
- "hidden_act": "gelu",
10
- "hidden_dropout_prob": 0.1,
11
- "hidden_size": 768,
12
- "id2label": {
13
- "0": "TESE",
14
- "1": "FATO"
15
- },
16
- "initializer_range": 0.02,
17
- "intermediate_size": 3072,
18
- "label2id": {
19
- "FATO": 1,
20
- "TESE": 0
21
- },
22
- "layer_norm_eps": 1e-12,
23
- "max_position_embeddings": 512,
24
- "model_type": "bert",
25
- "num_attention_heads": 12,
26
- "num_hidden_layers": 12,
27
- "output_past": true,
28
- "pad_token_id": 0,
29
- "pooler_fc_size": 768,
30
- "pooler_num_attention_heads": 12,
31
- "pooler_num_fc_layers": 3,
32
- "pooler_size_per_head": 128,
33
- "pooler_type": "first_token_transform",
34
- "position_embedding_type": "absolute",
35
- "problem_type": "single_label_classification",
36
- "torch_dtype": "float32",
37
- "transformers_version": "4.47.0",
38
- "type_vocab_size": 2,
39
- "use_cache": true,
40
- "vocab_size": 29794
41
- }
 
1
+ {
2
+ "architectures": [
3
+ "BertForSequenceClassification"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.1,
6
+ "classifier_dropout": null,
7
+ "directionality": "bidi",
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.1,
10
+ "hidden_size": 768,
11
+ "id2label": {
12
+ "0": "TESE",
13
+ "1": "FATO",
14
+ "2": "RUIDO"
15
+ },
16
+ "initializer_range": 0.02,
17
+ "intermediate_size": 3072,
18
+ "label2id": {
19
+ "FATO": 1,
20
+ "RUIDO": 2,
21
+ "TESE": 0
22
+ },
23
+ "layer_norm_eps": 1e-12,
24
+ "max_position_embeddings": 512,
25
+ "model_type": "bert",
26
+ "num_attention_heads": 12,
27
+ "num_hidden_layers": 12,
28
+ "pad_token_id": 0,
29
+ "pooler_fc_size": 768,
30
+ "pooler_num_attention_heads": 12,
31
+ "pooler_num_fc_layers": 3,
32
+ "pooler_size_per_head": 128,
33
+ "pooler_type": "first_token_transform",
34
+ "position_embedding_type": "absolute",
35
+ "problem_type": "single_label_classification",
36
+ "torch_dtype": "float32",
37
+ "transformers_version": "4.51.3",
38
+ "type_vocab_size": 2,
39
+ "use_cache": true,
40
+ "vocab_size": 119547
41
+ }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3a597861f209c325bdb0c9a1de0f1871e057cfb90439626dc6ef4c918316cde4
3
- size 435722224
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2de1e06daab2515672323fb9be7cfd4b13ba2352edcbabb98d90fddf64358e75
3
+ size 711446532
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b2370006cdbc3fc607e39df14c0ce60f013544782f5869e1442e47fded7aefef
3
- size 871559930
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c1d1d1ec8a3ce7cd018b2d040490720d40fc9aaf77197cba25a1dcb9d84f6c1
3
+ size 1423008506
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:862c52e12566446fb74babfa164eb66eacbd2bdba0c993bc568c8b80b2fd576a
3
  size 13990
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:85eeb6471f65295b3454f24aa5343a9c665d4af2810a5b9428b38718a8ea8d4e
3
  size 13990
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:dfd7737f7f28f9b23ccf7b2a3ce0b5ef642fa341fb7e72703bf9977a6c5aada9
3
  size 1064
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3a141896d041f2762af0a64ef0acd25fdd2f3b377bcc28cf0d0a99878198d9ba
3
  size 1064
special_tokens_map.json CHANGED
@@ -1,7 +1,7 @@
1
- {
2
- "cls_token": "[CLS]",
3
- "mask_token": "[MASK]",
4
- "pad_token": "[PAD]",
5
- "sep_token": "[SEP]",
6
- "unk_token": "[UNK]"
7
- }
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,58 +1,56 @@
1
- {
2
- "added_tokens_decoder": {
3
- "0": {
4
- "content": "[PAD]",
5
- "lstrip": false,
6
- "normalized": false,
7
- "rstrip": false,
8
- "single_word": false,
9
- "special": true
10
- },
11
- "100": {
12
- "content": "[UNK]",
13
- "lstrip": false,
14
- "normalized": false,
15
- "rstrip": false,
16
- "single_word": false,
17
- "special": true
18
- },
19
- "101": {
20
- "content": "[CLS]",
21
- "lstrip": false,
22
- "normalized": false,
23
- "rstrip": false,
24
- "single_word": false,
25
- "special": true
26
- },
27
- "102": {
28
- "content": "[SEP]",
29
- "lstrip": false,
30
- "normalized": false,
31
- "rstrip": false,
32
- "single_word": false,
33
- "special": true
34
- },
35
- "103": {
36
- "content": "[MASK]",
37
- "lstrip": false,
38
- "normalized": false,
39
- "rstrip": false,
40
- "single_word": false,
41
- "special": true
42
- }
43
- },
44
- "clean_up_tokenization_spaces": true,
45
- "cls_token": "[CLS]",
46
- "do_basic_tokenize": true,
47
- "do_lower_case": false,
48
- "extra_special_tokens": {},
49
- "mask_token": "[MASK]",
50
- "model_max_length": 512,
51
- "never_split": null,
52
- "pad_token": "[PAD]",
53
- "sep_token": "[SEP]",
54
- "strip_accents": null,
55
- "tokenize_chinese_chars": true,
56
- "tokenizer_class": "BertTokenizer",
57
- "unk_token": "[UNK]"
58
- }
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "100": {
12
+ "content": "[UNK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "101": {
20
+ "content": "[CLS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "102": {
28
+ "content": "[SEP]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "103": {
36
+ "content": "[MASK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": false,
45
+ "cls_token": "[CLS]",
46
+ "do_lower_case": false,
47
+ "extra_special_tokens": {},
48
+ "mask_token": "[MASK]",
49
+ "model_max_length": 512,
50
+ "pad_token": "[PAD]",
51
+ "sep_token": "[SEP]",
52
+ "strip_accents": null,
53
+ "tokenize_chinese_chars": true,
54
+ "tokenizer_class": "BertTokenizer",
55
+ "unk_token": "[UNK]"
56
+ }
 
 
trainer_state.json CHANGED
@@ -1,167 +1,224 @@
1
- {
2
- "best_metric": 0.3189895749092102,
3
- "best_model_checkpoint": "my_awesome_model_truncaded_split_36k\\checkpoint-1837",
4
- "epoch": 4.0,
5
- "eval_steps": 500,
6
- "global_step": 7348,
7
- "is_hyper_param_search": false,
8
- "is_local_process_zero": true,
9
- "is_world_process_zero": true,
10
- "log_history": [
11
- {
12
- "epoch": 0.2721829069134458,
13
- "grad_norm": 4.77004337310791,
14
- "learning_rate": 9.319542732716386e-06,
15
- "loss": 0.5661,
16
- "step": 500
17
- },
18
- {
19
- "epoch": 0.5443658138268916,
20
- "grad_norm": 13.415882110595703,
21
- "learning_rate": 8.639085465432772e-06,
22
- "loss": 0.457,
23
- "step": 1000
24
- },
25
- {
26
- "epoch": 0.8165487207403375,
27
- "grad_norm": 13.942790985107422,
28
- "learning_rate": 7.958628198149157e-06,
29
- "loss": 0.3841,
30
- "step": 1500
31
- },
32
- {
33
- "epoch": 1.0,
34
- "eval_accuracy": 0.8633455832312509,
35
- "eval_loss": 0.3189895749092102,
36
- "eval_runtime": 2115.733,
37
- "eval_samples_per_second": 3.473,
38
- "eval_steps_per_second": 0.217,
39
- "step": 1837
40
- },
41
- {
42
- "epoch": 1.0887316276537833,
43
- "grad_norm": 21.668664932250977,
44
- "learning_rate": 7.2781709308655426e-06,
45
- "loss": 0.343,
46
- "step": 2000
47
- },
48
- {
49
- "epoch": 1.360914534567229,
50
- "grad_norm": 14.608024597167969,
51
- "learning_rate": 6.597713663581927e-06,
52
- "loss": 0.3067,
53
- "step": 2500
54
- },
55
- {
56
- "epoch": 1.633097441480675,
57
- "grad_norm": 11.27497673034668,
58
- "learning_rate": 5.917256396298313e-06,
59
- "loss": 0.2714,
60
- "step": 3000
61
- },
62
- {
63
- "epoch": 1.905280348394121,
64
- "grad_norm": 8.441925048828125,
65
- "learning_rate": 5.236799129014698e-06,
66
- "loss": 0.2698,
67
- "step": 3500
68
- },
69
- {
70
- "epoch": 2.0,
71
- "eval_accuracy": 0.8595345038791343,
72
- "eval_loss": 0.3298398554325104,
73
- "eval_runtime": 2168.9042,
74
- "eval_samples_per_second": 3.387,
75
- "eval_steps_per_second": 0.212,
76
- "step": 3674
77
- },
78
- {
79
- "epoch": 2.1774632553075666,
80
- "grad_norm": 35.317108154296875,
81
- "learning_rate": 4.5563418617310835e-06,
82
- "loss": 0.2298,
83
- "step": 4000
84
- },
85
- {
86
- "epoch": 2.4496461622210126,
87
- "grad_norm": 9.94012451171875,
88
- "learning_rate": 3.875884594447469e-06,
89
- "loss": 0.2087,
90
- "step": 4500
91
- },
92
- {
93
- "epoch": 2.721829069134458,
94
- "grad_norm": 24.299028396606445,
95
- "learning_rate": 3.1954273271638544e-06,
96
- "loss": 0.2119,
97
- "step": 5000
98
- },
99
- {
100
- "epoch": 2.9940119760479043,
101
- "grad_norm": 5.556156158447266,
102
- "learning_rate": 2.5149700598802396e-06,
103
- "loss": 0.2042,
104
- "step": 5500
105
- },
106
- {
107
- "epoch": 3.0,
108
- "eval_accuracy": 0.881312100176943,
109
- "eval_loss": 0.3292213976383209,
110
- "eval_runtime": 2182.1168,
111
- "eval_samples_per_second": 3.367,
112
- "eval_steps_per_second": 0.211,
113
- "step": 5511
114
- },
115
- {
116
- "epoch": 3.26619488296135,
117
- "grad_norm": 30.425508499145508,
118
- "learning_rate": 1.834512792596625e-06,
119
- "loss": 0.1642,
120
- "step": 6000
121
- },
122
- {
123
- "epoch": 3.538377789874796,
124
- "grad_norm": 10.257048606872559,
125
- "learning_rate": 1.1540555253130105e-06,
126
- "loss": 0.1731,
127
- "step": 6500
128
- },
129
- {
130
- "epoch": 3.810560696788242,
131
- "grad_norm": 37.602664947509766,
132
- "learning_rate": 4.735982580293958e-07,
133
- "loss": 0.1641,
134
- "step": 7000
135
- },
136
- {
137
- "epoch": 4.0,
138
- "eval_accuracy": 0.8960119776779638,
139
- "eval_loss": 0.34636014699935913,
140
- "eval_runtime": 2178.6484,
141
- "eval_samples_per_second": 3.372,
142
- "eval_steps_per_second": 0.211,
143
- "step": 7348
144
- }
145
- ],
146
- "logging_steps": 500,
147
- "max_steps": 7348,
148
- "num_input_tokens_seen": 0,
149
- "num_train_epochs": 4,
150
- "save_steps": 500,
151
- "stateful_callbacks": {
152
- "TrainerControl": {
153
- "args": {
154
- "should_epoch_stop": false,
155
- "should_evaluate": false,
156
- "should_log": false,
157
- "should_save": true,
158
- "should_training_stop": true
159
- },
160
- "attributes": {}
161
- }
162
- },
163
- "total_flos": 2.907596283903792e+16,
164
- "train_batch_size": 16,
165
- "trial_name": null,
166
- "trial_params": null
167
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": 8265,
3
+ "best_metric": 0.10672979801893234,
4
+ "best_model_checkpoint": "modelos/treinados/modelo_bert_fato_teses_bert_multilingual_cased/checkpoint-8265",
5
+ "epoch": 4.0,
6
+ "eval_steps": 500,
7
+ "global_step": 11020,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.18148820326678766,
14
+ "grad_norm": 0.9450770020484924,
15
+ "learning_rate": 1.9094373865698732e-05,
16
+ "loss": 0.3273,
17
+ "step": 500
18
+ },
19
+ {
20
+ "epoch": 0.3629764065335753,
21
+ "grad_norm": 1.8763504028320312,
22
+ "learning_rate": 1.8186932849364793e-05,
23
+ "loss": 0.213,
24
+ "step": 1000
25
+ },
26
+ {
27
+ "epoch": 0.5444646098003629,
28
+ "grad_norm": 6.744544982910156,
29
+ "learning_rate": 1.7279491833030854e-05,
30
+ "loss": 0.1823,
31
+ "step": 1500
32
+ },
33
+ {
34
+ "epoch": 0.7259528130671506,
35
+ "grad_norm": 0.1076168492436409,
36
+ "learning_rate": 1.6372050816696915e-05,
37
+ "loss": 0.1568,
38
+ "step": 2000
39
+ },
40
+ {
41
+ "epoch": 0.9074410163339383,
42
+ "grad_norm": 0.03117656148970127,
43
+ "learning_rate": 1.5464609800362976e-05,
44
+ "loss": 0.1619,
45
+ "step": 2500
46
+ },
47
+ {
48
+ "epoch": 1.0,
49
+ "eval_accuracy": 0.9738704409363091,
50
+ "eval_loss": 0.11249715089797974,
51
+ "eval_runtime": 700.568,
52
+ "eval_samples_per_second": 7.866,
53
+ "eval_steps_per_second": 0.983,
54
+ "step": 2755
55
+ },
56
+ {
57
+ "epoch": 1.0889292196007259,
58
+ "grad_norm": 0.010204868391156197,
59
+ "learning_rate": 1.4557168784029038e-05,
60
+ "loss": 0.1101,
61
+ "step": 3000
62
+ },
63
+ {
64
+ "epoch": 1.2704174228675136,
65
+ "grad_norm": 0.07266418635845184,
66
+ "learning_rate": 1.36497277676951e-05,
67
+ "loss": 0.1146,
68
+ "step": 3500
69
+ },
70
+ {
71
+ "epoch": 1.4519056261343013,
72
+ "grad_norm": 16.94893455505371,
73
+ "learning_rate": 1.2742286751361164e-05,
74
+ "loss": 0.0961,
75
+ "step": 4000
76
+ },
77
+ {
78
+ "epoch": 1.633393829401089,
79
+ "grad_norm": 4.739220142364502,
80
+ "learning_rate": 1.1834845735027225e-05,
81
+ "loss": 0.0783,
82
+ "step": 4500
83
+ },
84
+ {
85
+ "epoch": 1.8148820326678767,
86
+ "grad_norm": 0.09760759770870209,
87
+ "learning_rate": 1.0927404718693286e-05,
88
+ "loss": 0.0901,
89
+ "step": 5000
90
+ },
91
+ {
92
+ "epoch": 1.9963702359346642,
93
+ "grad_norm": 7.626439571380615,
94
+ "learning_rate": 1.0019963702359348e-05,
95
+ "loss": 0.0863,
96
+ "step": 5500
97
+ },
98
+ {
99
+ "epoch": 2.0,
100
+ "eval_accuracy": 0.9740518962075848,
101
+ "eval_loss": 0.1354905664920807,
102
+ "eval_runtime": 699.3375,
103
+ "eval_samples_per_second": 7.88,
104
+ "eval_steps_per_second": 0.985,
105
+ "step": 5510
106
+ },
107
+ {
108
+ "epoch": 2.1778584392014517,
109
+ "grad_norm": 0.00856301560997963,
110
+ "learning_rate": 9.11252268602541e-06,
111
+ "loss": 0.0625,
112
+ "step": 6000
113
+ },
114
+ {
115
+ "epoch": 2.3593466424682394,
116
+ "grad_norm": 0.054624781012535095,
117
+ "learning_rate": 8.20508166969147e-06,
118
+ "loss": 0.0416,
119
+ "step": 6500
120
+ },
121
+ {
122
+ "epoch": 2.540834845735027,
123
+ "grad_norm": 1.524115800857544,
124
+ "learning_rate": 7.297640653357533e-06,
125
+ "loss": 0.0478,
126
+ "step": 7000
127
+ },
128
+ {
129
+ "epoch": 2.722323049001815,
130
+ "grad_norm": 0.007021903060376644,
131
+ "learning_rate": 6.390199637023594e-06,
132
+ "loss": 0.0378,
133
+ "step": 7500
134
+ },
135
+ {
136
+ "epoch": 2.9038112522686026,
137
+ "grad_norm": 0.003797353943809867,
138
+ "learning_rate": 5.4827586206896556e-06,
139
+ "loss": 0.0482,
140
+ "step": 8000
141
+ },
142
+ {
143
+ "epoch": 3.0,
144
+ "eval_accuracy": 0.9834875703139176,
145
+ "eval_loss": 0.10672979801893234,
146
+ "eval_runtime": 708.5009,
147
+ "eval_samples_per_second": 7.778,
148
+ "eval_steps_per_second": 0.972,
149
+ "step": 8265
150
+ },
151
+ {
152
+ "epoch": 3.0852994555353903,
153
+ "grad_norm": 0.0551212877035141,
154
+ "learning_rate": 4.575317604355717e-06,
155
+ "loss": 0.0396,
156
+ "step": 8500
157
+ },
158
+ {
159
+ "epoch": 3.266787658802178,
160
+ "grad_norm": 0.003503380110487342,
161
+ "learning_rate": 3.6678765880217788e-06,
162
+ "loss": 0.0258,
163
+ "step": 9000
164
+ },
165
+ {
166
+ "epoch": 3.4482758620689653,
167
+ "grad_norm": 0.034748516976833344,
168
+ "learning_rate": 2.7604355716878406e-06,
169
+ "loss": 0.0168,
170
+ "step": 9500
171
+ },
172
+ {
173
+ "epoch": 3.629764065335753,
174
+ "grad_norm": 0.28772714734077454,
175
+ "learning_rate": 1.8529945553539021e-06,
176
+ "loss": 0.0246,
177
+ "step": 10000
178
+ },
179
+ {
180
+ "epoch": 3.8112522686025407,
181
+ "grad_norm": 0.008872357197105885,
182
+ "learning_rate": 9.455535390199638e-07,
183
+ "loss": 0.0172,
184
+ "step": 10500
185
+ },
186
+ {
187
+ "epoch": 3.9927404718693285,
188
+ "grad_norm": 0.0023940089158713818,
189
+ "learning_rate": 3.8112522686025416e-08,
190
+ "loss": 0.0149,
191
+ "step": 11000
192
+ },
193
+ {
194
+ "epoch": 4.0,
195
+ "eval_accuracy": 0.9831246597713663,
196
+ "eval_loss": 0.11994421482086182,
197
+ "eval_runtime": 701.1801,
198
+ "eval_samples_per_second": 7.86,
199
+ "eval_steps_per_second": 0.983,
200
+ "step": 11020
201
+ }
202
+ ],
203
+ "logging_steps": 500,
204
+ "max_steps": 11020,
205
+ "num_input_tokens_seen": 0,
206
+ "num_train_epochs": 4,
207
+ "save_steps": 500,
208
+ "stateful_callbacks": {
209
+ "TrainerControl": {
210
+ "args": {
211
+ "should_epoch_stop": false,
212
+ "should_evaluate": false,
213
+ "should_log": false,
214
+ "should_save": true,
215
+ "should_training_stop": true
216
+ },
217
+ "attributes": {}
218
+ }
219
+ },
220
+ "total_flos": 2.319607890690048e+16,
221
+ "train_batch_size": 8,
222
+ "trial_name": null,
223
+ "trial_params": null
224
+ }
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8c38d559860d0ab1110e8f338bc12f77eba2b935bd16546c70c18b5be03ec9ce
3
- size 5368
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:104e102d00914bb4c1dc475d344189724634888a455631eceaac535c4e3e1b2c
3
+ size 5432
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff