seedflora committed
Commit 1af38f1 · verified · 1 Parent(s): 17c4bab

Upload fine-tuned model
checkpoint-210/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "architectures": [
+ "BertForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "dtype": "float32",
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "negatif",
+ "1": "positif"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "negatif": 0,
+ "positif": 1
+ },
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 2,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "transformers_version": "4.56.2",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 32000
+ }
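
For orientation (not part of the commit): a minimal sketch of how a checkpoint carrying this config could be loaded for inference. The local path "checkpoint-210" and the example sentence are assumptions; substitute the published repo id or whichever directory actually holds these files.

# Minimal sketch, assuming the files above sit in a local "checkpoint-210" directory.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

ckpt = "checkpoint-210"  # hypothetical path; replace with the real repo id or folder
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForSequenceClassification.from_pretrained(ckpt)
model.eval()

# Example Indonesian sentence; the label names come from id2label in config.json.
inputs = tokenizer("pelayanannya sangat memuaskan", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
pred = int(logits.argmax(dim=-1))
print(model.config.id2label[pred])  # "negatif" or "positif"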
checkpoint-210/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68fc257bd80f73e6ef8e7f837806ccfa1e745b5fb6084a9fce148874feb39e0a
+ size 442499064
checkpoint-210/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:365102e7e7fff5f7ab9dd0edd5a5db3acef8147499559e8737aff13ddb9b4cd7
+ size 885119226
checkpoint-210/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3caf771d90b07a4bf1ba5dae59ed71d170fe6be10bc39eabb5a3694b142261e8
+ size 14244
checkpoint-210/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50e76f4c0c499586b096644030731e328a6e0a4953a8907981c7f86bed018279
+ size 988
checkpoint-210/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0fb00c9dd3f04ada0000241a20931ec02fcc7650cc825684313dfb922fcc44f
+ size 1064
checkpoint-210/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
checkpoint-210/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-210/tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "4": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
+ "do_lower_case": true,
+ "extra_special_tokens": {},
+ "full_tokenizer_file": null,
+ "mask_token": "[MASK]",
+ "model_max_length": 1000000000000000019884624838656,
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
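
As a quick illustration (an assumption, not part of the commit): the settings above describe a lowercasing WordPiece tokenizer with [CLS]/[SEP] framing, which can be checked once the files are available locally.

# Minimal sketch, assuming the tokenizer files live in "checkpoint-210" (hypothetical path).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-210")
enc = tok("Produk BAGUS sekali")
print(tok.convert_ids_to_tokens(enc["input_ids"]))
# Expected: ['[CLS]', ...lower-cased wordpieces..., '[SEP]'],
# because do_lower_case is true and the cls/sep tokens are [CLS]/[SEP].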
checkpoint-210/trainer_state.json ADDED
@@ -0,0 +1,98 @@
+ {
+ "best_global_step": 210,
+ "best_metric": 0.9117647058823529,
+ "best_model_checkpoint": "outputs_v4\\cahya_bert-base-indonesian-1.5G\\checkpoint-210",
+ "epoch": 3.0,
+ "eval_steps": 500,
+ "global_step": 210,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.7142857142857143,
+ "grad_norm": 6.490436553955078,
+ "learning_rate": 1.7037037037037038e-05,
+ "loss": 0.5366,
+ "step": 50
+ },
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.8623188405797102,
+ "eval_f1": 0.8689655172413793,
+ "eval_loss": 0.3315995931625366,
+ "eval_precision": 0.8289473684210527,
+ "eval_recall": 0.9130434782608695,
+ "eval_runtime": 0.4292,
+ "eval_samples_per_second": 321.522,
+ "eval_steps_per_second": 20.969,
+ "step": 70
+ },
+ {
+ "epoch": 1.4285714285714286,
+ "grad_norm": 9.707452774047852,
+ "learning_rate": 1.1851851851851852e-05,
+ "loss": 0.2742,
+ "step": 100
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.8985507246376812,
+ "eval_f1": 0.8985507246376812,
+ "eval_loss": 0.30693408846855164,
+ "eval_precision": 0.8985507246376812,
+ "eval_recall": 0.8985507246376812,
+ "eval_runtime": 0.4149,
+ "eval_samples_per_second": 332.636,
+ "eval_steps_per_second": 21.694,
+ "step": 140
+ },
+ {
+ "epoch": 2.142857142857143,
+ "grad_norm": 7.906553745269775,
+ "learning_rate": 6.560846560846561e-06,
+ "loss": 0.1969,
+ "step": 150
+ },
+ {
+ "epoch": 2.857142857142857,
+ "grad_norm": 9.305294036865234,
+ "learning_rate": 1.26984126984127e-06,
+ "loss": 0.1634,
+ "step": 200
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.9130434782608695,
+ "eval_f1": 0.9117647058823529,
+ "eval_loss": 0.32407820224761963,
+ "eval_precision": 0.9253731343283582,
+ "eval_recall": 0.8985507246376812,
+ "eval_runtime": 0.4315,
+ "eval_samples_per_second": 319.84,
+ "eval_steps_per_second": 20.859,
+ "step": 210
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 210,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 125924848317540.0,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+ }
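
The log_history above reports accuracy, precision, recall, and f1 at each epoch, with the epoch-3 f1 (0.9118) stored as best_metric. The training script is not part of this commit; the sketch below is one plausible compute_metrics that would produce exactly these keys, shown only for context.

# Hypothetical compute_metrics for Trainer; binary averaging matches the two labels
# (negatif/positif) in config.json. Not taken from the uploaded files.
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    precision, recall, f1, _ = precision_recall_fscore_support(
        labels, preds, average="binary"
    )
    return {
        "accuracy": accuracy_score(labels, preds),
        "precision": precision,
        "recall": recall,
        "f1": f1,
    }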
checkpoint-210/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e70f40efafeb3629dd76920ea354a3a40e9d4bf63e3b13018236f055ba76341
+ size 5432
checkpoint-210/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
config.json CHANGED
@@ -1,12 +1,10 @@
{
"architectures": [
- "RobertaForSequenceClassification"
+ "BertForSequenceClassification"
],
"attention_probs_dropout_prob": 0.1,
- "bos_token_id": 0,
"classifier_dropout": null,
"dtype": "float32",
- "eos_token_id": 2,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
@@ -21,17 +19,16 @@
"negatif": 0,
"positif": 1
},
- "layer_norm_eps": 1e-05,
- "max_position_embeddings": 514,
- "model_type": "roberta",
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
"num_attention_heads": 12,
"num_hidden_layers": 12,
- "pad_token_id": 1,
+ "pad_token_id": 2,
"position_embedding_type": "absolute",
"problem_type": "single_label_classification",
- "total_flos": 1.1223329811709559e+19,
"transformers_version": "4.56.2",
- "type_vocab_size": 1,
+ "type_vocab_size": 2,
"use_cache": true,
- "vocab_size": 52000
+ "vocab_size": 32000
}
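
This diff swaps the old RoBERTa-style config for the BERT one used by the new checkpoint. A small sanity check (a sketch, assuming the new config.json is readable from a local directory) can confirm the fields that change here:

# Hypothetical check; "." stands for whatever directory holds the new config.json.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained(".")
assert cfg.model_type == "bert"
assert cfg.vocab_size == 32000
assert cfg.pad_token_id == 2
assert cfg.max_position_embeddings == 512
print(cfg.architectures)  # ['BertForSequenceClassification']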
model.safetensors CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
- oid sha256:d4a69c34e2369bfbf0ef6a13709c5a81584bcad6b322b30e255f0114db218264
- size 503942744
+ oid sha256:68fc257bd80f73e6ef8e7f837806ccfa1e745b5fb6084a9fce148874feb39e0a
+ size 442499064
special_tokens_map.json CHANGED
@@ -1,51 +1,7 @@
{
- "bos_token": {
- "content": "<s>",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- },
- "cls_token": {
- "content": "<s>",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- },
- "eos_token": {
- "content": "</s>",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- },
- "mask_token": {
- "content": "<mask>",
- "lstrip": true,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "pad_token": {
- "content": "<pad>",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- },
- "sep_token": {
- "content": "</s>",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- },
- "unk_token": {
- "content": "<unk>",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- }
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
}
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,63 +1,59 @@
{
- "add_prefix_space": false,
"added_tokens_decoder": {
"0": {
- "content": "<s>",
+ "content": "[UNK]",
"lstrip": false,
- "normalized": true,
+ "normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"1": {
- "content": "<pad>",
+ "content": "[SEP]",
"lstrip": false,
- "normalized": true,
+ "normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"2": {
- "content": "</s>",
+ "content": "[PAD]",
"lstrip": false,
- "normalized": true,
+ "normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"3": {
- "content": "<unk>",
+ "content": "[CLS]",
"lstrip": false,
- "normalized": true,
+ "normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"4": {
- "content": "<mask>",
- "lstrip": true,
+ "content": "[MASK]",
+ "lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
}
},
- "bos_token": "<s>",
- "clean_up_tokenization_spaces": false,
- "cls_token": "<s>",
- "eos_token": "</s>",
- "errors": "replace",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
+ "do_lower_case": true,
"extra_special_tokens": {},
"full_tokenizer_file": null,
- "mask_token": "<mask>",
- "max_length": 128,
+ "mask_token": "[MASK]",
"model_max_length": 1000000000000000019884624838656,
- "pad_token": "<pad>",
- "sep_token": "</s>",
- "stride": 0,
- "tokenizer_class": "RobertaTokenizer",
- "trim_offsets": true,
- "truncation_side": "right",
- "truncation_strategy": "longest_first",
- "unk_token": "<unk>"
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e70f40efafeb3629dd76920ea354a3a40e9d4bf63e3b13018236f055ba76341
+ size 5432
vocab.txt ADDED
The diff for this file is too large to render. See raw diff