theonegareth committed on
Commit
2bb3243
·
1 Parent(s): 45fd788

Upload IndoHoaxDetector models: Logistic Regression, SVM, Random Forest, Naive Bayes, TF-IDF vectorizer, and fine-tuned IndoBERT

Browse files
Files changed (45) hide show
  1. indobert_model/config.json +33 -0
  2. indobert_model/logs/events.out.tfevents.1763960326.MSI.18903.0 +3 -0
  3. indobert_model/model.safetensors +3 -0
  4. indobert_model/special_tokens_map.json +7 -0
  5. indobert_model/tokenizer.json +0 -0
  6. indobert_model/tokenizer_config.json +58 -0
  7. indobert_model/training/checkpoint-3149/config.json +33 -0
  8. indobert_model/training/checkpoint-3149/model.safetensors +3 -0
  9. indobert_model/training/checkpoint-3149/optimizer.pt +3 -0
  10. indobert_model/training/checkpoint-3149/rng_state.pth +3 -0
  11. indobert_model/training/checkpoint-3149/scheduler.pt +3 -0
  12. indobert_model/training/checkpoint-3149/special_tokens_map.json +7 -0
  13. indobert_model/training/checkpoint-3149/tokenizer.json +0 -0
  14. indobert_model/training/checkpoint-3149/tokenizer_config.json +58 -0
  15. indobert_model/training/checkpoint-3149/trainer_state.json +85 -0
  16. indobert_model/training/checkpoint-3149/training_args.bin +3 -0
  17. indobert_model/training/checkpoint-3149/vocab.txt +0 -0
  18. indobert_model/training/checkpoint-6298/config.json +33 -0
  19. indobert_model/training/checkpoint-6298/model.safetensors +3 -0
  20. indobert_model/training/checkpoint-6298/optimizer.pt +3 -0
  21. indobert_model/training/checkpoint-6298/rng_state.pth +3 -0
  22. indobert_model/training/checkpoint-6298/scheduler.pt +3 -0
  23. indobert_model/training/checkpoint-6298/special_tokens_map.json +7 -0
  24. indobert_model/training/checkpoint-6298/tokenizer.json +0 -0
  25. indobert_model/training/checkpoint-6298/tokenizer_config.json +58 -0
  26. indobert_model/training/checkpoint-6298/trainer_state.json +136 -0
  27. indobert_model/training/checkpoint-6298/training_args.bin +3 -0
  28. indobert_model/training/checkpoint-6298/vocab.txt +0 -0
  29. indobert_model/training/checkpoint-9447/config.json +33 -0
  30. indobert_model/training/checkpoint-9447/model.safetensors +3 -0
  31. indobert_model/training/checkpoint-9447/optimizer.pt +3 -0
  32. indobert_model/training/checkpoint-9447/rng_state.pth +3 -0
  33. indobert_model/training/checkpoint-9447/scheduler.pt +3 -0
  34. indobert_model/training/checkpoint-9447/special_tokens_map.json +7 -0
  35. indobert_model/training/checkpoint-9447/tokenizer.json +0 -0
  36. indobert_model/training/checkpoint-9447/tokenizer_config.json +58 -0
  37. indobert_model/training/checkpoint-9447/trainer_state.json +187 -0
  38. indobert_model/training/checkpoint-9447/training_args.bin +3 -0
  39. indobert_model/training/checkpoint-9447/vocab.txt +0 -0
  40. indobert_model/training_args.bin +3 -0
  41. indobert_model/vocab.txt +0 -0
  42. nb_model.pkl +3 -0
  43. rf_model.pkl +3 -0
  44. svm_model.pkl +3 -0
  45. tfidf_vectorizer.pkl +3 -0
indobert_model/config.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_num_labels": 5,
3
+ "architectures": [
4
+ "BertForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "classifier_dropout": null,
8
+ "directionality": "bidi",
9
+ "dtype": "float32",
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 768,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 3072,
15
+ "layer_norm_eps": 1e-12,
16
+ "max_position_embeddings": 512,
17
+ "model_type": "bert",
18
+ "num_attention_heads": 12,
19
+ "num_hidden_layers": 12,
20
+ "output_past": true,
21
+ "pad_token_id": 0,
22
+ "pooler_fc_size": 768,
23
+ "pooler_num_attention_heads": 12,
24
+ "pooler_num_fc_layers": 3,
25
+ "pooler_size_per_head": 128,
26
+ "pooler_type": "first_token_transform",
27
+ "position_embedding_type": "absolute",
28
+ "problem_type": "single_label_classification",
29
+ "transformers_version": "4.57.1",
30
+ "type_vocab_size": 2,
31
+ "use_cache": true,
32
+ "vocab_size": 50000
33
+ }
indobert_model/logs/events.out.tfevents.1763960326.MSI.18903.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13d31c4f157768f65b29ea90da6f227967713bf6e63890456c16e99ed1b7b86a
3
+ size 10393
indobert_model/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:307ae084e201e8867cd13ff4962636aa99a7604d25122fdf9d7c732e975b0452
3
+ size 497795072
indobert_model/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
indobert_model/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
indobert_model/tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "[UNK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "[CLS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "[SEP]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "4": {
36
+ "content": "[MASK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": true,
45
+ "cls_token": "[CLS]",
46
+ "do_basic_tokenize": true,
47
+ "do_lower_case": true,
48
+ "extra_special_tokens": {},
49
+ "mask_token": "[MASK]",
50
+ "model_max_length": 1000000000000000019884624838656,
51
+ "never_split": null,
52
+ "pad_token": "[PAD]",
53
+ "sep_token": "[SEP]",
54
+ "strip_accents": null,
55
+ "tokenize_chinese_chars": true,
56
+ "tokenizer_class": "BertTokenizer",
57
+ "unk_token": "[UNK]"
58
+ }
indobert_model/training/checkpoint-3149/config.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_num_labels": 5,
3
+ "architectures": [
4
+ "BertForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "classifier_dropout": null,
8
+ "directionality": "bidi",
9
+ "dtype": "float32",
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 768,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 3072,
15
+ "layer_norm_eps": 1e-12,
16
+ "max_position_embeddings": 512,
17
+ "model_type": "bert",
18
+ "num_attention_heads": 12,
19
+ "num_hidden_layers": 12,
20
+ "output_past": true,
21
+ "pad_token_id": 0,
22
+ "pooler_fc_size": 768,
23
+ "pooler_num_attention_heads": 12,
24
+ "pooler_num_fc_layers": 3,
25
+ "pooler_size_per_head": 128,
26
+ "pooler_type": "first_token_transform",
27
+ "position_embedding_type": "absolute",
28
+ "problem_type": "single_label_classification",
29
+ "transformers_version": "4.57.1",
30
+ "type_vocab_size": 2,
31
+ "use_cache": true,
32
+ "vocab_size": 50000
33
+ }
indobert_model/training/checkpoint-3149/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c13f7fba081ce22d68a7764710bf6a6c7a328e3e1adca74e35fec810bb86c067
3
+ size 497795072
indobert_model/training/checkpoint-3149/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ba43eaa26ab43815e4dca4d2095519467bf2bb47e29529f3c21c8faf9c82204
3
+ size 995714443
indobert_model/training/checkpoint-3149/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:15b6fd0ef06d2af26e25d6e95586490215b372cfac6bdba987fdcc8cca9f89d2
3
+ size 14645
indobert_model/training/checkpoint-3149/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9a692fdba2188743a1f0b8150a1afef797a26e1c622d4b613bb30cdba4de6f4c
3
+ size 1465
indobert_model/training/checkpoint-3149/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
indobert_model/training/checkpoint-3149/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
indobert_model/training/checkpoint-3149/tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "[UNK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "[CLS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "[SEP]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "4": {
36
+ "content": "[MASK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": true,
45
+ "cls_token": "[CLS]",
46
+ "do_basic_tokenize": true,
47
+ "do_lower_case": true,
48
+ "extra_special_tokens": {},
49
+ "mask_token": "[MASK]",
50
+ "model_max_length": 1000000000000000019884624838656,
51
+ "never_split": null,
52
+ "pad_token": "[PAD]",
53
+ "sep_token": "[SEP]",
54
+ "strip_accents": null,
55
+ "tokenize_chinese_chars": true,
56
+ "tokenizer_class": "BertTokenizer",
57
+ "unk_token": "[UNK]"
58
+ }
indobert_model/training/checkpoint-3149/trainer_state.json ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": 3149,
3
+ "best_metric": 0.9922191345772132,
4
+ "best_model_checkpoint": "/home/user/Machine Learning/IndoHoaxDetector/indobert_model/training/checkpoint-3149",
5
+ "epoch": 1.0,
6
+ "eval_steps": 500,
7
+ "global_step": 3149,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.15878056525881232,
14
+ "grad_norm": 6.066575527191162,
15
+ "learning_rate": 1.8943579972478037e-05,
16
+ "loss": 0.1413,
17
+ "step": 500
18
+ },
19
+ {
20
+ "epoch": 0.31756113051762463,
21
+ "grad_norm": 2.5585596561431885,
22
+ "learning_rate": 1.788504287075262e-05,
23
+ "loss": 0.079,
24
+ "step": 1000
25
+ },
26
+ {
27
+ "epoch": 0.476341695776437,
28
+ "grad_norm": 6.151716232299805,
29
+ "learning_rate": 1.6826505769027206e-05,
30
+ "loss": 0.0669,
31
+ "step": 1500
32
+ },
33
+ {
34
+ "epoch": 0.6351222610352493,
35
+ "grad_norm": 0.012287363409996033,
36
+ "learning_rate": 1.576796866730179e-05,
37
+ "loss": 0.0569,
38
+ "step": 2000
39
+ },
40
+ {
41
+ "epoch": 0.7939028262940616,
42
+ "grad_norm": 0.09763805568218231,
43
+ "learning_rate": 1.4709431565576376e-05,
44
+ "loss": 0.0528,
45
+ "step": 2500
46
+ },
47
+ {
48
+ "epoch": 0.952683391552874,
49
+ "grad_norm": 5.14717435836792,
50
+ "learning_rate": 1.3650894463850958e-05,
51
+ "loss": 0.0486,
52
+ "step": 3000
53
+ },
54
+ {
55
+ "epoch": 1.0,
56
+ "eval_accuracy": 0.9922191345772132,
57
+ "eval_loss": 0.03707018122076988,
58
+ "eval_runtime": 83.2394,
59
+ "eval_samples_per_second": 151.311,
60
+ "eval_steps_per_second": 9.467,
61
+ "step": 3149
62
+ }
63
+ ],
64
+ "logging_steps": 500,
65
+ "max_steps": 9447,
66
+ "num_input_tokens_seen": 0,
67
+ "num_train_epochs": 3,
68
+ "save_steps": 500,
69
+ "stateful_callbacks": {
70
+ "TrainerControl": {
71
+ "args": {
72
+ "should_epoch_stop": false,
73
+ "should_evaluate": false,
74
+ "should_log": false,
75
+ "should_save": true,
76
+ "should_training_stop": false
77
+ },
78
+ "attributes": {}
79
+ }
80
+ },
81
+ "total_flos": 3313686408967680.0,
82
+ "train_batch_size": 16,
83
+ "trial_name": null,
84
+ "trial_params": null
85
+ }
indobert_model/training/checkpoint-3149/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6bf0c0a22bf8643019429801930b8f01cdf82327a398b9e59c1324ef729e2b13
3
+ size 5905
indobert_model/training/checkpoint-3149/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
indobert_model/training/checkpoint-6298/config.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_num_labels": 5,
3
+ "architectures": [
4
+ "BertForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "classifier_dropout": null,
8
+ "directionality": "bidi",
9
+ "dtype": "float32",
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 768,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 3072,
15
+ "layer_norm_eps": 1e-12,
16
+ "max_position_embeddings": 512,
17
+ "model_type": "bert",
18
+ "num_attention_heads": 12,
19
+ "num_hidden_layers": 12,
20
+ "output_past": true,
21
+ "pad_token_id": 0,
22
+ "pooler_fc_size": 768,
23
+ "pooler_num_attention_heads": 12,
24
+ "pooler_num_fc_layers": 3,
25
+ "pooler_size_per_head": 128,
26
+ "pooler_type": "first_token_transform",
27
+ "position_embedding_type": "absolute",
28
+ "problem_type": "single_label_classification",
29
+ "transformers_version": "4.57.1",
30
+ "type_vocab_size": 2,
31
+ "use_cache": true,
32
+ "vocab_size": 50000
33
+ }
indobert_model/training/checkpoint-6298/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be74eb579e30894b49ed55edf4c95de3196873a9bfa5b36bcee44c9dbd70cff2
3
+ size 497795072
indobert_model/training/checkpoint-6298/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f712c1deabd1dd3687723f696eb73afdb9ae230f38489c8ef20f1841a0aef42e
3
+ size 995714443
indobert_model/training/checkpoint-6298/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:89d203a89830c5846a8f2be717f1889093bb9690135b7469beb281351a8a4dfa
3
+ size 14645
indobert_model/training/checkpoint-6298/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:48c004f15fd5ee7a2922596c91957017c15a22e51ff1e3e5f451ee212dab0f2b
3
+ size 1465
indobert_model/training/checkpoint-6298/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
indobert_model/training/checkpoint-6298/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
indobert_model/training/checkpoint-6298/tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "[UNK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "[CLS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "[SEP]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "4": {
36
+ "content": "[MASK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": true,
45
+ "cls_token": "[CLS]",
46
+ "do_basic_tokenize": true,
47
+ "do_lower_case": true,
48
+ "extra_special_tokens": {},
49
+ "mask_token": "[MASK]",
50
+ "model_max_length": 1000000000000000019884624838656,
51
+ "never_split": null,
52
+ "pad_token": "[PAD]",
53
+ "sep_token": "[SEP]",
54
+ "strip_accents": null,
55
+ "tokenize_chinese_chars": true,
56
+ "tokenizer_class": "BertTokenizer",
57
+ "unk_token": "[UNK]"
58
+ }
indobert_model/training/checkpoint-6298/trainer_state.json ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": 6298,
3
+ "best_metric": 0.993092497022628,
4
+ "best_model_checkpoint": "/home/user/Machine Learning/IndoHoaxDetector/indobert_model/training/checkpoint-6298",
5
+ "epoch": 2.0,
6
+ "eval_steps": 500,
7
+ "global_step": 6298,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.15878056525881232,
14
+ "grad_norm": 6.066575527191162,
15
+ "learning_rate": 1.8943579972478037e-05,
16
+ "loss": 0.1413,
17
+ "step": 500
18
+ },
19
+ {
20
+ "epoch": 0.31756113051762463,
21
+ "grad_norm": 2.5585596561431885,
22
+ "learning_rate": 1.788504287075262e-05,
23
+ "loss": 0.079,
24
+ "step": 1000
25
+ },
26
+ {
27
+ "epoch": 0.476341695776437,
28
+ "grad_norm": 6.151716232299805,
29
+ "learning_rate": 1.6826505769027206e-05,
30
+ "loss": 0.0669,
31
+ "step": 1500
32
+ },
33
+ {
34
+ "epoch": 0.6351222610352493,
35
+ "grad_norm": 0.012287363409996033,
36
+ "learning_rate": 1.576796866730179e-05,
37
+ "loss": 0.0569,
38
+ "step": 2000
39
+ },
40
+ {
41
+ "epoch": 0.7939028262940616,
42
+ "grad_norm": 0.09763805568218231,
43
+ "learning_rate": 1.4709431565576376e-05,
44
+ "loss": 0.0528,
45
+ "step": 2500
46
+ },
47
+ {
48
+ "epoch": 0.952683391552874,
49
+ "grad_norm": 5.14717435836792,
50
+ "learning_rate": 1.3650894463850958e-05,
51
+ "loss": 0.0486,
52
+ "step": 3000
53
+ },
54
+ {
55
+ "epoch": 1.0,
56
+ "eval_accuracy": 0.9922191345772132,
57
+ "eval_loss": 0.03707018122076988,
58
+ "eval_runtime": 83.2394,
59
+ "eval_samples_per_second": 151.311,
60
+ "eval_steps_per_second": 9.467,
61
+ "step": 3149
62
+ },
63
+ {
64
+ "epoch": 1.1114639568116862,
65
+ "grad_norm": 0.0047888318076729774,
66
+ "learning_rate": 1.2592357362125544e-05,
67
+ "loss": 0.0229,
68
+ "step": 3500
69
+ },
70
+ {
71
+ "epoch": 1.2702445220704985,
72
+ "grad_norm": 0.043374646455049515,
73
+ "learning_rate": 1.1533820260400128e-05,
74
+ "loss": 0.019,
75
+ "step": 4000
76
+ },
77
+ {
78
+ "epoch": 1.4290250873293109,
79
+ "grad_norm": 0.03199789300560951,
80
+ "learning_rate": 1.0475283158674712e-05,
81
+ "loss": 0.0191,
82
+ "step": 4500
83
+ },
84
+ {
85
+ "epoch": 1.5878056525881232,
86
+ "grad_norm": 0.016201680526137352,
87
+ "learning_rate": 9.416746056949296e-06,
88
+ "loss": 0.0247,
89
+ "step": 5000
90
+ },
91
+ {
92
+ "epoch": 1.7465862178469356,
93
+ "grad_norm": 0.003989357966929674,
94
+ "learning_rate": 8.35820895522388e-06,
95
+ "loss": 0.021,
96
+ "step": 5500
97
+ },
98
+ {
99
+ "epoch": 1.9053667831057477,
100
+ "grad_norm": 0.013209746219217777,
101
+ "learning_rate": 7.299671853498465e-06,
102
+ "loss": 0.0171,
103
+ "step": 6000
104
+ },
105
+ {
106
+ "epoch": 2.0,
107
+ "eval_accuracy": 0.993092497022628,
108
+ "eval_loss": 0.033595748245716095,
109
+ "eval_runtime": 83.3444,
110
+ "eval_samples_per_second": 151.12,
111
+ "eval_steps_per_second": 9.455,
112
+ "step": 6298
113
+ }
114
+ ],
115
+ "logging_steps": 500,
116
+ "max_steps": 9447,
117
+ "num_input_tokens_seen": 0,
118
+ "num_train_epochs": 3,
119
+ "save_steps": 500,
120
+ "stateful_callbacks": {
121
+ "TrainerControl": {
122
+ "args": {
123
+ "should_epoch_stop": false,
124
+ "should_evaluate": false,
125
+ "should_log": false,
126
+ "should_save": true,
127
+ "should_training_stop": false
128
+ },
129
+ "attributes": {}
130
+ }
131
+ },
132
+ "total_flos": 6627372817935360.0,
133
+ "train_batch_size": 16,
134
+ "trial_name": null,
135
+ "trial_params": null
136
+ }
indobert_model/training/checkpoint-6298/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6bf0c0a22bf8643019429801930b8f01cdf82327a398b9e59c1324ef729e2b13
3
+ size 5905
indobert_model/training/checkpoint-6298/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
indobert_model/training/checkpoint-9447/config.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_num_labels": 5,
3
+ "architectures": [
4
+ "BertForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "classifier_dropout": null,
8
+ "directionality": "bidi",
9
+ "dtype": "float32",
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 768,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 3072,
15
+ "layer_norm_eps": 1e-12,
16
+ "max_position_embeddings": 512,
17
+ "model_type": "bert",
18
+ "num_attention_heads": 12,
19
+ "num_hidden_layers": 12,
20
+ "output_past": true,
21
+ "pad_token_id": 0,
22
+ "pooler_fc_size": 768,
23
+ "pooler_num_attention_heads": 12,
24
+ "pooler_num_fc_layers": 3,
25
+ "pooler_size_per_head": 128,
26
+ "pooler_type": "first_token_transform",
27
+ "position_embedding_type": "absolute",
28
+ "problem_type": "single_label_classification",
29
+ "transformers_version": "4.57.1",
30
+ "type_vocab_size": 2,
31
+ "use_cache": true,
32
+ "vocab_size": 50000
33
+ }
indobert_model/training/checkpoint-9447/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:307ae084e201e8867cd13ff4962636aa99a7604d25122fdf9d7c732e975b0452
3
+ size 497795072
indobert_model/training/checkpoint-9447/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:04e0c904d346a06476020de43f96c94bee971931520a69e5e4d2806d60817b09
3
+ size 995714443
indobert_model/training/checkpoint-9447/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ab24f3da118b50afdf186f60aa31ba4e4512fdcf26c8fd14c3eabf53aeec8b9
3
+ size 14645
indobert_model/training/checkpoint-9447/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2792ca39051534f3ef5a5f1445de1c961ae11679dc2c461c0a424ad030d41e70
3
+ size 1465
indobert_model/training/checkpoint-9447/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
indobert_model/training/checkpoint-9447/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
indobert_model/training/checkpoint-9447/tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "[UNK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "[CLS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "[SEP]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "4": {
36
+ "content": "[MASK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": true,
45
+ "cls_token": "[CLS]",
46
+ "do_basic_tokenize": true,
47
+ "do_lower_case": true,
48
+ "extra_special_tokens": {},
49
+ "mask_token": "[MASK]",
50
+ "model_max_length": 1000000000000000019884624838656,
51
+ "never_split": null,
52
+ "pad_token": "[PAD]",
53
+ "sep_token": "[SEP]",
54
+ "strip_accents": null,
55
+ "tokenize_chinese_chars": true,
56
+ "tokenizer_class": "BertTokenizer",
57
+ "unk_token": "[UNK]"
58
+ }
indobert_model/training/checkpoint-9447/trainer_state.json ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": 9447,
3
+ "best_metric": 0.9938070662961492,
4
+ "best_model_checkpoint": "/home/user/Machine Learning/IndoHoaxDetector/indobert_model/training/checkpoint-9447",
5
+ "epoch": 3.0,
6
+ "eval_steps": 500,
7
+ "global_step": 9447,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.15878056525881232,
14
+ "grad_norm": 6.066575527191162,
15
+ "learning_rate": 1.8943579972478037e-05,
16
+ "loss": 0.1413,
17
+ "step": 500
18
+ },
19
+ {
20
+ "epoch": 0.31756113051762463,
21
+ "grad_norm": 2.5585596561431885,
22
+ "learning_rate": 1.788504287075262e-05,
23
+ "loss": 0.079,
24
+ "step": 1000
25
+ },
26
+ {
27
+ "epoch": 0.476341695776437,
28
+ "grad_norm": 6.151716232299805,
29
+ "learning_rate": 1.6826505769027206e-05,
30
+ "loss": 0.0669,
31
+ "step": 1500
32
+ },
33
+ {
34
+ "epoch": 0.6351222610352493,
35
+ "grad_norm": 0.012287363409996033,
36
+ "learning_rate": 1.576796866730179e-05,
37
+ "loss": 0.0569,
38
+ "step": 2000
39
+ },
40
+ {
41
+ "epoch": 0.7939028262940616,
42
+ "grad_norm": 0.09763805568218231,
43
+ "learning_rate": 1.4709431565576376e-05,
44
+ "loss": 0.0528,
45
+ "step": 2500
46
+ },
47
+ {
48
+ "epoch": 0.952683391552874,
49
+ "grad_norm": 5.14717435836792,
50
+ "learning_rate": 1.3650894463850958e-05,
51
+ "loss": 0.0486,
52
+ "step": 3000
53
+ },
54
+ {
55
+ "epoch": 1.0,
56
+ "eval_accuracy": 0.9922191345772132,
57
+ "eval_loss": 0.03707018122076988,
58
+ "eval_runtime": 83.2394,
59
+ "eval_samples_per_second": 151.311,
60
+ "eval_steps_per_second": 9.467,
61
+ "step": 3149
62
+ },
63
+ {
64
+ "epoch": 1.1114639568116862,
65
+ "grad_norm": 0.0047888318076729774,
66
+ "learning_rate": 1.2592357362125544e-05,
67
+ "loss": 0.0229,
68
+ "step": 3500
69
+ },
70
+ {
71
+ "epoch": 1.2702445220704985,
72
+ "grad_norm": 0.043374646455049515,
73
+ "learning_rate": 1.1533820260400128e-05,
74
+ "loss": 0.019,
75
+ "step": 4000
76
+ },
77
+ {
78
+ "epoch": 1.4290250873293109,
79
+ "grad_norm": 0.03199789300560951,
80
+ "learning_rate": 1.0475283158674712e-05,
81
+ "loss": 0.0191,
82
+ "step": 4500
83
+ },
84
+ {
85
+ "epoch": 1.5878056525881232,
86
+ "grad_norm": 0.016201680526137352,
87
+ "learning_rate": 9.416746056949296e-06,
88
+ "loss": 0.0247,
89
+ "step": 5000
90
+ },
91
+ {
92
+ "epoch": 1.7465862178469356,
93
+ "grad_norm": 0.003989357966929674,
94
+ "learning_rate": 8.35820895522388e-06,
95
+ "loss": 0.021,
96
+ "step": 5500
97
+ },
98
+ {
99
+ "epoch": 1.9053667831057477,
100
+ "grad_norm": 0.013209746219217777,
101
+ "learning_rate": 7.299671853498465e-06,
102
+ "loss": 0.0171,
103
+ "step": 6000
104
+ },
105
+ {
106
+ "epoch": 2.0,
107
+ "eval_accuracy": 0.993092497022628,
108
+ "eval_loss": 0.033595748245716095,
109
+ "eval_runtime": 83.3444,
110
+ "eval_samples_per_second": 151.12,
111
+ "eval_steps_per_second": 9.455,
112
+ "step": 6298
113
+ },
114
+ {
115
+ "epoch": 2.06414734836456,
116
+ "grad_norm": 0.0014694147976115346,
117
+ "learning_rate": 6.24113475177305e-06,
118
+ "loss": 0.0149,
119
+ "step": 6500
120
+ },
121
+ {
122
+ "epoch": 2.2229279136233724,
123
+ "grad_norm": 0.0010297272820025682,
124
+ "learning_rate": 5.182597650047635e-06,
125
+ "loss": 0.0042,
126
+ "step": 7000
127
+ },
128
+ {
129
+ "epoch": 2.3817084788821847,
130
+ "grad_norm": 0.0013135488843545318,
131
+ "learning_rate": 4.124060548322219e-06,
132
+ "loss": 0.0052,
133
+ "step": 7500
134
+ },
135
+ {
136
+ "epoch": 2.540489044140997,
137
+ "grad_norm": 0.0010974216274917126,
138
+ "learning_rate": 3.0655234465968036e-06,
139
+ "loss": 0.0059,
140
+ "step": 8000
141
+ },
142
+ {
143
+ "epoch": 2.6992696093998094,
144
+ "grad_norm": 0.0037105802912265062,
145
+ "learning_rate": 2.0069863448713877e-06,
146
+ "loss": 0.003,
147
+ "step": 8500
148
+ },
149
+ {
150
+ "epoch": 2.8580501746586218,
151
+ "grad_norm": 0.002627098932862282,
152
+ "learning_rate": 9.484492431459723e-07,
153
+ "loss": 0.0033,
154
+ "step": 9000
155
+ },
156
+ {
157
+ "epoch": 3.0,
158
+ "eval_accuracy": 0.9938070662961492,
159
+ "eval_loss": 0.03967500478029251,
160
+ "eval_runtime": 83.2653,
161
+ "eval_samples_per_second": 151.264,
162
+ "eval_steps_per_second": 9.464,
163
+ "step": 9447
164
+ }
165
+ ],
166
+ "logging_steps": 500,
167
+ "max_steps": 9447,
168
+ "num_input_tokens_seen": 0,
169
+ "num_train_epochs": 3,
170
+ "save_steps": 500,
171
+ "stateful_callbacks": {
172
+ "TrainerControl": {
173
+ "args": {
174
+ "should_epoch_stop": false,
175
+ "should_evaluate": false,
176
+ "should_log": false,
177
+ "should_save": true,
178
+ "should_training_stop": true
179
+ },
180
+ "attributes": {}
181
+ }
182
+ },
183
+ "total_flos": 9941059226903040.0,
184
+ "train_batch_size": 16,
185
+ "trial_name": null,
186
+ "trial_params": null
187
+ }
indobert_model/training/checkpoint-9447/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6bf0c0a22bf8643019429801930b8f01cdf82327a398b9e59c1324ef729e2b13
3
+ size 5905
indobert_model/training/checkpoint-9447/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
indobert_model/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6bf0c0a22bf8643019429801930b8f01cdf82327a398b9e59c1324ef729e2b13
3
+ size 5905
indobert_model/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
nb_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f8e3a4506ecd3d8bf8fc85863aeb2235b5a0753ad5e92046c263179dec5daed9
3
+ size 640791
rf_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:957b646ffb4cbe7a9e74d02aa68725ea5694ae5a753f1e8c72bd17602c6f2cf7
3
+ size 96397897
svm_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b1690d0514def4aa700bda76fb0ec5feca9ce142a1fe9d1ddbce201fd77a2f8
3
+ size 160731
tfidf_vectorizer.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ad348a23d8565565a5ee6f06d49b3408083aa05e4ff6bfcca26597ef328acc0
3
+ size 733701