marmarg2 committed on
Commit
6469636
·
1 Parent(s): abdc95c

Upload 34 files

Browse files
Files changed (33) hide show
  1. BERT-mULT-t-MMG/README.md +63 -0
  2. BERT-mULT-t-MMG/checkpoint-102/config.json +32 -0
  3. BERT-mULT-t-MMG/checkpoint-102/optimizer.pt +3 -0
  4. BERT-mULT-t-MMG/checkpoint-102/pytorch_model.bin +3 -0
  5. BERT-mULT-t-MMG/checkpoint-102/rng_state.pth +3 -0
  6. BERT-mULT-t-MMG/checkpoint-102/scheduler.pt +3 -0
  7. BERT-mULT-t-MMG/checkpoint-102/trainer_state.json +43 -0
  8. BERT-mULT-t-MMG/checkpoint-102/training_args.bin +3 -0
  9. BERT-mULT-t-MMG/checkpoint-136/config.json +32 -0
  10. BERT-mULT-t-MMG/checkpoint-136/optimizer.pt +3 -0
  11. BERT-mULT-t-MMG/checkpoint-136/pytorch_model.bin +3 -0
  12. BERT-mULT-t-MMG/checkpoint-136/rng_state.pth +3 -0
  13. BERT-mULT-t-MMG/checkpoint-136/scheduler.pt +3 -0
  14. BERT-mULT-t-MMG/checkpoint-136/trainer_state.json +52 -0
  15. BERT-mULT-t-MMG/checkpoint-136/training_args.bin +3 -0
  16. BERT-mULT-t-MMG/checkpoint-34/optimizer.pt +3 -0
  17. BERT-mULT-t-MMG/checkpoint-34/pytorch_model.bin +3 -0
  18. BERT-mULT-t-MMG/checkpoint-34/rng_state.pth +3 -0
  19. BERT-mULT-t-MMG/checkpoint-34/scheduler.pt +3 -0
  20. BERT-mULT-t-MMG/checkpoint-34/trainer_state.json +25 -0
  21. BERT-mULT-t-MMG/checkpoint-34/training_args.bin +3 -0
  22. BERT-mULT-t-MMG/checkpoint-68/config.json +32 -0
  23. BERT-mULT-t-MMG/checkpoint-68/optimizer.pt +3 -0
  24. BERT-mULT-t-MMG/checkpoint-68/pytorch_model.bin +3 -0
  25. BERT-mULT-t-MMG/checkpoint-68/rng_state.pth +3 -0
  26. BERT-mULT-t-MMG/checkpoint-68/scheduler.pt +3 -0
  27. BERT-mULT-t-MMG/checkpoint-68/trainer_state.json +34 -0
  28. BERT-mULT-t-MMG/checkpoint-68/training_args.bin +3 -0
  29. BERT-mULT-t-MMG/config.json +32 -0
  30. BERT-mULT-t-MMG/pytorch_model.bin +3 -0
  31. BERT-mULT-t-MMG/runs/Aug10_14-39-26_practicas/events.out.tfevents.1691671191.practicas.2817540.0 +3 -0
  32. BERT-mULT-t-MMG/runs/Aug10_14-39-26_practicas/events.out.tfevents.1691671338.practicas.2817540.1 +3 -0
  33. BERT-mULT-t-MMG/training_args.bin +3 -0
BERT-mULT-t-MMG/README.md ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: bert-base-multilingual-cased
4
+ tags:
5
+ - generated_from_trainer
6
+ metrics:
7
+ - accuracy
8
+ model-index:
9
+ - name: BERT-mULT-t-MMG
10
+ results: []
11
+ ---
12
+
13
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
14
+ should probably proofread and complete it, then remove this comment. -->
15
+
16
+ # BERT-mULT-t-MMG
17
+
18
+ This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on an unknown dataset.
19
+ It achieves the following results on the evaluation set:
20
+ - Loss: 0.6791
21
+ - Accuracy: 0.6515
22
+
23
+ ## Model description
24
+
25
+ More information needed
26
+
27
+ ## Intended uses & limitations
28
+
29
+ More information needed
30
+
31
+ ## Training and evaluation data
32
+
33
+ More information needed
34
+
35
+ ## Training procedure
36
+
37
+ ### Training hyperparameters
38
+
39
+ The following hyperparameters were used during training:
40
+ - learning_rate: 5e-05
41
+ - train_batch_size: 8
42
+ - eval_batch_size: 8
43
+ - seed: 42
44
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
45
+ - lr_scheduler_type: linear
46
+ - num_epochs: 10
47
+
48
+ ### Training results
49
+
50
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
51
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
52
+ | No log | 1.0 | 34 | 0.6893 | 0.5303 |
53
+ | No log | 2.0 | 68 | 0.6791 | 0.6515 |
54
+ | No log | 3.0 | 102 | 0.7651 | 0.5303 |
55
+ | No log | 4.0 | 136 | 0.6895 | 0.5758 |
56
+
57
+
58
+ ### Framework versions
59
+
60
+ - Transformers 4.31.0
61
+ - Pytorch 2.0.1+cu117
62
+ - Datasets 2.14.0
63
+ - Tokenizers 0.13.3
BERT-mULT-t-MMG/checkpoint-102/config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "bert-base-multilingual-cased",
3
+ "architectures": [
4
+ "BertForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "classifier_dropout": null,
8
+ "directionality": "bidi",
9
+ "hidden_act": "gelu",
10
+ "hidden_dropout_prob": 0.1,
11
+ "hidden_size": 768,
12
+ "initializer_range": 0.02,
13
+ "intermediate_size": 3072,
14
+ "layer_norm_eps": 1e-12,
15
+ "max_position_embeddings": 512,
16
+ "model_type": "bert",
17
+ "num_attention_heads": 12,
18
+ "num_hidden_layers": 12,
19
+ "pad_token_id": 0,
20
+ "pooler_fc_size": 768,
21
+ "pooler_num_attention_heads": 12,
22
+ "pooler_num_fc_layers": 3,
23
+ "pooler_size_per_head": 128,
24
+ "pooler_type": "first_token_transform",
25
+ "position_embedding_type": "absolute",
26
+ "problem_type": "single_label_classification",
27
+ "torch_dtype": "float32",
28
+ "transformers_version": "4.31.0",
29
+ "type_vocab_size": 2,
30
+ "use_cache": true,
31
+ "vocab_size": 119547
32
+ }
BERT-mULT-t-MMG/checkpoint-102/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bdee84e6180e08a3d772a6220f38718934fe49a519228fdb1e1f74fec1732ef9
3
+ size 1422957573
BERT-mULT-t-MMG/checkpoint-102/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6ff36a0c4155d1ce58269a8675a2541dbe605b362f4e29ace3c8ed0d96cbc3bb
3
+ size 711488305
BERT-mULT-t-MMG/checkpoint-102/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8166e4672a330b2152739645b7617bc8d816f164b0c753c34fdd50ed3c40b0ce
3
+ size 14575
BERT-mULT-t-MMG/checkpoint-102/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90fa10e0cfd5b10317e4c2bf4786b790892ca6387b6cf42d60555e376f66d3dd
3
+ size 627
BERT-mULT-t-MMG/checkpoint-102/trainer_state.json ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 0.6790980100631714,
3
+ "best_model_checkpoint": "BERT-mULT-t-MMG/checkpoint-68",
4
+ "epoch": 3.0,
5
+ "global_step": 102,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 1.0,
12
+ "eval_accuracy": 0.5303030303030303,
13
+ "eval_loss": 0.6892990469932556,
14
+ "eval_runtime": 2.4367,
15
+ "eval_samples_per_second": 27.086,
16
+ "eval_steps_per_second": 3.693,
17
+ "step": 34
18
+ },
19
+ {
20
+ "epoch": 2.0,
21
+ "eval_accuracy": 0.6515151515151515,
22
+ "eval_loss": 0.6790980100631714,
23
+ "eval_runtime": 2.4536,
24
+ "eval_samples_per_second": 26.899,
25
+ "eval_steps_per_second": 3.668,
26
+ "step": 68
27
+ },
28
+ {
29
+ "epoch": 3.0,
30
+ "eval_accuracy": 0.5303030303030303,
31
+ "eval_loss": 0.7650681734085083,
32
+ "eval_runtime": 2.4451,
33
+ "eval_samples_per_second": 26.993,
34
+ "eval_steps_per_second": 3.681,
35
+ "step": 102
36
+ }
37
+ ],
38
+ "max_steps": 340,
39
+ "num_train_epochs": 10,
40
+ "total_flos": 211541288509440.0,
41
+ "trial_name": null,
42
+ "trial_params": null
43
+ }
BERT-mULT-t-MMG/checkpoint-102/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdbe8be9883b673d6afa80be7ca632d77e0b2363bb941b5cb884746b73b819ab
3
+ size 3963
BERT-mULT-t-MMG/checkpoint-136/config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "bert-base-multilingual-cased",
3
+ "architectures": [
4
+ "BertForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "classifier_dropout": null,
8
+ "directionality": "bidi",
9
+ "hidden_act": "gelu",
10
+ "hidden_dropout_prob": 0.1,
11
+ "hidden_size": 768,
12
+ "initializer_range": 0.02,
13
+ "intermediate_size": 3072,
14
+ "layer_norm_eps": 1e-12,
15
+ "max_position_embeddings": 512,
16
+ "model_type": "bert",
17
+ "num_attention_heads": 12,
18
+ "num_hidden_layers": 12,
19
+ "pad_token_id": 0,
20
+ "pooler_fc_size": 768,
21
+ "pooler_num_attention_heads": 12,
22
+ "pooler_num_fc_layers": 3,
23
+ "pooler_size_per_head": 128,
24
+ "pooler_type": "first_token_transform",
25
+ "position_embedding_type": "absolute",
26
+ "problem_type": "single_label_classification",
27
+ "torch_dtype": "float32",
28
+ "transformers_version": "4.31.0",
29
+ "type_vocab_size": 2,
30
+ "use_cache": true,
31
+ "vocab_size": 119547
32
+ }
BERT-mULT-t-MMG/checkpoint-136/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c7dd3230e91cfdc2c455123eb6cacb8703cf4627e717b8b556cc7c12380c6c6
3
+ size 1422957573
BERT-mULT-t-MMG/checkpoint-136/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:930090b0a84280731a09b7ad7a39210f0983f1eff4d0218d6b15cfbb82fa825e
3
+ size 711488305
BERT-mULT-t-MMG/checkpoint-136/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cb7bae29515f25b424d0666ae706849c050fab549ee662c61896e2b6baa81c35
3
+ size 14575
BERT-mULT-t-MMG/checkpoint-136/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8265df91ddb3a8c71b4208b3313c24acdf60ac5ce8681251e2df91b897c72abd
3
+ size 627
BERT-mULT-t-MMG/checkpoint-136/trainer_state.json ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 0.6790980100631714,
3
+ "best_model_checkpoint": "BERT-mULT-t-MMG/checkpoint-68",
4
+ "epoch": 4.0,
5
+ "global_step": 136,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 1.0,
12
+ "eval_accuracy": 0.5303030303030303,
13
+ "eval_loss": 0.6892990469932556,
14
+ "eval_runtime": 2.4367,
15
+ "eval_samples_per_second": 27.086,
16
+ "eval_steps_per_second": 3.693,
17
+ "step": 34
18
+ },
19
+ {
20
+ "epoch": 2.0,
21
+ "eval_accuracy": 0.6515151515151515,
22
+ "eval_loss": 0.6790980100631714,
23
+ "eval_runtime": 2.4536,
24
+ "eval_samples_per_second": 26.899,
25
+ "eval_steps_per_second": 3.668,
26
+ "step": 68
27
+ },
28
+ {
29
+ "epoch": 3.0,
30
+ "eval_accuracy": 0.5303030303030303,
31
+ "eval_loss": 0.7650681734085083,
32
+ "eval_runtime": 2.4451,
33
+ "eval_samples_per_second": 26.993,
34
+ "eval_steps_per_second": 3.681,
35
+ "step": 102
36
+ },
37
+ {
38
+ "epoch": 4.0,
39
+ "eval_accuracy": 0.5757575757575758,
40
+ "eval_loss": 0.6894957423210144,
41
+ "eval_runtime": 2.4549,
42
+ "eval_samples_per_second": 26.885,
43
+ "eval_steps_per_second": 3.666,
44
+ "step": 136
45
+ }
46
+ ],
47
+ "max_steps": 340,
48
+ "num_train_epochs": 10,
49
+ "total_flos": 282055051345920.0,
50
+ "trial_name": null,
51
+ "trial_params": null
52
+ }
BERT-mULT-t-MMG/checkpoint-136/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdbe8be9883b673d6afa80be7ca632d77e0b2363bb941b5cb884746b73b819ab
3
+ size 3963
BERT-mULT-t-MMG/checkpoint-34/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:841f32b80cd263b6b9cee7b10b8e6c251a26117c3cd0c49c1efd6967e198ce3e
3
+ size 1422957573
BERT-mULT-t-MMG/checkpoint-34/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:969572288ab714c4eda857bea8e580b2c3c361edfbe79b3a135b7cb27f7913d2
3
+ size 711488305
BERT-mULT-t-MMG/checkpoint-34/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6240db54ec5d8e68e422054a7ac15b52594fa55680ba495f5f5e1c7bca4f5d5
3
+ size 14575
BERT-mULT-t-MMG/checkpoint-34/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e1135ccdc6ff911cd34976d2d6f74e50eda157c783d2100d5cfc14ea275faaba
3
+ size 627
BERT-mULT-t-MMG/checkpoint-34/trainer_state.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 0.6892990469932556,
3
+ "best_model_checkpoint": "BERT-mULT-t-MMG/checkpoint-34",
4
+ "epoch": 1.0,
5
+ "global_step": 34,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 1.0,
12
+ "eval_accuracy": 0.5303030303030303,
13
+ "eval_loss": 0.6892990469932556,
14
+ "eval_runtime": 2.4367,
15
+ "eval_samples_per_second": 27.086,
16
+ "eval_steps_per_second": 3.693,
17
+ "step": 34
18
+ }
19
+ ],
20
+ "max_steps": 340,
21
+ "num_train_epochs": 10,
22
+ "total_flos": 70513762836480.0,
23
+ "trial_name": null,
24
+ "trial_params": null
25
+ }
BERT-mULT-t-MMG/checkpoint-34/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdbe8be9883b673d6afa80be7ca632d77e0b2363bb941b5cb884746b73b819ab
3
+ size 3963
BERT-mULT-t-MMG/checkpoint-68/config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "bert-base-multilingual-cased",
3
+ "architectures": [
4
+ "BertForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "classifier_dropout": null,
8
+ "directionality": "bidi",
9
+ "hidden_act": "gelu",
10
+ "hidden_dropout_prob": 0.1,
11
+ "hidden_size": 768,
12
+ "initializer_range": 0.02,
13
+ "intermediate_size": 3072,
14
+ "layer_norm_eps": 1e-12,
15
+ "max_position_embeddings": 512,
16
+ "model_type": "bert",
17
+ "num_attention_heads": 12,
18
+ "num_hidden_layers": 12,
19
+ "pad_token_id": 0,
20
+ "pooler_fc_size": 768,
21
+ "pooler_num_attention_heads": 12,
22
+ "pooler_num_fc_layers": 3,
23
+ "pooler_size_per_head": 128,
24
+ "pooler_type": "first_token_transform",
25
+ "position_embedding_type": "absolute",
26
+ "problem_type": "single_label_classification",
27
+ "torch_dtype": "float32",
28
+ "transformers_version": "4.31.0",
29
+ "type_vocab_size": 2,
30
+ "use_cache": true,
31
+ "vocab_size": 119547
32
+ }
BERT-mULT-t-MMG/checkpoint-68/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9440db5deb8d9d43644cf54164571d7ff79c3c7d1782859a7a30187cf978057
3
+ size 1422957573
BERT-mULT-t-MMG/checkpoint-68/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:717f49c9697c030729d076093434c28ca17baf396836f24f6b4e9cb222ab17d7
3
+ size 711488305
BERT-mULT-t-MMG/checkpoint-68/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b386f92443211ed864a270aa60382c492ac928b9a86f65925534197e53a0fad2
3
+ size 14575
BERT-mULT-t-MMG/checkpoint-68/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b160d770f4f8d28e5c183009a8b11a7d3b6b0ebd3406b3dc6d0fd9adf77a3b8
3
+ size 627
BERT-mULT-t-MMG/checkpoint-68/trainer_state.json ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 0.6790980100631714,
3
+ "best_model_checkpoint": "BERT-mULT-t-MMG/checkpoint-68",
4
+ "epoch": 2.0,
5
+ "global_step": 68,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 1.0,
12
+ "eval_accuracy": 0.5303030303030303,
13
+ "eval_loss": 0.6892990469932556,
14
+ "eval_runtime": 2.4367,
15
+ "eval_samples_per_second": 27.086,
16
+ "eval_steps_per_second": 3.693,
17
+ "step": 34
18
+ },
19
+ {
20
+ "epoch": 2.0,
21
+ "eval_accuracy": 0.6515151515151515,
22
+ "eval_loss": 0.6790980100631714,
23
+ "eval_runtime": 2.4536,
24
+ "eval_samples_per_second": 26.899,
25
+ "eval_steps_per_second": 3.668,
26
+ "step": 68
27
+ }
28
+ ],
29
+ "max_steps": 340,
30
+ "num_train_epochs": 10,
31
+ "total_flos": 141027525672960.0,
32
+ "trial_name": null,
33
+ "trial_params": null
34
+ }
BERT-mULT-t-MMG/checkpoint-68/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdbe8be9883b673d6afa80be7ca632d77e0b2363bb941b5cb884746b73b819ab
3
+ size 3963
BERT-mULT-t-MMG/config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "bert-base-multilingual-cased",
3
+ "architectures": [
4
+ "BertForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "classifier_dropout": null,
8
+ "directionality": "bidi",
9
+ "hidden_act": "gelu",
10
+ "hidden_dropout_prob": 0.1,
11
+ "hidden_size": 768,
12
+ "initializer_range": 0.02,
13
+ "intermediate_size": 3072,
14
+ "layer_norm_eps": 1e-12,
15
+ "max_position_embeddings": 512,
16
+ "model_type": "bert",
17
+ "num_attention_heads": 12,
18
+ "num_hidden_layers": 12,
19
+ "pad_token_id": 0,
20
+ "pooler_fc_size": 768,
21
+ "pooler_num_attention_heads": 12,
22
+ "pooler_num_fc_layers": 3,
23
+ "pooler_size_per_head": 128,
24
+ "pooler_type": "first_token_transform",
25
+ "position_embedding_type": "absolute",
26
+ "problem_type": "single_label_classification",
27
+ "torch_dtype": "float32",
28
+ "transformers_version": "4.31.0",
29
+ "type_vocab_size": 2,
30
+ "use_cache": true,
31
+ "vocab_size": 119547
32
+ }
BERT-mULT-t-MMG/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:717f49c9697c030729d076093434c28ca17baf396836f24f6b4e9cb222ab17d7
3
+ size 711488305
BERT-mULT-t-MMG/runs/Aug10_14-39-26_practicas/events.out.tfevents.1691671191.practicas.2817540.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ef0f0cfdf7469dbf2b844b85c35bdc9f56fbd50e135f0a03c8973fbce6ec88f
3
+ size 5829
BERT-mULT-t-MMG/runs/Aug10_14-39-26_practicas/events.out.tfevents.1691671338.practicas.2817540.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:327d91366e16c04304fe0ee52882b7cded2e6214df244c41b09f6626bf74c3f8
3
+ size 411
BERT-mULT-t-MMG/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdbe8be9883b673d6afa80be7ca632d77e0b2363bb941b5cb884746b73b819ab
3
+ size 3963