contemmcm committed on
Commit f45fbc3 · verified · 1 parent: 83c768b

End of training

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ source.spm filter=lfs diff=lfs merge=lfs -text
+ target.spm filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,91 @@
+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: Helsinki-NLP/opus-mt-en-ru
+ tags:
+ - generated_from_trainer
+ metrics:
+ - bleu
+ model-index:
+ - name: 68174dedb48e8aee3dbe7e2e374444f6
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # 68174dedb48e8aee3dbe7e2e374444f6
+
+ This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-ru](https://huggingface.co/Helsinki-NLP/opus-mt-en-ru) on the Helsinki-NLP/opus_books [fr-no] dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 2.7788
+ - Data Size: 1.0
+ - Epoch Runtime: 6.5770
+ - Bleu: 1.8779
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - total_train_batch_size: 32
+ - total_eval_batch_size: 32
+ - optimizer: adamw_torch with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+ - lr_scheduler_type: constant
+ - num_epochs: 50
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Data Size | Epoch Runtime | Bleu |
+ |:-------------:|:-----:|:----:|:---------------:|:---------:|:-------------:|:------:|
+ | No log | 0 | 0 | 8.1576 | 0 | 1.1298 | 0.0120 |
+ | No log | 1 | 86 | 7.2886 | 0.0078 | 1.6048 | 0.0364 |
+ | No log | 2 | 172 | 6.6430 | 0.0156 | 1.4120 | 0.0139 |
+ | No log | 3 | 258 | 6.1895 | 0.0312 | 1.6749 | 0.0290 |
+ | No log | 4 | 344 | 5.5434 | 0.0625 | 1.9136 | 0.0593 |
+ | 0.3187 | 5 | 430 | 4.9139 | 0.125 | 2.3661 | 0.1100 |
+ | 1.2067 | 6 | 516 | 4.2736 | 0.25 | 3.0119 | 0.1236 |
+ | 1.5018 | 7 | 602 | 3.7627 | 0.5 | 3.9584 | 0.3964 |
+ | 2.1437 | 8.0 | 688 | 3.3846 | 1.0 | 6.2046 | 0.7190 |
+ | 3.339 | 9.0 | 774 | 3.1913 | 1.0 | 5.9831 | 0.8692 |
+ | 3.1252 | 10.0 | 860 | 3.0527 | 1.0 | 6.0412 | 1.2043 |
+ | 3.0013 | 11.0 | 946 | 2.9586 | 1.0 | 5.9485 | 1.1741 |
+ | 2.798 | 12.0 | 1032 | 2.8913 | 1.0 | 6.2445 | 1.3138 |
+ | 2.6842 | 13.0 | 1118 | 2.8311 | 1.0 | 6.0278 | 1.3558 |
+ | 2.56 | 14.0 | 1204 | 2.7889 | 1.0 | 6.3031 | 1.5464 |
+ | 2.4526 | 15.0 | 1290 | 2.7657 | 1.0 | 6.1649 | 1.5456 |
+ | 2.3505 | 16.0 | 1376 | 2.7463 | 1.0 | 6.1921 | 1.3818 |
+ | 2.2636 | 17.0 | 1462 | 2.7351 | 1.0 | 6.1744 | 1.5696 |
+ | 2.1563 | 18.0 | 1548 | 2.7299 | 1.0 | 6.4085 | 1.7799 |
+ | 2.0449 | 19.0 | 1634 | 2.7390 | 1.0 | 6.0078 | 1.7635 |
+ | 1.9794 | 20.0 | 1720 | 2.7285 | 1.0 | 6.2261 | 1.7834 |
+ | 1.9045 | 21.0 | 1806 | 2.7564 | 1.0 | 6.2097 | 1.6870 |
+ | 1.8111 | 22.0 | 1892 | 2.7386 | 1.0 | 6.1472 | 1.8704 |
+ | 1.7096 | 23.0 | 1978 | 2.7728 | 1.0 | 6.1480 | 1.8485 |
+ | 1.6496 | 24.0 | 2064 | 2.7788 | 1.0 | 6.5770 | 1.8779 |
+
+
+ ### Framework versions
+
+ - Transformers 4.57.0
+ - Pytorch 2.8.0+cu128
+ - Datasets 4.2.0
+ - Tokenizers 0.22.1
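
The card above leaves intended uses undocumented. As a minimal, non-authoritative usage sketch, the checkpoint loads like any MarianMT model; the repository id below is an assumption pieced together from the committer name and the model name, so substitute the real path:

```python
from transformers import MarianMTModel, MarianTokenizer

repo_id = "contemmcm/68174dedb48e8aee3dbe7e2e374444f6"  # assumed repo id, not confirmed by this commit

tokenizer = MarianTokenizer.from_pretrained(repo_id)
model = MarianMTModel.from_pretrained(repo_id)

batch = tokenizer(["How are you today?"], return_tensors="pt", padding=True)
generated = model.generate(**batch)  # defaults come from generation_config.json below
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```

The hyperparameter list maps directly onto `Seq2SeqTrainingArguments`. The sketch below only mirrors the values reported in the card (per-device batch size 8 across 4 GPUs gives the totals of 32); it is not the script that produced this commit, and the output path is hypothetical:

```python
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="opus-mt-en-ru-finetuned",  # hypothetical output path
    learning_rate=5e-5,
    per_device_train_batch_size=8,   # 4 devices -> total_train_batch_size 32
    per_device_eval_batch_size=8,    # 4 devices -> total_eval_batch_size 32
    seed=42,
    lr_scheduler_type="constant",
    num_train_epochs=50,
    predict_with_generate=True,      # needed by Seq2SeqTrainer to compute BLEU at evaluation time
)
```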
config.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "_num_labels": 3,
+   "activation_dropout": 0.0,
+   "activation_function": "swish",
+   "add_bias_logits": false,
+   "add_final_layer_norm": false,
+   "architectures": [
+     "MarianMTModel"
+   ],
+   "attention_dropout": 0.0,
+   "classif_dropout": 0.0,
+   "classifier_dropout": 0.0,
+   "d_model": 512,
+   "decoder_attention_heads": 8,
+   "decoder_ffn_dim": 2048,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 6,
+   "decoder_start_token_id": 62517,
+   "decoder_vocab_size": 62518,
+   "dropout": 0.1,
+   "dtype": "float32",
+   "encoder_attention_heads": 8,
+   "encoder_ffn_dim": 2048,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 6,
+   "eos_token_id": 0,
+   "forced_eos_token_id": 0,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "max_length": null,
+   "max_position_embeddings": 512,
+   "model_type": "marian",
+   "normalize_before": false,
+   "normalize_embedding": false,
+   "num_beams": null,
+   "num_hidden_layers": 6,
+   "pad_token_id": 62517,
+   "scale_embedding": true,
+   "share_encoder_decoder_embeddings": true,
+   "static_position_embeddings": true,
+   "transformers_version": "4.57.0",
+   "use_cache": true,
+   "vocab_size": 62518
+ }
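
config.json fixes the MarianMT architecture: 6 encoder and 6 decoder layers, d_model 512, 8 attention heads, and a shared vocabulary of 62,518 tokens with pad id 62517. A small sketch of rebuilding that shape from the file, assuming it is saved in the working directory; the trained weights themselves live in model.safetensors below:

```python
from transformers import MarianConfig, MarianMTModel

# Instantiate the architecture described by config.json with randomly
# initialized weights; loading model.safetensors is what restores the training.
config = MarianConfig.from_json_file("config.json")
model = MarianMTModel(config)
print(config.encoder_layers, config.decoder_layers, config.vocab_size)  # 6 6 62518
```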
generation_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "bad_words_ids": [
+     [
+       62517
+     ]
+   ],
+   "decoder_start_token_id": 62517,
+   "eos_token_id": [
+     0
+   ],
+   "forced_eos_token_id": 0,
+   "max_length": 512,
+   "num_beams": 4,
+   "pad_token_id": 62517,
+   "renormalize_logits": true,
+   "transformers_version": "4.57.0"
+ }
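
These are the defaults that `generate()` reads automatically: beam search with 4 beams, a 512-token length limit, and the pad id 62517 suppressed through `bad_words_ids`. A minimal sketch of inspecting them from a local copy of the file; keyword arguments passed to `generate()` override any of these fields per call:

```python
from transformers import GenerationConfig

# Assumes generation_config.json sits in the current directory.
gen_cfg = GenerationConfig.from_pretrained(".")
print(gen_cfg.num_beams, gen_cfg.max_length, gen_cfg.pad_token_id)  # 4 512 62517
```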
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cddce658cb73e2ef87d0f74bbd2f931fa7ccd3aafed866f3c901a80b0887089a
+ size 688981208
source.spm ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16bebef1389a0b8ab452772c4e35b9e605e5713f8ac7baa71ca701394eaa086d
+ size 802781
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "eos_token": "</s>",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>"
+ }
target.spm ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:745998e51ba5b058e38b7ac7765c25c43ed5c1c39cc92b27163b9b2e323c9d7c
+ size 1080169
tokenizer_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "62517": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "extra_special_tokens": {},
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "separate_vocabs": false,
+   "source_lang": "en",
+   "sp_model_kwargs": {},
+   "target_lang": "ru",
+   "tokenizer_class": "MarianTokenizer",
+   "unk_token": "<unk>"
+ }
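
The tokenizer is a `MarianTokenizer` with English as the source language and Russian as the target, backed by the two SentencePiece models added in this commit. A minimal round-trip sketch, assuming vocab.json, source.spm, target.spm, and this config are in the current directory; the example sentences are placeholders:

```python
from transformers import MarianTokenizer

tokenizer = MarianTokenizer.from_pretrained(".")

# Source text is segmented with source.spm; </s> (id 0) is appended automatically.
print(tokenizer("Hello world")["input_ids"])

# Target-side text (labels) goes through target.spm via text_target.
labels = tokenizer(text_target="Привет, мир").input_ids
print(tokenizer.decode(labels, skip_special_tokens=True))
```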
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1708855b3cacc28d057f2eb26921357807cb17c5be5e74552a7229b3acbba265
+ size 6097
vocab.json ADDED
The diff for this file is too large to render. See raw diff