Curiousfox committed on
Commit f5e4b63 · verified · 1 parent: a6d387d

End of training

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ source.spm filter=lfs diff=lfs merge=lfs -text
+ target.spm filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,80 @@
+ ---
+ library_name: transformers
+ language:
+ - nan
+ license: apache-2.0
+ base_model: Helsinki-NLP/opus-mt-en-ZH
+ tags:
+ - generated_from_trainer
+ datasets:
+ - sarahwei/Taiwanese-Minnan-Sutiau
+ metrics:
+ - bleu
+ model-index:
+ - name: helsinki_new_ver2
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # helsinki_new_ver2
+
+ This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-ZH](https://huggingface.co/Helsinki-NLP/opus-mt-en-ZH) on the sarahwei/Taiwanese-Minnan-Sutiau dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.6670
+ - Bleu: 1.5307
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
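+ The snippet below is an editorial usage sketch rather than part of the auto-generated card: it assumes the checkpoint is published under the hypothetical repo id `Curiousfox/helsinki_new_ver2` and uses the standard `transformers` MarianMT classes.
+
+ ```python
+ from transformers import MarianMTModel, MarianTokenizer
+
+ repo_id = "Curiousfox/helsinki_new_ver2"  # hypothetical repo id; adjust to the real Hub path
+ tokenizer = MarianTokenizer.from_pretrained(repo_id)
+ model = MarianMTModel.from_pretrained(repo_id)
+
+ # Translate an English sentence; the fine-tuning target is Taiwanese Minnan (nan).
+ inputs = tokenizer(["How are you today?"], return_tensors="pt", padding=True)
+ outputs = model.generate(**inputs)
+ print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
+ ```
+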
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (see the sketch after this list):
+ - learning_rate: 1e-06
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: AdamW (adamw_torch) with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 1000
+ - training_steps: 23000
+ - mixed_precision_training: Native AMP
+
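+ As a rough, assumed reconstruction (the training script is not part of this commit), these values map onto `Seq2SeqTrainingArguments` roughly as follows; only the argument names come from the standard `transformers` API, and `output_dir` is a guess:
+
+ ```python
+ from transformers import Seq2SeqTrainingArguments
+
+ # Assumed mapping of the hyperparameters reported above.
+ training_args = Seq2SeqTrainingArguments(
+     output_dir="helsinki_new_ver2",  # guessed output directory
+     learning_rate=1e-6,
+     per_device_train_batch_size=8,
+     per_device_eval_batch_size=8,
+     seed=42,
+     optim="adamw_torch",             # AdamW with default betas=(0.9, 0.999), epsilon=1e-08
+     lr_scheduler_type="linear",
+     warmup_steps=1000,
+     max_steps=23000,
+     fp16=True,                       # "Native AMP" mixed precision
+ )
+ ```
+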
+ ### Training results
+
+ | Training Loss | Epoch  | Step  | Validation Loss | Bleu   |
+ |:-------------:|:------:|:-----:|:---------------:|:------:|
+ | 0.8583        | 0.4230 | 1000  | 0.9198          | 0.1959 |
+ | 0.7889        | 0.8460 | 2000  | 0.8619          | 0.1895 |
+ | 0.7595        | 1.2690 | 3000  | 0.8263          | 0.9704 |
+ | 0.7098        | 1.6920 | 4000  | 0.7982          | 1.0918 |
+ | 0.6963        | 2.1151 | 5000  | 0.7757          | 1.1072 |
+ | 0.6818        | 2.5381 | 6000  | 0.7568          | 1.1531 |
+ | 0.6642        | 2.9611 | 7000  | 0.7403          | 1.2418 |
+ | 0.659         | 3.3841 | 8000  | 0.7262          | 1.5448 |
+ | 0.6287        | 3.8071 | 9000  | 0.7135          | 1.3160 |
+ | 0.6251        | 4.2301 | 10000 | 0.7020          | 1.4177 |
+ | 0.6079        | 4.6531 | 11000 | 0.6918          | 1.7637 |
+ | 0.6003        | 5.0761 | 12000 | 0.6825          | 1.3500 |
+ | 0.5874        | 5.4992 | 13000 | 0.6743          | 1.5090 |
+ | 0.5941        | 5.9222 | 14000 | 0.6670          | 1.5307 |
+
+
+ ### Framework versions
+
+ - Transformers 4.51.3
+ - Pytorch 2.6.0+cu124
+ - Datasets 3.5.1
+ - Tokenizers 0.21.1
config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "activation_dropout": 0.0,
+   "activation_function": "swish",
+   "add_bias_logits": false,
+   "add_final_layer_norm": false,
+   "architectures": [
+     "MarianMTModel"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "classif_dropout": 0.0,
+   "classifier_dropout": 0.0,
+   "d_model": 512,
+   "decoder_attention_heads": 8,
+   "decoder_ffn_dim": 2048,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 6,
+   "decoder_start_token_id": 65000,
+   "decoder_vocab_size": 65001,
+   "do_blenderbot_90_layernorm": false,
+   "dropout": 0.1,
+   "encoder_attention_heads": 8,
+   "encoder_ffn_dim": 2048,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 6,
+   "eos_token_id": 0,
+   "extra_pos_embeddings": 0,
+   "force_bos_token_to_be_generated": false,
+   "forced_eos_token_id": 0,
+   "gradient_checkpointing": false,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "max_length": null,
+   "max_position_embeddings": 512,
+   "model_type": "marian",
+   "normalize_before": false,
+   "normalize_embedding": false,
+   "num_beams": null,
+   "num_hidden_layers": 6,
+   "pad_token_id": 65000,
+   "scale_embedding": true,
+   "share_encoder_decoder_embeddings": true,
+   "static_position_embeddings": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.51.3",
+   "use_cache": true,
+   "vocab_size": 65001
+ }
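For orientation only (not part of the commit): the architecture fields above can be inspected programmatically with the standard `AutoConfig` API; the repo id below is hypothetical.

```python
from transformers import AutoConfig

# Load the shipped config and read off the Marian architecture hyperparameters.
config = AutoConfig.from_pretrained("Curiousfox/helsinki_new_ver2")  # hypothetical repo id
print(config.d_model, config.encoder_layers, config.vocab_size)  # 512 6 65001
```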
generation_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "bad_words_ids": [
+     [
+       65000
+     ]
+   ],
+   "bos_token_id": 0,
+   "decoder_start_token_id": 65000,
+   "eos_token_id": 0,
+   "forced_eos_token_id": 0,
+   "max_length": 512,
+   "num_beams": 4,
+   "pad_token_id": 65000,
+   "renormalize_logits": true,
+   "transformers_version": "4.51.3"
+ }
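Also for orientation (not part of the commit): these decoding defaults are picked up automatically by `generate()` when the model is loaded, and can be inspected on their own via `GenerationConfig`; the repo id is again hypothetical.

```python
from transformers import GenerationConfig

# Load only the generation defaults that ship with the model.
gen_config = GenerationConfig.from_pretrained("Curiousfox/helsinki_new_ver2")  # hypothetical repo id
print(gen_config.num_beams)   # 4 (beam search by default)
print(gen_config.max_length)  # 512
```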
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53ffebdd5283090e68097180c6ecab762ba31720e3074ac601c676ee0decb655
+ size 309965092
source.spm ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5775ddc9e3ff2fae91554da56468ad35ff56edaba870fea74447bc7234bfdaa8
+ size 806435
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
target.spm ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81dc94efa84e4025ef38d25d5d07429fe41e3eb29d44003f1db6fe98487b0052
+ size 804600
tokenizer_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65000": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "extra_special_tokens": {},
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "separate_vocabs": false,
+   "source_lang": "eng",
+   "sp_model_kwargs": {},
+   "target_lang": "zho",
+   "tokenizer_class": "MarianTokenizer",
+   "unk_token": "<unk>"
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0e2dff39be13b6da36e0cacdaf78fcce9507a393e1fda8c1f0291f5ea24b062
+ size 5432
vocab.json ADDED
The diff for this file is too large to render. See raw diff