WellDunDun committed on
Commit 4c688d3 · verified · 1 Parent(s): c6c8a58

Training in progress, step 1000, checkpoint

.gitattributes CHANGED
@@ -35,3 +35,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
  source.spm filter=lfs diff=lfs merge=lfs -text
  target.spm filter=lfs diff=lfs merge=lfs -text
+ last-checkpoint/source.spm filter=lfs diff=lfs merge=lfs -text
+ last-checkpoint/target.spm filter=lfs diff=lfs merge=lfs -text
last-checkpoint/config.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "activation_dropout": 0.0,
+   "activation_function": "swish",
+   "add_bias_logits": false,
+   "add_final_layer_norm": false,
+   "architectures": [
+     "MarianMTModel"
+   ],
+   "attention_dropout": 0.0,
+   "classif_dropout": 0.0,
+   "classifier_dropout": 0.0,
+   "d_model": 512,
+   "decoder_attention_heads": 8,
+   "decoder_ffn_dim": 2048,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 6,
+   "decoder_start_token_id": 64109,
+   "decoder_vocab_size": 64110,
+   "dropout": 0.1,
+   "dtype": "float32",
+   "encoder_attention_heads": 8,
+   "encoder_ffn_dim": 2048,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 6,
+   "eos_token_id": 0,
+   "extra_pos_embeddings": 64110,
+   "forced_eos_token_id": 0,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "max_length": null,
+   "max_position_embeddings": 512,
+   "model_type": "marian",
+   "normalize_before": false,
+   "normalize_embedding": false,
+   "num_beams": null,
+   "num_hidden_layers": 6,
+   "pad_token_id": 64109,
+   "scale_embedding": true,
+   "share_encoder_decoder_embeddings": true,
+   "static_position_embeddings": true,
+   "transformers_version": "4.57.3",
+   "use_cache": true,
+   "vocab_size": 64110
+ }
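
The added config.json describes a standard Marian encoder-decoder: 6 encoder and 6 decoder layers, d_model 512, 8 attention heads, swish activations, and a shared 64110-token vocabulary in which <pad> (id 64109) doubles as the decoder start token. A minimal sketch of inspecting the checkpoint with transformers, assuming it has been downloaded locally as last-checkpoint/ (the path is an assumption, not part of the commit):

# Sketch only: load the checkpoint's config and weights from an assumed local copy.
from transformers import MarianConfig, MarianMTModel

config = MarianConfig.from_pretrained("last-checkpoint")   # parses config.json above
model = MarianMTModel.from_pretrained("last-checkpoint")   # loads model.safetensors

# These values mirror the diff: 6/6 layers, d_model 512, vocab 64110.
print(config.encoder_layers, config.decoder_layers, config.d_model, config.vocab_size)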
last-checkpoint/generation_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "bad_words_ids": [
+     [
+       64109
+     ]
+   ],
+   "decoder_start_token_id": 64109,
+   "eos_token_id": [
+     0
+   ],
+   "forced_eos_token_id": 0,
+   "max_length": 512,
+   "num_beams": 4,
+   "pad_token_id": 64109,
+   "renormalize_logits": true,
+   "transformers_version": "4.57.3"
+ }
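
generation_config.json stores the decoding defaults for this checkpoint: beam search with 4 beams, max_length 512, <pad> (64109) banned via bad_words_ids, and renormalized logits. A hedged usage sketch, again assuming a local last-checkpoint/ copy and an arbitrary example sentence:

# Sketch only: generate() picks these defaults up from generation_config.json
# automatically; the explicit arguments below just restate them for clarity.
from transformers import MarianMTModel, MarianTokenizer

tokenizer = MarianTokenizer.from_pretrained("last-checkpoint")
model = MarianMTModel.from_pretrained("last-checkpoint")

inputs = tokenizer("How are you today?", return_tensors="pt")
output_ids = model.generate(**inputs, num_beams=4, max_length=512)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))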
last-checkpoint/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9095f8f4b09f5ac990f0f5a29fada4677f683368b4241bfc2f445ec24cd32f17
+ size 308136760
last-checkpoint/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63bbc30f30dd76a085db013210508d0a520062d24abcb883c042786584262935
+ size 615918027
last-checkpoint/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9bf27ae5fc465cacc575c82fcf640ea163c1ef9c099c54ea1e77e18d43cd99b
+ size 14645
last-checkpoint/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7f61b50ac58556fb5ddabe26ca60d44c13d0ea7faf3bf7f86dbfa4e99343e6a
+ size 1383
last-checkpoint/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f0a9fbc27bd8041e9c0979b03c45a1bff18010be17c72323614773a30a2199c
+ size 1465
last-checkpoint/source.spm ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9129bf81ef9d5fcc1797ea3061e789dc5f9e5602786b0490238d207d4219e0f5
+ size 790473
last-checkpoint/special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "eos_token": "</s>",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>"
+ }
last-checkpoint/target.spm ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a0a52318dac3247455ec889eb3bd8da9bdf234974be8b9b0f99ad5378f474c9
+ size 706659
last-checkpoint/tokenizer_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "64109": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "extra_special_tokens": {},
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "separate_vocabs": false,
+   "source_lang": "eng",
+   "sp_model_kwargs": {},
+   "target_lang": "mul",
+   "tokenizer_class": "MarianTokenizer",
+   "unk_token": "<unk>"
+ }
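
tokenizer_config.json declares a MarianTokenizer with source_lang "eng", target_lang "mul", model_max_length 512, and the same special tokens referenced by config.json: </s> is id 0 and <pad> is id 64109. A small check, assuming the same local last-checkpoint/ path as above:

# Sketch only: confirm the special-token ids line up with config.json.
from transformers import MarianTokenizer

tokenizer = MarianTokenizer.from_pretrained("last-checkpoint")
print(tokenizer.eos_token, tokenizer.convert_tokens_to_ids("</s>"))   # expected: </s> 0
print(tokenizer.pad_token, tokenizer.convert_tokens_to_ids("<pad>"))  # expected: <pad> 64109
print(tokenizer.model_max_length)                                     # expected: 512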
last-checkpoint/trainer_state.json ADDED
@@ -0,0 +1,112 @@
+ {
+   "best_global_step": 1000,
+   "best_metric": 0.6297275424003601,
+   "best_model_checkpoint": "./en-wal-checkpoints/checkpoint-1000",
+   "epoch": 0.13962580284836637,
+   "eval_steps": 1000,
+   "global_step": 1000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.013962580284836637,
+       "grad_norm": 1.8059194087982178,
+       "learning_rate": 1.990784697012008e-05,
+       "loss": 2.3977,
+       "step": 100
+     },
+     {
+       "epoch": 0.027925160569673275,
+       "grad_norm": 1.7286276817321777,
+       "learning_rate": 1.98147631015545e-05,
+       "loss": 1.1677,
+       "step": 200
+     },
+     {
+       "epoch": 0.041887740854509914,
+       "grad_norm": 1.5416228771209717,
+       "learning_rate": 1.9721679232988925e-05,
+       "loss": 1.0428,
+       "step": 300
+     },
+     {
+       "epoch": 0.05585032113934655,
+       "grad_norm": 2.0110714435577393,
+       "learning_rate": 1.9628595364423345e-05,
+       "loss": 0.9278,
+       "step": 400
+     },
+     {
+       "epoch": 0.06981290142418319,
+       "grad_norm": 1.5851672887802124,
+       "learning_rate": 1.953551149585777e-05,
+       "loss": 0.8565,
+       "step": 500
+     },
+     {
+       "epoch": 0.08377548170901983,
+       "grad_norm": 1.6291816234588623,
+       "learning_rate": 1.944242762729219e-05,
+       "loss": 0.8211,
+       "step": 600
+     },
+     {
+       "epoch": 0.09773806199385647,
+       "grad_norm": 1.455430269241333,
+       "learning_rate": 1.9349343758726614e-05,
+       "loss": 0.7689,
+       "step": 700
+     },
+     {
+       "epoch": 0.1117006422786931,
+       "grad_norm": 1.8033580780029297,
+       "learning_rate": 1.9256259890161038e-05,
+       "loss": 0.7457,
+       "step": 800
+     },
+     {
+       "epoch": 0.12566322256352974,
+       "grad_norm": 1.815686821937561,
+       "learning_rate": 1.916317602159546e-05,
+       "loss": 0.7194,
+       "step": 900
+     },
+     {
+       "epoch": 0.13962580284836637,
+       "grad_norm": 1.4914112091064453,
+       "learning_rate": 1.9070092153029882e-05,
+       "loss": 0.6944,
+       "step": 1000
+     },
+     {
+       "epoch": 0.13962580284836637,
+       "eval_loss": 0.6297275424003601,
+       "eval_runtime": 16.8076,
+       "eval_samples_per_second": 358.826,
+       "eval_steps_per_second": 22.43,
+       "step": 1000
+     }
+   ],
+   "logging_steps": 100,
+   "max_steps": 21486,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 1000,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 542373838848000.0,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
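
trainer_state.json records the run so far: training loss falls from 2.40 at step 100 to 0.69 at step 1000, the step-1000 evaluation gives eval_loss 0.630 (the current best), and 1000 of 21486 planned steps (3 epochs, batch size 16) are complete. Training can be continued from this checkpoint via the Trainer API's resume_from_checkpoint argument; the sketch below only reads the state file, assuming a local last-checkpoint/ copy:

# Sketch only: print the recorded loss curve from trainer_state.json.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:
        print(f'step {entry["step"]:5d}  train loss {entry["loss"]:.4f}')
    elif "eval_loss" in entry:
        print(f'step {entry["step"]:5d}  eval loss  {entry["eval_loss"]:.4f}')

print("progress:", state["global_step"], "/", state["max_steps"], "steps")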
last-checkpoint/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e10d4e3e16a17bf82487d3f04f2bd2ddb944691afefdf7975162f453899e25a3
+ size 6033
last-checkpoint/vocab.json ADDED
The diff for this file is too large to render.