ChiefTheLord committed on
Commit
1c0022d
·
verified ·
1 Parent(s): 4d9ea1b

Upload folder using huggingface_hub

Browse files
checkpoints/checkpoint-768/eval_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoints/checkpoint-768/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36dd3eb4aba449551cc7e73c172dff3ca6e545fb999f0d9735b66323cd9ee677
3
+ size 1285789991
checkpoints/checkpoint-768/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:44a8f617ec401f3748d26c94e0a7f1c6e71636f6a22f32c7ebf43eaddb6a733b
3
+ size 5500739
checkpoints/checkpoint-768/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a8e2011629d8bed3ef560fa11175cac55684c4e12a72634bb24abf767b6c7399
3
+ size 14645
checkpoints/checkpoint-768/scaler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ea77389e470a224e5941beec3f67d185181449627399ba48a1bcbaae5e2650b
3
+ size 1383
checkpoints/checkpoint-768/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68006772f756af55bb05059da8b91655f0d4b68a2361b11d14132e1cd4655530
3
+ size 1465
checkpoints/checkpoint-768/trainer_state.json ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 0.2482223658694247,
6
+ "eval_steps": 256,
7
+ "global_step": 768,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.04137039431157078,
14
+ "grad_norm": 95.44776916503906,
15
+ "learning_rate": 9.773140025240866e-09,
16
+ "loss": 16.2614,
17
+ "step": 128
18
+ },
19
+ {
20
+ "epoch": 0.08274078862314156,
21
+ "grad_norm": 81.94233703613281,
22
+ "learning_rate": 1.9623233908948195e-08,
23
+ "loss": 16.1329,
24
+ "step": 256
25
+ },
26
+ {
27
+ "epoch": 0.08274078862314156,
28
+ "eval_bleu": 0.0,
29
+ "eval_cap_loss": 9.05574489775158,
30
+ "eval_con_loss": 3.4572216215587797,
31
+ "eval_loss": 15.97018808031839,
32
+ "step": 256
33
+ },
34
+ {
35
+ "epoch": 0.08274078862314156,
36
+ "eval_bleu": 0.0,
37
+ "eval_cap_loss": 9.05574489775158,
38
+ "eval_con_loss": 3.4572216215587797,
39
+ "eval_loss": 15.97018808031839,
40
+ "eval_runtime": 170.2059,
41
+ "eval_samples_per_second": 5.875,
42
+ "eval_steps_per_second": 0.37,
43
+ "step": 256
44
+ },
45
+ {
46
+ "epoch": 0.12411118293471235,
47
+ "grad_norm": 92.045166015625,
48
+ "learning_rate": 2.9473327792655523e-08,
49
+ "loss": 15.8603,
50
+ "step": 384
51
+ },
52
+ {
53
+ "epoch": 0.16548157724628312,
54
+ "grad_norm": 67.55699920654297,
55
+ "learning_rate": 3.9323421676362855e-08,
56
+ "loss": 15.4976,
57
+ "step": 512
58
+ },
59
+ {
60
+ "epoch": 0.16548157724628312,
61
+ "eval_bleu": 0.0,
62
+ "eval_cap_loss": 8.317013150169736,
63
+ "eval_con_loss": 3.4565826900421626,
64
+ "eval_loss": 15.230178530254062,
65
+ "step": 512
66
+ },
67
+ {
68
+ "epoch": 0.16548157724628312,
69
+ "eval_bleu": 0.0,
70
+ "eval_cap_loss": 8.317013150169736,
71
+ "eval_con_loss": 3.4565826900421626,
72
+ "eval_loss": 15.230178530254062,
73
+ "eval_runtime": 169.6131,
74
+ "eval_samples_per_second": 5.896,
75
+ "eval_steps_per_second": 0.371,
76
+ "step": 512
77
+ },
78
+ {
79
+ "epoch": 0.2068519715578539,
80
+ "grad_norm": 68.02971649169922,
81
+ "learning_rate": 4.917351556007019e-08,
82
+ "loss": 15.0425,
83
+ "step": 640
84
+ },
85
+ {
86
+ "epoch": 0.2482223658694247,
87
+ "grad_norm": 63.56317138671875,
88
+ "learning_rate": 5.902360944377751e-08,
89
+ "loss": 14.5238,
90
+ "step": 768
91
+ },
92
+ {
93
+ "epoch": 0.2482223658694247,
94
+ "eval_bleu": 0.0009372288320443345,
95
+ "eval_cap_loss": 7.270466600145612,
96
+ "eval_con_loss": 3.45575193374876,
97
+ "eval_loss": 14.181970460074288,
98
+ "step": 768
99
+ },
100
+ {
101
+ "epoch": 0.2482223658694247,
102
+ "eval_bleu": 0.0009372288320443345,
103
+ "eval_cap_loss": 7.270466600145612,
104
+ "eval_con_loss": 3.45575193374876,
105
+ "eval_loss": 14.181970460074288,
106
+ "eval_runtime": 168.362,
107
+ "eval_samples_per_second": 5.94,
108
+ "eval_steps_per_second": 0.374,
109
+ "step": 768
110
+ }
111
+ ],
112
+ "logging_steps": 128,
113
+ "max_steps": 4331600,
114
+ "num_input_tokens_seen": 0,
115
+ "num_train_epochs": 1400,
116
+ "save_steps": 256,
117
+ "stateful_callbacks": {
118
+ "TrainerControl": {
119
+ "args": {
120
+ "should_epoch_stop": false,
121
+ "should_evaluate": false,
122
+ "should_log": false,
123
+ "should_save": true,
124
+ "should_training_stop": false
125
+ },
126
+ "attributes": {}
127
+ }
128
+ },
129
+ "total_flos": 0.0,
130
+ "train_batch_size": 16,
131
+ "trial_name": null,
132
+ "trial_params": null
133
+ }
checkpoints/checkpoint-768/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c02eead29fa990e6385f8edcc8677014d9cab4633305b111e3398065679d01f5
3
+ size 5777