mgh6 committed on
Commit
d27a7a2
·
verified ·
1 Parent(s): 9af57f7

Training in progress, step 500, checkpoint

Browse files
last-checkpoint/config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "tattabio/gLM2_150M",
3
  "architectures": [
4
  "gLM2ForMaskedLM"
5
  ],
@@ -8,10 +8,10 @@
8
  "AutoModel": "modeling_glm2.gLM2Model",
9
  "AutoModelForMaskedLM": "modeling_glm2.gLM2ForMaskedLM"
10
  },
11
- "depth": 30,
12
- "dim": 640,
13
  "ffn_dim_multiplier": null,
14
- "heads": 10,
15
  "model_type": "gLM2",
16
  "norm_eps": 1e-05,
17
  "swiglu_multiple_of": 256,
 
1
  {
2
+ "_name_or_path": "tattabio/gLM2_650M",
3
  "architectures": [
4
  "gLM2ForMaskedLM"
5
  ],
 
8
  "AutoModel": "modeling_glm2.gLM2Model",
9
  "AutoModelForMaskedLM": "modeling_glm2.gLM2ForMaskedLM"
10
  },
11
+ "depth": 33,
12
+ "dim": 1280,
13
  "ffn_dim_multiplier": null,
14
+ "heads": 20,
15
  "model_type": "gLM2",
16
  "norm_eps": 1e-05,
17
  "swiglu_multiple_of": 256,
last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:47c3ea48a3d6443a0fd08b53e26ae04984ed6d6381f3928afee8fb4a155e9eaf
3
- size 609855088
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:38384749b3fa8fe08d6d05430325f30ab4a6dfb4936f267fce232c98c501dc69
3
+ size 2682482800
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:1ff76df2be9ebe7397bfb66a0d8ba41a51dc5c9909150e0d126337d9da67e16a
3
- size 1219840122
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:493eb904d50aba9fced0e8ab2ed8fc66ac410087d78582e9bad1afa3df987de3
3
+ size 5365108834
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:1e7034a159dfbc62e35de23e4c6fd1b9f4a30b049e2fbd1857b3a8bade788440
3
  size 15024
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:848fdf35f13e1fde847fbd191021c99c0675e5e723a1b65fde4649f2fc9250db
3
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:cc2ced66205bf6a6509c630ed58ad5e3a6e3843df72048f8b326021e9e36307f
3
  size 15024
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9f9fc41c0627d630837221d5c7872d3197c08985ee35f058d5f5e36bfe0249b
3
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:753dcfeefdbe4107c04bfd9f26c44afd642bd34e5d3d0b685a1b7e09f489639e
3
  size 15024
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ae1449d711371210b0f6284f921f3df183a3c5c6628d3fc2950f5c89910866d
3
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3e87981ec44568bc91dcff3b2572db474da39ce05df9ab33e26b3f375f4eac1b
3
  size 15024
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3cc14e80a0475fa4dead8d6a3c6f0af9c5a92c40ad285584d68830834b3a6ea
3
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b6956522915b3d6cfb4a36d77fb3f2d3b9a7b7a09c560fa745829757b1d0d295
3
  size 1064
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:478ca537cf75a11344e25e46d3c46fdcf2db572bdb8cfff6f1ed3781e47a9787
3
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,152 +1,82 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 0.07672094673648273,
5
  "eval_steps": 500,
6
- "global_step": 1000,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.0038360473368241363,
13
- "grad_norm": 60.51473617553711,
14
  "learning_rate": 9.961638790854688e-05,
15
- "loss": 82.5331,
16
  "step": 50
17
  },
18
  {
19
  "epoch": 0.0076720946736482725,
20
- "grad_norm": 41.30891418457031,
21
  "learning_rate": 9.923277581709376e-05,
22
- "loss": 79.5007,
23
  "step": 100
24
  },
25
  {
26
  "epoch": 0.011508142010472408,
27
- "grad_norm": 47.033897399902344,
28
  "learning_rate": 9.884916372564063e-05,
29
- "loss": 78.5655,
30
  "step": 150
31
  },
32
  {
33
  "epoch": 0.015344189347296545,
34
- "grad_norm": 47.13461685180664,
35
  "learning_rate": 9.846555163418752e-05,
36
- "loss": 77.9608,
37
  "step": 200
38
  },
39
  {
40
  "epoch": 0.019180236684120684,
41
- "grad_norm": 50.00185012817383,
42
  "learning_rate": 9.80819395427344e-05,
43
- "loss": 77.3352,
44
  "step": 250
45
  },
46
  {
47
  "epoch": 0.023016284020944817,
48
- "grad_norm": 47.55207443237305,
49
  "learning_rate": 9.769832745128127e-05,
50
- "loss": 76.8788,
51
  "step": 300
52
  },
53
  {
54
  "epoch": 0.026852331357768953,
55
- "grad_norm": 32.375267028808594,
56
  "learning_rate": 9.731471535982815e-05,
57
- "loss": 76.3247,
58
  "step": 350
59
  },
60
  {
61
  "epoch": 0.03068837869459309,
62
- "grad_norm": 26.361143112182617,
63
  "learning_rate": 9.693110326837502e-05,
64
- "loss": 76.1166,
65
  "step": 400
66
  },
67
  {
68
  "epoch": 0.03452442603141723,
69
- "grad_norm": 30.172746658325195,
70
  "learning_rate": 9.65474911769219e-05,
71
- "loss": 75.7178,
72
  "step": 450
73
  },
74
  {
75
  "epoch": 0.03836047336824137,
76
- "grad_norm": 48.87131118774414,
77
  "learning_rate": 9.616387908546877e-05,
78
- "loss": 75.2954,
79
  "step": 500
80
- },
81
- {
82
- "epoch": 0.0421965207050655,
83
- "grad_norm": 24.453649520874023,
84
- "learning_rate": 9.578026699401565e-05,
85
- "loss": 75.0015,
86
- "step": 550
87
- },
88
- {
89
- "epoch": 0.046032568041889634,
90
- "grad_norm": 31.099306106567383,
91
- "learning_rate": 9.539665490256254e-05,
92
- "loss": 75.0084,
93
- "step": 600
94
- },
95
- {
96
- "epoch": 0.049868615378713774,
97
- "grad_norm": 29.60244369506836,
98
- "learning_rate": 9.501304281110941e-05,
99
- "loss": 74.4046,
100
- "step": 650
101
- },
102
- {
103
- "epoch": 0.05370466271553791,
104
- "grad_norm": 37.62842559814453,
105
- "learning_rate": 9.462943071965629e-05,
106
- "loss": 74.3024,
107
- "step": 700
108
- },
109
- {
110
- "epoch": 0.05754071005236205,
111
- "grad_norm": 29.12676429748535,
112
- "learning_rate": 9.424581862820317e-05,
113
- "loss": 73.9017,
114
- "step": 750
115
- },
116
- {
117
- "epoch": 0.06137675738918618,
118
- "grad_norm": 29.885427474975586,
119
- "learning_rate": 9.386220653675004e-05,
120
- "loss": 73.7941,
121
- "step": 800
122
- },
123
- {
124
- "epoch": 0.06521280472601032,
125
- "grad_norm": 53.64329147338867,
126
- "learning_rate": 9.347859444529692e-05,
127
- "loss": 73.4727,
128
- "step": 850
129
- },
130
- {
131
- "epoch": 0.06904885206283445,
132
- "grad_norm": 30.879840850830078,
133
- "learning_rate": 9.309498235384379e-05,
134
- "loss": 73.1386,
135
- "step": 900
136
- },
137
- {
138
- "epoch": 0.07288489939965859,
139
- "grad_norm": 32.532352447509766,
140
- "learning_rate": 9.271137026239067e-05,
141
- "loss": 73.0574,
142
- "step": 950
143
- },
144
- {
145
- "epoch": 0.07672094673648273,
146
- "grad_norm": 29.306671142578125,
147
- "learning_rate": 9.232775817093756e-05,
148
- "loss": 72.7158,
149
- "step": 1000
150
  }
151
  ],
152
  "logging_steps": 50,
@@ -166,7 +96,7 @@
166
  "attributes": {}
167
  }
168
  },
169
- "total_flos": 3.895117780751483e+17,
170
  "train_batch_size": 2,
171
  "trial_name": null,
172
  "trial_params": null
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 0.03836047336824137,
5
  "eval_steps": 500,
6
+ "global_step": 500,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.0038360473368241363,
13
+ "grad_norm": 42.40585708618164,
14
  "learning_rate": 9.961638790854688e-05,
15
+ "loss": 76.6444,
16
  "step": 50
17
  },
18
  {
19
  "epoch": 0.0076720946736482725,
20
+ "grad_norm": 47.012596130371094,
21
  "learning_rate": 9.923277581709376e-05,
22
+ "loss": 73.8558,
23
  "step": 100
24
  },
25
  {
26
  "epoch": 0.011508142010472408,
27
+ "grad_norm": 37.02272033691406,
28
  "learning_rate": 9.884916372564063e-05,
29
+ "loss": 72.8816,
30
  "step": 150
31
  },
32
  {
33
  "epoch": 0.015344189347296545,
34
+ "grad_norm": 47.1681022644043,
35
  "learning_rate": 9.846555163418752e-05,
36
+ "loss": 72.3309,
37
  "step": 200
38
  },
39
  {
40
  "epoch": 0.019180236684120684,
41
+ "grad_norm": 61.863441467285156,
42
  "learning_rate": 9.80819395427344e-05,
43
+ "loss": 71.639,
44
  "step": 250
45
  },
46
  {
47
  "epoch": 0.023016284020944817,
48
+ "grad_norm": 65.1348648071289,
49
  "learning_rate": 9.769832745128127e-05,
50
+ "loss": 71.186,
51
  "step": 300
52
  },
53
  {
54
  "epoch": 0.026852331357768953,
55
+ "grad_norm": 54.1230583190918,
56
  "learning_rate": 9.731471535982815e-05,
57
+ "loss": 70.5527,
58
  "step": 350
59
  },
60
  {
61
  "epoch": 0.03068837869459309,
62
+ "grad_norm": 24.713777542114258,
63
  "learning_rate": 9.693110326837502e-05,
64
+ "loss": 70.4304,
65
  "step": 400
66
  },
67
  {
68
  "epoch": 0.03452442603141723,
69
+ "grad_norm": 33.9341926574707,
70
  "learning_rate": 9.65474911769219e-05,
71
+ "loss": 69.9943,
72
  "step": 450
73
  },
74
  {
75
  "epoch": 0.03836047336824137,
76
+ "grad_norm": 45.115230560302734,
77
  "learning_rate": 9.616387908546877e-05,
78
+ "loss": 69.615,
79
  "step": 500
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
  }
81
  ],
82
  "logging_steps": 50,
 
96
  "attributes": {}
97
  }
98
  },
99
+ "total_flos": 8.568157303923016e+17,
100
  "train_batch_size": 2,
101
  "trial_name": null,
102
  "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c1d0607b0ef32a7fb9356d7e9cea98c6b3f4ff9d058f42339926e1f0132b13a7
3
  size 5240
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eb6051eee29c31ca0092abe3bc9f1d55068e299690664ab4b9484f44a7a3cebf
3
  size 5240