iko-01 committed
Commit 06cd7c2 · verified · 1 Parent(s): 541ca36

Upload the final model with all files, for experimentation or retraining

config.json ADDED
@@ -0,0 +1,38 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_pdrop": 0.1,
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_ctx": 1024,
+ "n_embd": 768,
+ "n_head": 12,
+ "n_inner": null,
+ "n_layer": 12,
+ "n_positions": 1024,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "task_specific_params": {
+ "text-generation": {
+ "do_sample": true,
+ "max_length": 50
+ }
+ },
+ "torch_dtype": "float32",
+ "transformers_version": "4.55.4",
+ "use_cache": true,
+ "vocab_size": 50257
+ }
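This is the stock GPT-2 "small" configuration: 12 layers, 12 attention heads, 768-dimensional embeddings, a 1024-token context window, and a 50257-token vocabulary, with sampling defaults under task_specific_params. A minimal loading sketch, assuming the transformers library is installed and this repository is cloned locally at the placeholder path "./model":

from transformers import GPT2LMHeadModel

# "./model" is a placeholder for a local clone of this repository.
model = GPT2LMHeadModel.from_pretrained("./model")

# The config above corresponds to the ~124M-parameter GPT-2 small architecture.
print(model.config.n_layer, model.config.n_embd, model.num_parameters())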
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 50256,
+ "eos_token_id": 50256,
+ "transformers_version": "4.55.4"
+ }
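generation_config.json pins only the BOS/EOS token id (50256); the sampling defaults (do_sample, max_length 50) come from task_specific_params in config.json. A generation sketch under the same placeholder-path assumption as above; the prompt is illustrative only:

from transformers import GPT2LMHeadModel, GPT2Tokenizer

model = GPT2LMHeadModel.from_pretrained("./model")        # placeholder local path
tokenizer = GPT2Tokenizer.from_pretrained("./model")

inputs = tokenizer("Once upon a time", return_tensors="pt")
# do_sample=True and max_length=50 mirror task_specific_params["text-generation"]
output_ids = model.generate(**inputs, do_sample=True, max_length=50,
                            pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))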
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc24163d1da10722e4d32189bcc6841b66f5ff31be72546183f44688910d8b47
+ size 497774208
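model.safetensors and the other large binaries in this commit are stored as Git LFS pointers: the diff records only the object hash and size (about 498 MB here), and the actual weights are fetched separately via git-lfs or the Hub client. A sketch of downloading the file with huggingface_hub and checking it against the pointer's sha256; the repository id "iko-01/<repo-name>" is a placeholder, not taken from this commit:

import hashlib
from huggingface_hub import hf_hub_download

# Placeholder repo id; substitute the repository this commit belongs to.
path = hf_hub_download(repo_id="iko-01/<repo-name>", filename="model.safetensors")

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

# Should equal the oid recorded in the LFS pointer above.
print(sha256.hexdigest())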
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:504780adaffd2d82966db63011168a74086da9d8a13f359535001f43344dee77
+ size 995644811
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d395676b079053e76bb59133efbe2911ced49beb9e6af9be31f21b4748ee43e3
+ size 14645
scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4ffbf4c791f22edd02ea9239c34b1149287b54bdbe73135a430fe19e449a59a
+ size 1383
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97f899d6b97a12f3e6f0fb5e17def131f65b0fcc96779976cbe14d7e07b95ffa
+ size 1465
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "bos_token": "<|endoftext|>",
+ "eos_token": "<|endoftext|>",
+ "pad_token": "<|endoftext|>",
+ "unk_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "50256": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<|endoftext|>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|endoftext|>",
+ "extra_special_tokens": {},
+ "model_max_length": 1024,
+ "pad_token": "<|endoftext|>",
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "<|endoftext|>"
+ }
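The tokenizer is the standard GPT-2 byte-level BPE tokenizer (vocab.json + merges.txt), with <|endoftext|> (id 50256) reused as the BOS, EOS, PAD, and UNK token and a model_max_length of 1024. A short inspection sketch, again assuming a local clone at the placeholder path "./model":

from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("./model")      # placeholder local path

# All four special tokens resolve to <|endoftext|>, per special_tokens_map.json.
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token, tokenizer.unk_token)
print(tokenizer.convert_tokens_to_ids("<|endoftext|>"))   # 50256
print(tokenizer.model_max_length)                         # 1024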
trainer_state.json ADDED
@@ -0,0 +1,292 @@
+ {
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.5549774540409296,
+ "eval_steps": 500,
+ "global_step": 3000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.018499248468030985,
+ "grad_norm": 2.7567262649536133,
+ "learning_rate": 9.9e-06,
+ "loss": 2.3993,
+ "step": 100
+ },
+ {
+ "epoch": 0.03699849693606197,
+ "grad_norm": 2.469092845916748,
+ "learning_rate": 1.9900000000000003e-05,
+ "loss": 1.8863,
+ "step": 200
+ },
+ {
+ "epoch": 0.05549774540409296,
+ "grad_norm": 2.287935256958008,
+ "learning_rate": 1.987638906230491e-05,
+ "loss": 1.6964,
+ "step": 300
+ },
+ {
+ "epoch": 0.07399699387212394,
+ "grad_norm": 2.158879518508911,
+ "learning_rate": 1.9751529529279564e-05,
+ "loss": 1.6172,
+ "step": 400
+ },
+ {
+ "epoch": 0.09249624234015492,
+ "grad_norm": 2.3301937580108643,
+ "learning_rate": 1.9626669996254215e-05,
+ "loss": 1.5604,
+ "step": 500
+ },
+ {
+ "epoch": 0.09249624234015492,
+ "eval_loss": 1.5311822891235352,
+ "eval_runtime": 836.5935,
+ "eval_samples_per_second": 22.154,
+ "eval_steps_per_second": 11.077,
+ "step": 500
+ },
+ {
+ "epoch": 0.11099549080818592,
+ "grad_norm": 1.958460807800293,
+ "learning_rate": 1.950181046322887e-05,
+ "loss": 1.5221,
+ "step": 600
+ },
+ {
+ "epoch": 0.1294947392762169,
+ "grad_norm": 2.088949680328369,
+ "learning_rate": 1.9376950930203523e-05,
+ "loss": 1.5003,
+ "step": 700
+ },
+ {
+ "epoch": 0.14799398774424788,
+ "grad_norm": 2.262972593307495,
+ "learning_rate": 1.9252091397178178e-05,
+ "loss": 1.4501,
+ "step": 800
+ },
+ {
+ "epoch": 0.16649323621227888,
+ "grad_norm": 1.8716174364089966,
+ "learning_rate": 1.912723186415283e-05,
+ "loss": 1.4359,
+ "step": 900
+ },
+ {
+ "epoch": 0.18499248468030985,
+ "grad_norm": 1.6942836046218872,
+ "learning_rate": 1.9002372331127483e-05,
+ "loss": 1.4317,
+ "step": 1000
+ },
+ {
+ "epoch": 0.18499248468030985,
+ "eval_loss": 1.4133305549621582,
+ "eval_runtime": 838.9381,
+ "eval_samples_per_second": 22.092,
+ "eval_steps_per_second": 11.046,
+ "step": 1000
+ },
+ {
+ "epoch": 0.20349173314834085,
+ "grad_norm": 2.1511948108673096,
+ "learning_rate": 1.8877512798102137e-05,
+ "loss": 1.4053,
+ "step": 1100
+ },
+ {
+ "epoch": 0.22199098161637185,
+ "grad_norm": 1.7339736223220825,
+ "learning_rate": 1.875265326507679e-05,
+ "loss": 1.3915,
+ "step": 1200
+ },
+ {
+ "epoch": 0.24049023008440282,
+ "grad_norm": 1.9543105363845825,
+ "learning_rate": 1.8627793732051446e-05,
+ "loss": 1.3825,
+ "step": 1300
+ },
+ {
+ "epoch": 0.2589894785524338,
+ "grad_norm": 1.7334178686141968,
+ "learning_rate": 1.8502934199026097e-05,
+ "loss": 1.3708,
+ "step": 1400
+ },
+ {
+ "epoch": 0.2774887270204648,
+ "grad_norm": 1.5893480777740479,
+ "learning_rate": 1.837807466600075e-05,
+ "loss": 1.3594,
+ "step": 1500
+ },
+ {
+ "epoch": 0.2774887270204648,
+ "eval_loss": 1.3516819477081299,
+ "eval_runtime": 838.2447,
+ "eval_samples_per_second": 22.11,
+ "eval_steps_per_second": 11.055,
+ "step": 1500
+ },
+ {
+ "epoch": 0.29598797548849576,
+ "grad_norm": 1.9415546655654907,
+ "learning_rate": 1.8253215132975405e-05,
+ "loss": 1.3602,
+ "step": 1600
+ },
+ {
+ "epoch": 0.3144872239565268,
+ "grad_norm": 1.6256144046783447,
+ "learning_rate": 1.812835559995006e-05,
+ "loss": 1.3297,
+ "step": 1700
+ },
+ {
+ "epoch": 0.33298647242455776,
+ "grad_norm": 1.6166146993637085,
+ "learning_rate": 1.800349606692471e-05,
+ "loss": 1.3273,
+ "step": 1800
+ },
+ {
+ "epoch": 0.3514857208925887,
+ "grad_norm": 1.7110105752944946,
+ "learning_rate": 1.7878636533899365e-05,
+ "loss": 1.2984,
+ "step": 1900
+ },
+ {
+ "epoch": 0.3699849693606197,
+ "grad_norm": 1.4126442670822144,
+ "learning_rate": 1.7753777000874016e-05,
+ "loss": 1.3113,
+ "step": 2000
+ },
+ {
+ "epoch": 0.3699849693606197,
+ "eval_loss": 1.3114128112792969,
+ "eval_runtime": 836.695,
+ "eval_samples_per_second": 22.151,
+ "eval_steps_per_second": 11.076,
+ "step": 2000
+ },
+ {
+ "epoch": 0.3884842178286507,
+ "grad_norm": 1.3738536834716797,
+ "learning_rate": 1.7628917467848673e-05,
+ "loss": 1.2932,
+ "step": 2100
+ },
+ {
+ "epoch": 0.4069834662966817,
+ "grad_norm": 1.5857338905334473,
+ "learning_rate": 1.7504057934823324e-05,
+ "loss": 1.2871,
+ "step": 2200
+ },
+ {
+ "epoch": 0.42548271476471267,
+ "grad_norm": 1.4277790784835815,
+ "learning_rate": 1.737919840179798e-05,
+ "loss": 1.2887,
+ "step": 2300
+ },
+ {
+ "epoch": 0.4439819632327437,
+ "grad_norm": 1.380540132522583,
+ "learning_rate": 1.7254338868772633e-05,
+ "loss": 1.2879,
+ "step": 2400
+ },
+ {
+ "epoch": 0.46248121170077466,
+ "grad_norm": 1.55470871925354,
+ "learning_rate": 1.7129479335747284e-05,
+ "loss": 1.2914,
+ "step": 2500
+ },
+ {
+ "epoch": 0.46248121170077466,
+ "eval_loss": 1.2786827087402344,
+ "eval_runtime": 829.3841,
+ "eval_samples_per_second": 22.347,
+ "eval_steps_per_second": 11.173,
+ "step": 2500
+ },
+ {
+ "epoch": 0.48098046016880563,
+ "grad_norm": 1.2825775146484375,
+ "learning_rate": 1.700461980272194e-05,
+ "loss": 1.2576,
+ "step": 2600
+ },
+ {
+ "epoch": 0.4994797086368366,
+ "grad_norm": 1.512398600578308,
+ "learning_rate": 1.6879760269696593e-05,
+ "loss": 1.2507,
+ "step": 2700
+ },
+ {
+ "epoch": 0.5179789571048676,
+ "grad_norm": 1.2684593200683594,
+ "learning_rate": 1.6754900736671247e-05,
+ "loss": 1.2489,
+ "step": 2800
+ },
+ {
+ "epoch": 0.5364782055728986,
+ "grad_norm": 1.6501576900482178,
+ "learning_rate": 1.6630041203645898e-05,
+ "loss": 1.2484,
+ "step": 2900
+ },
+ {
+ "epoch": 0.5549774540409296,
+ "grad_norm": 1.7460854053497314,
+ "learning_rate": 1.6505181670620552e-05,
+ "loss": 1.2344,
+ "step": 3000
+ },
+ {
+ "epoch": 0.5549774540409296,
+ "eval_loss": 1.2557722330093384,
+ "eval_runtime": 829.0461,
+ "eval_samples_per_second": 22.356,
+ "eval_steps_per_second": 11.178,
+ "step": 3000
+ }
+ ],
+ "logging_steps": 100,
+ "max_steps": 16218,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 1000,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 2.5139428982784e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+ }
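trainer_state.json captures a mid-run checkpoint: step 3000 of 16218 (epoch about 0.55 of 3, train_batch_size 2), with training loss falling from 2.40 to 1.23 and eval loss from 1.53 to 1.26. Together with optimizer.pt, scheduler.pt, scaler.pt, and rng_state.pth, it is what lets Trainer.train(resume_from_checkpoint=...) continue from this exact step, matching the commit message's retraining use case. A runnable sketch that just reads the logged history, assuming a local clone at the placeholder path "./model":

import json

with open("./model/trainer_state.json") as f:              # placeholder local path
    state = json.load(f)

print(f"step {state['global_step']} / {state['max_steps']}, epoch {state['epoch']:.2f}")

# Print the evaluation-loss curve logged every eval_steps=500.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"step {entry['step']:>5}: eval_loss {entry['eval_loss']:.4f}")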
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e159870768706f7cd7856a6590cfc28fc38cce2a89f930120df5926c8a809cac
+ size 5713
vocab.json ADDED
The diff for this file is too large to render. See raw diff