iko-01 committed
Commit e10bed1 · verified · 1 Parent(s): 21f8bb3

Upload the final model with all files for testing or retraining

config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.55.4",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
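The config matches the standard GPT-2 base architecture (12 layers, 12 heads, 768-dim embeddings, 1024-token context, 50257-token vocabulary) saved in float32. A minimal loading sketch, assuming a local clone of this repo (the path is a placeholder, not part of the commit):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "./final-model"  # placeholder: local clone of this repo or its Hub id
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)  # reads config.json + model.safetensors
model.eval()
```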
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.55.4"
+ }
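The generation defaults here only pin the special tokens; the sampling settings live in config.json under task_specific_params. A short sampling sketch reusing model and tokenizer from the snippet above (the prompt is illustrative only):

```python
import torch

inputs = tokenizer("Once upon a time", return_tensors="pt")
with torch.no_grad():
    output = model.generate(**inputs, do_sample=True, max_length=50)  # mirrors task_specific_params
print(tokenizer.decode(output[0], skip_special_tokens=True))
```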
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45be989d3511f4d94fad2552286260011ad42f09985f600f654c42455c17cf06
+ size 497774208
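This is a Git LFS pointer, not the weights themselves; the 497,774,208-byte blob is consistent with GPT-2 base's ~124M parameters stored as float32 (4 bytes each). After `git lfs pull`, the tensors can be inspected directly, a sketch:

```python
from safetensors.torch import load_file

state_dict = load_file("model.safetensors")
print(f"{len(state_dict)} tensors")
for name, tensor in list(state_dict.items())[:3]:
    print(name, tuple(tensor.shape))
```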
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d68a7966fa57efce40bba244718f5b105a1c0b270e535c5a1a9df4479a91188
+ size 995644811
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8327635f702c9652852e8a4ff3ae273f89cd085970f61e237f78662d199e34dd
+ size 14645
scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52ccff13654eb643cada7d9b3c064203a1528bc3b6806d14ae61b19deceb9580
+ size 1383
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb6341bc67914d50adde2854d0ef31de777e24a2c75bcad6c093b0d2b4db7708
+ size 1465
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "extra_special_tokens": {},
+   "model_max_length": 1024,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
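GPT-2 ships without a dedicated padding token, so this checkpoint maps bos, eos, pad, and unk all to <|endoftext|> (id 50256). A quick check, assuming the same placeholder path as above:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./final-model")  # placeholder path
assert tokenizer.pad_token == tokenizer.eos_token == "<|endoftext|>"
print(tokenizer.pad_token_id)  # 50256
```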
trainer_state.json ADDED
@@ -0,0 +1,206 @@
+ {
+   "best_global_step": null,
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.3699849693606197,
+   "eval_steps": 500,
+   "global_step": 2000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.018499248468030985,
+       "grad_norm": 2.7567262649536133,
+       "learning_rate": 9.9e-06,
+       "loss": 2.3993,
+       "step": 100
+     },
+     {
+       "epoch": 0.03699849693606197,
+       "grad_norm": 2.469092845916748,
+       "learning_rate": 1.9900000000000003e-05,
+       "loss": 1.8863,
+       "step": 200
+     },
+     {
+       "epoch": 0.05549774540409296,
+       "grad_norm": 2.287935256958008,
+       "learning_rate": 1.987638906230491e-05,
+       "loss": 1.6964,
+       "step": 300
+     },
+     {
+       "epoch": 0.07399699387212394,
+       "grad_norm": 2.158879518508911,
+       "learning_rate": 1.9751529529279564e-05,
+       "loss": 1.6172,
+       "step": 400
+     },
+     {
+       "epoch": 0.09249624234015492,
+       "grad_norm": 2.3301937580108643,
+       "learning_rate": 1.9626669996254215e-05,
+       "loss": 1.5604,
+       "step": 500
+     },
+     {
+       "epoch": 0.09249624234015492,
+       "eval_loss": 1.5311822891235352,
+       "eval_runtime": 836.5935,
+       "eval_samples_per_second": 22.154,
+       "eval_steps_per_second": 11.077,
+       "step": 500
+     },
+     {
+       "epoch": 0.11099549080818592,
+       "grad_norm": 1.958460807800293,
+       "learning_rate": 1.950181046322887e-05,
+       "loss": 1.5221,
+       "step": 600
+     },
+     {
+       "epoch": 0.1294947392762169,
+       "grad_norm": 2.088949680328369,
+       "learning_rate": 1.9376950930203523e-05,
+       "loss": 1.5003,
+       "step": 700
+     },
+     {
+       "epoch": 0.14799398774424788,
+       "grad_norm": 2.262972593307495,
+       "learning_rate": 1.9252091397178178e-05,
+       "loss": 1.4501,
+       "step": 800
+     },
+     {
+       "epoch": 0.16649323621227888,
+       "grad_norm": 1.8716174364089966,
+       "learning_rate": 1.912723186415283e-05,
+       "loss": 1.4359,
+       "step": 900
+     },
+     {
+       "epoch": 0.18499248468030985,
+       "grad_norm": 1.6942836046218872,
+       "learning_rate": 1.9002372331127483e-05,
+       "loss": 1.4317,
+       "step": 1000
+     },
+     {
+       "epoch": 0.18499248468030985,
+       "eval_loss": 1.4133305549621582,
+       "eval_runtime": 838.9381,
+       "eval_samples_per_second": 22.092,
+       "eval_steps_per_second": 11.046,
+       "step": 1000
+     },
+     {
+       "epoch": 0.20349173314834085,
+       "grad_norm": 2.1511948108673096,
+       "learning_rate": 1.8877512798102137e-05,
+       "loss": 1.4053,
+       "step": 1100
+     },
+     {
+       "epoch": 0.22199098161637185,
+       "grad_norm": 1.7339736223220825,
+       "learning_rate": 1.875265326507679e-05,
+       "loss": 1.3915,
+       "step": 1200
+     },
+     {
+       "epoch": 0.24049023008440282,
+       "grad_norm": 1.9543105363845825,
+       "learning_rate": 1.8627793732051446e-05,
+       "loss": 1.3825,
+       "step": 1300
+     },
+     {
+       "epoch": 0.2589894785524338,
+       "grad_norm": 1.7334178686141968,
+       "learning_rate": 1.8502934199026097e-05,
+       "loss": 1.3708,
+       "step": 1400
+     },
+     {
+       "epoch": 0.2774887270204648,
+       "grad_norm": 1.5893480777740479,
+       "learning_rate": 1.837807466600075e-05,
+       "loss": 1.3594,
+       "step": 1500
+     },
+     {
+       "epoch": 0.2774887270204648,
+       "eval_loss": 1.3516819477081299,
+       "eval_runtime": 838.2447,
+       "eval_samples_per_second": 22.11,
+       "eval_steps_per_second": 11.055,
+       "step": 1500
+     },
+     {
+       "epoch": 0.29598797548849576,
+       "grad_norm": 1.9415546655654907,
+       "learning_rate": 1.8253215132975405e-05,
+       "loss": 1.3602,
+       "step": 1600
+     },
+     {
+       "epoch": 0.3144872239565268,
+       "grad_norm": 1.6256144046783447,
+       "learning_rate": 1.812835559995006e-05,
+       "loss": 1.3297,
+       "step": 1700
+     },
+     {
+       "epoch": 0.33298647242455776,
+       "grad_norm": 1.6166146993637085,
+       "learning_rate": 1.800349606692471e-05,
+       "loss": 1.3273,
+       "step": 1800
+     },
+     {
+       "epoch": 0.3514857208925887,
+       "grad_norm": 1.7110105752944946,
+       "learning_rate": 1.7878636533899365e-05,
+       "loss": 1.2984,
+       "step": 1900
+     },
+     {
+       "epoch": 0.3699849693606197,
+       "grad_norm": 1.4126442670822144,
+       "learning_rate": 1.7753777000874016e-05,
+       "loss": 1.3113,
+       "step": 2000
+     },
+     {
+       "epoch": 0.3699849693606197,
+       "eval_loss": 1.3114128112792969,
+       "eval_runtime": 836.695,
+       "eval_samples_per_second": 22.151,
+       "eval_steps_per_second": 11.076,
+       "step": 2000
+     }
+   ],
+   "logging_steps": 100,
+   "max_steps": 16218,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 1000,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.6722690048e+16,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
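The state captures an in-progress run: step 2000 of 16218 (about 0.37 of 3 planned epochs), with eval loss dropping steadily from 1.531 to 1.311. Since optimizer, scheduler, scaler, and RNG state are all committed, the run can be resumed. A sketch, assuming the original dataset and collator (which are not in this commit) are reconstructed:

```python
from transformers import Trainer

trainer = Trainer(
    model=model,                  # loaded as in the config.json snippet above
    args=training_args,           # see training_args.bin below
    train_dataset=train_dataset,  # hypothetical: the original training set
    data_collator=data_collator,  # hypothetical: the original collator
)
# Point at a directory containing these checkpoint files (path is a placeholder).
trainer.train(resume_from_checkpoint="./final-model")
```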
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b589167ffa8a5961cb77283dec1a9d9b8eafe41c4a04742013a7a0d742c3efb
+ size 5713
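training_args.bin is a pickled TrainingArguments object, so it can be inspected to recover the exact hyperparameters used. A sketch (weights_only=False unpickles arbitrary objects, so only do this with files you trust):

```python
import torch

training_args = torch.load("training_args.bin", weights_only=False)  # trusted file only
print(training_args.per_device_train_batch_size,
      training_args.learning_rate,
      training_args.num_train_epochs)
```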
vocab.json ADDED
The diff for this file is too large to render. See raw diff