khaikheolatui committed
Commit 5c3081d · verified · 1 Parent(s): 2d2b9b8

Upload folder using huggingface_hub

checkpoint-5640/config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224-in21k",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "AI-Generated Images",
+     "1": "Real Images"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "AI-Generated Images": 0,
+     "Real Images": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.46.3"
+ }
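
This is a stock transformers ViT classification config, so the checkpoint loads with no custom code. A minimal sketch, assuming the folder has been downloaded locally as ./checkpoint-5640 (a hypothetical path):

from transformers import ViTForImageClassification

# Hypothetical local path: wherever this checkpoint folder was downloaded.
model = ViTForImageClassification.from_pretrained("./checkpoint-5640")

# The label mapping declared in config.json above.
print(model.config.id2label)  # {0: 'AI-Generated Images', 1: 'Real Images'}
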
checkpoint-5640/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8b7237b60616bf439295c0ab154847a7bacf8878d0a6510b87aa6036bad8b37
+ size 343223968
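
model.safetensors is stored via Git LFS, so the commit only carries this pointer; the oid is the SHA-256 of the real 343 MB weights file. A minimal sketch for verifying a downloaded copy against the pointer (the local filename is an assumption):

import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file through SHA-256, which is what the LFS oid records."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# oid from the pointer above; the filename is a hypothetical local copy.
expected = "c8b7237b60616bf439295c0ab154847a7bacf8878d0a6510b87aa6036bad8b37"
assert lfs_sha256("model.safetensors") == expected
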
checkpoint-5640/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d53b6485a23fa058e1e39ab02f89ef1fa8074379060ca12fed60d8d861e40f8c
+ size 686568890
checkpoint-5640/preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
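
These settings (resize to 224×224, rescale by 1/255 ≈ 0.00392, normalize each channel with mean/std 0.5) are what ViTImageProcessor applies at inference. A minimal sketch, assuming a hypothetical local image test.jpg and the checkpoint path used above:

from PIL import Image
from transformers import ViTImageProcessor

# Reads do_resize / do_rescale / do_normalize etc. from preprocessor_config.json.
processor = ViTImageProcessor.from_pretrained("./checkpoint-5640")  # hypothetical path

image = Image.open("test.jpg").convert("RGB")  # hypothetical input
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
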
checkpoint-5640/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8199a5eafcbaf3d2c542539947ec9d31a260e7bea63cc11fd637ed4208f518da
+ size 14244
checkpoint-5640/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21270894a258c70a05d5683e8af3e2e5d4eb10f39c9d620b8cb4db90ad79b66d
+ size 1064
checkpoint-5640/trainer_state.json ADDED
@@ -0,0 +1,410 @@
+ {
+   "best_metric": 0.024019582197070122,
+   "best_model_checkpoint": "human_faces_ai_vs_real_image_detection/checkpoint-5640",
+   "epoch": 30.0,
+   "eval_steps": 500,
+   "global_step": 5640,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.8005,
+       "eval_loss": 0.6034463047981262,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 36.8165,
+       "eval_samples_per_second": 108.647,
+       "eval_steps_per_second": 13.581,
+       "step": 188
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.945,
+       "eval_loss": 0.45400869846343994,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 36.5101,
+       "eval_samples_per_second": 109.559,
+       "eval_steps_per_second": 13.695,
+       "step": 376
+     },
+     {
+       "epoch": 2.6595744680851063,
+       "grad_norm": 1.0709679126739502,
+       "learning_rate": 6.43649373881932e-07,
+       "loss": 0.4854,
+       "step": 500
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.99025,
+       "eval_loss": 0.3362714946269989,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 36.1828,
+       "eval_samples_per_second": 110.55,
+       "eval_steps_per_second": 13.819,
+       "step": 564
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.9975,
+       "eval_loss": 0.2511403262615204,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 36.1289,
+       "eval_samples_per_second": 110.715,
+       "eval_steps_per_second": 13.839,
+       "step": 752
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.99975,
+       "eval_loss": 0.1973569691181183,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 35.3327,
+       "eval_samples_per_second": 113.21,
+       "eval_steps_per_second": 14.151,
+       "step": 940
+     },
+     {
+       "epoch": 5.319148936170213,
+       "grad_norm": 0.4672161638736725,
+       "learning_rate": 5.810375670840786e-07,
+       "loss": 0.1274,
+       "step": 1000
+     },
+     {
+       "epoch": 6.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.1595696210861206,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 35.311,
+       "eval_samples_per_second": 113.279,
+       "eval_steps_per_second": 14.16,
+       "step": 1128
+     },
+     {
+       "epoch": 7.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.13066644966602325,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 35.4331,
+       "eval_samples_per_second": 112.889,
+       "eval_steps_per_second": 14.111,
+       "step": 1316
+     },
+     {
+       "epoch": 7.9787234042553195,
+       "grad_norm": 0.3177025318145752,
+       "learning_rate": 5.184257602862254e-07,
+       "loss": 0.0553,
+       "step": 1500
+     },
+     {
+       "epoch": 8.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.11046025156974792,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 35.3727,
+       "eval_samples_per_second": 113.081,
+       "eval_steps_per_second": 14.135,
+       "step": 1504
+     },
+     {
+       "epoch": 9.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.09384504705667496,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 35.5517,
+       "eval_samples_per_second": 112.512,
+       "eval_steps_per_second": 14.064,
+       "step": 1692
+     },
+     {
+       "epoch": 10.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.08109256625175476,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 36.4656,
+       "eval_samples_per_second": 109.692,
+       "eval_steps_per_second": 13.712,
+       "step": 1880
+     },
+     {
+       "epoch": 10.638297872340425,
+       "grad_norm": 0.21588321030139923,
+       "learning_rate": 4.558139534883721e-07,
+       "loss": 0.0356,
+       "step": 2000
+     },
+     {
+       "epoch": 11.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.07062225043773651,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 37.594,
+       "eval_samples_per_second": 106.4,
+       "eval_steps_per_second": 13.3,
+       "step": 2068
+     },
+     {
+       "epoch": 12.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.062023114413022995,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 36.3841,
+       "eval_samples_per_second": 109.938,
+       "eval_steps_per_second": 13.742,
+       "step": 2256
+     },
+     {
+       "epoch": 13.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.05518809333443642,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 36.86,
+       "eval_samples_per_second": 108.519,
+       "eval_steps_per_second": 13.565,
+       "step": 2444
+     },
+     {
+       "epoch": 13.297872340425531,
+       "grad_norm": 0.19038738310337067,
+       "learning_rate": 3.932021466905188e-07,
+       "loss": 0.0269,
+       "step": 2500
+     },
+     {
+       "epoch": 14.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.04978133738040924,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 36.5409,
+       "eval_samples_per_second": 109.466,
+       "eval_steps_per_second": 13.683,
+       "step": 2632
+     },
+     {
+       "epoch": 15.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.04512861743569374,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 35.8932,
+       "eval_samples_per_second": 111.442,
+       "eval_steps_per_second": 13.93,
+       "step": 2820
+     },
+     {
+       "epoch": 15.957446808510639,
+       "grad_norm": 0.1552741378545761,
+       "learning_rate": 3.3059033989266543e-07,
+       "loss": 0.022,
+       "step": 3000
+     },
+     {
+       "epoch": 16.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.04158434644341469,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 35.906,
+       "eval_samples_per_second": 111.402,
+       "eval_steps_per_second": 13.925,
+       "step": 3008
+     },
+     {
+       "epoch": 17.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.03861114755272865,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 35.3432,
+       "eval_samples_per_second": 113.176,
+       "eval_steps_per_second": 14.147,
+       "step": 3196
+     },
+     {
+       "epoch": 18.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.03599005192518234,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 35.0022,
+       "eval_samples_per_second": 114.278,
+       "eval_steps_per_second": 14.285,
+       "step": 3384
+     },
+     {
+       "epoch": 18.617021276595743,
+       "grad_norm": 0.13470065593719482,
+       "learning_rate": 2.679785330948122e-07,
+       "loss": 0.0189,
+       "step": 3500
+     },
+     {
+       "epoch": 19.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.03385862708091736,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 34.9618,
+       "eval_samples_per_second": 114.41,
+       "eval_steps_per_second": 14.301,
+       "step": 3572
+     },
+     {
+       "epoch": 20.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.03184980899095535,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 35.6458,
+       "eval_samples_per_second": 112.215,
+       "eval_steps_per_second": 14.027,
+       "step": 3760
+     },
+     {
+       "epoch": 21.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.03002801164984703,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 35.4539,
+       "eval_samples_per_second": 112.822,
+       "eval_steps_per_second": 14.103,
+       "step": 3948
+     },
+     {
+       "epoch": 21.27659574468085,
+       "grad_norm": 0.12372926622629166,
+       "learning_rate": 2.0536672629695882e-07,
+       "loss": 0.017,
+       "step": 4000
+     },
+     {
+       "epoch": 22.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.028735142201185226,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 36.0465,
+       "eval_samples_per_second": 110.968,
+       "eval_steps_per_second": 13.871,
+       "step": 4136
+     },
+     {
+       "epoch": 23.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.02766670472919941,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 36.0255,
+       "eval_samples_per_second": 111.032,
+       "eval_steps_per_second": 13.879,
+       "step": 4324
+     },
+     {
+       "epoch": 23.93617021276596,
+       "grad_norm": 0.11596166342496872,
+       "learning_rate": 1.4275491949910554e-07,
+       "loss": 0.0156,
+       "step": 4500
+     },
+     {
+       "epoch": 24.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.026656346395611763,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 36.1037,
+       "eval_samples_per_second": 110.792,
+       "eval_steps_per_second": 13.849,
+       "step": 4512
+     },
+     {
+       "epoch": 25.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.025852810591459274,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 35.2898,
+       "eval_samples_per_second": 113.347,
+       "eval_steps_per_second": 14.168,
+       "step": 4700
+     },
+     {
+       "epoch": 26.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.025204475969076157,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 35.3092,
+       "eval_samples_per_second": 113.285,
+       "eval_steps_per_second": 14.161,
+       "step": 4888
+     },
+     {
+       "epoch": 26.595744680851062,
+       "grad_norm": 0.11188771575689316,
+       "learning_rate": 8.014311270125224e-08,
+       "loss": 0.0147,
+       "step": 5000
+     },
+     {
+       "epoch": 27.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.024634206667542458,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 35.3596,
+       "eval_samples_per_second": 113.124,
+       "eval_steps_per_second": 14.14,
+       "step": 5076
+     },
+     {
+       "epoch": 28.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.02432175725698471,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 34.9485,
+       "eval_samples_per_second": 114.454,
+       "eval_steps_per_second": 14.307,
+       "step": 5264
+     },
+     {
+       "epoch": 29.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.024080481380224228,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 35.3389,
+       "eval_samples_per_second": 113.19,
+       "eval_steps_per_second": 14.149,
+       "step": 5452
+     },
+     {
+       "epoch": 29.25531914893617,
+       "grad_norm": 0.10677912831306458,
+       "learning_rate": 1.7531305903398928e-08,
+       "loss": 0.0142,
+       "step": 5500
+     },
+     {
+       "epoch": 30.0,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.024019582197070122,
+       "eval_model_preparation_time": 0.0037,
+       "eval_runtime": 35.8919,
+       "eval_samples_per_second": 111.446,
+       "eval_steps_per_second": 13.931,
+       "step": 5640
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 5640,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 30,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.394855813062656e+19,
+   "train_batch_size": 32,
+   "trial_name": null,
+   "trial_params": null
+ }
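
A few things fall out of this state file directly: 5640 steps over 30 epochs is 188 steps per epoch, which at train_batch_size 32 implies roughly 6,016 training images; eval accuracy saturates at 1.0 from epoch 6 while eval loss keeps shrinking toward the best_metric of about 0.024. Since the file is plain JSON, the eval curve can be pulled out of log_history; a minimal sketch:

import json

with open("checkpoint-5640/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training-loss entries (logged every 500 steps) with
# per-epoch eval entries; keep only the latter.
evals = [e for e in state["log_history"] if "eval_loss" in e]
for e in evals:
    print(f"epoch {e['epoch']:>4}: eval_loss={e['eval_loss']:.4f} "
          f"acc={e['eval_accuracy']:.4f}")

print("best:", state["best_metric"], "at", state["best_model_checkpoint"])
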
checkpoint-5640/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7f97ee9a8942bf76f518cea737119f684d6f51d699a43a24025810154de7970
+ size 5240
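
training_args.bin is a pickled TrainingArguments object rather than a tensor file, so it has to be unpickled to inspect the training setup. A minimal sketch; the weights_only flag reflects an assumption about recent torch defaults:

import torch

# training_args.bin is a pickled TrainingArguments, not tensors; transformers
# must be installed for unpickling, and weights_only=False opts out of
# torch's tensor-only safe loading (the default on newer torch releases).
args = torch.load("checkpoint-5640/training_args.bin", weights_only=False)
print(args.num_train_epochs, args.learning_rate, args.per_device_train_batch_size)
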
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224-in21k",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "AI-Generated Images",
+     "1": "Real Images"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "AI-Generated Images": 0,
+     "Real Images": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.46.3"
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8b7237b60616bf439295c0ab154847a7bacf8878d0a6510b87aa6036bad8b37
+ size 343223968
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7f97ee9a8942bf76f518cea737119f684d6f51d699a43a24025810154de7970
+ size 5240
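
Taken together, the uploaded config, preprocessor settings, and weights are everything the transformers image-classification pipeline needs. A minimal end-to-end sketch; the hub id below is a guess assembled from the committer name and the checkpoint path in trainer_state.json, so substitute the real repo id:

from transformers import pipeline

# Hypothetical repo id; replace with the actual hub location of this upload.
clf = pipeline(
    "image-classification",
    model="khaikheolatui/human_faces_ai_vs_real_image_detection",
)
print(clf("test.jpg"))  # e.g. [{'label': 'Real Images', 'score': ...}, ...]
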