TTNVXX committed on
Commit
72d0bf5
·
verified ·
1 Parent(s): 237d1be

Upload folder using huggingface_hub

Browse files
README.md ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---
3
+ tags:
4
+ - autotrain
5
+ - image-classification
6
+ widget:
7
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
8
+ example_title: Tiger
9
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
10
+ example_title: Teapot
11
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
12
+ example_title: Palace
13
+ datasets:
14
+ - CartoonOrNotV2/autotrain-data
15
+ ---
16
+
17
+ # Model Trained Using AutoTrain
18
+
19
+ - Problem type: Image Classification
20
+
21
+ ## Validation Metrics
22
+ loss: 0.15279646217823029
23
+
24
+ f1: 0.9732620320855614
25
+
26
+ precision: 0.9891304347826086
27
+
28
+ recall: 0.9578947368421052
29
+
30
+ auc: 0.9932718393922951
31
+
32
+ accuracy: 0.9739583333333334
checkpoint-288/config.json ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "umm-maybe/AI-image-detector",
3
+ "_num_labels": 2,
4
+ "architectures": [
5
+ "SwinForImageClassification"
6
+ ],
7
+ "attention_probs_dropout_prob": 0.0,
8
+ "depths": [
9
+ 2,
10
+ 2,
11
+ 18,
12
+ 2
13
+ ],
14
+ "drop_path_rate": 0.1,
15
+ "embed_dim": 128,
16
+ "encoder_stride": 32,
17
+ "hidden_act": "gelu",
18
+ "hidden_dropout_prob": 0.0,
19
+ "hidden_size": 1024,
20
+ "id2label": {
21
+ "0": "cartoon",
22
+ "1": "not_cartoon"
23
+ },
24
+ "image_size": 224,
25
+ "initializer_range": 0.02,
26
+ "label2id": {
27
+ "cartoon": 0,
28
+ "not_cartoon": 1
29
+ },
30
+ "layer_norm_eps": 1e-05,
31
+ "max_length": 128,
32
+ "mlp_ratio": 4.0,
33
+ "model_type": "swin",
34
+ "num_channels": 3,
35
+ "num_heads": [
36
+ 4,
37
+ 8,
38
+ 16,
39
+ 32
40
+ ],
41
+ "num_layers": 4,
42
+ "out_features": [
43
+ "stage4"
44
+ ],
45
+ "out_indices": [
46
+ 4
47
+ ],
48
+ "padding": "max_length",
49
+ "patch_size": 4,
50
+ "path_norm": true,
51
+ "problem_type": "single_label_classification",
52
+ "qkv_bias": true,
53
+ "stage_names": [
54
+ "stem",
55
+ "stage1",
56
+ "stage2",
57
+ "stage3",
58
+ "stage4"
59
+ ],
60
+ "torch_dtype": "float32",
61
+ "transformers_version": "4.37.0",
62
+ "use_absolute_embeddings": false,
63
+ "window_size": 7
64
+ }
checkpoint-288/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b27d9cb81864aedd5fd7b853826b5feed5fd568206ae20dd78a1998c31ab6572
3
+ size 347498816
checkpoint-288/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5034940fe7b7459dd112344bec8c3db93a647c41fad64ab61b46cd5d60724a7
3
+ size 694305741
checkpoint-288/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:44efcf634920be27675244b15c4fdbb96dc40444df803567893fdb9276c02d3e
3
+ size 13926
checkpoint-288/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e33148aad92ddf4c03cb81316d9ace5fb35dc75321bc8a095c2633bf9c47595
3
+ size 1064
checkpoint-288/trainer_state.json ADDED
@@ -0,0 +1,492 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 0.15279646217823029,
3
+ "best_model_checkpoint": "CartoonOrNotV2/checkpoint-288",
4
+ "epoch": 3.0,
5
+ "eval_steps": 500,
6
+ "global_step": 288,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.04,
13
+ "learning_rate": 6.896551724137932e-06,
14
+ "loss": 0.8299,
15
+ "step": 4
16
+ },
17
+ {
18
+ "epoch": 0.08,
19
+ "learning_rate": 1.3793103448275863e-05,
20
+ "loss": 0.8639,
21
+ "step": 8
22
+ },
23
+ {
24
+ "epoch": 0.12,
25
+ "learning_rate": 2.0689655172413793e-05,
26
+ "loss": 0.6099,
27
+ "step": 12
28
+ },
29
+ {
30
+ "epoch": 0.17,
31
+ "learning_rate": 2.7586206896551727e-05,
32
+ "loss": 0.5108,
33
+ "step": 16
34
+ },
35
+ {
36
+ "epoch": 0.21,
37
+ "learning_rate": 3.4482758620689657e-05,
38
+ "loss": 0.6755,
39
+ "step": 20
40
+ },
41
+ {
42
+ "epoch": 0.25,
43
+ "learning_rate": 4.1379310344827587e-05,
44
+ "loss": 0.402,
45
+ "step": 24
46
+ },
47
+ {
48
+ "epoch": 0.29,
49
+ "learning_rate": 4.827586206896552e-05,
50
+ "loss": 0.354,
51
+ "step": 28
52
+ },
53
+ {
54
+ "epoch": 0.33,
55
+ "learning_rate": 4.9420849420849425e-05,
56
+ "loss": 0.2615,
57
+ "step": 32
58
+ },
59
+ {
60
+ "epoch": 0.38,
61
+ "learning_rate": 4.8648648648648654e-05,
62
+ "loss": 0.4872,
63
+ "step": 36
64
+ },
65
+ {
66
+ "epoch": 0.42,
67
+ "learning_rate": 4.787644787644788e-05,
68
+ "loss": 0.4822,
69
+ "step": 40
70
+ },
71
+ {
72
+ "epoch": 0.46,
73
+ "learning_rate": 4.710424710424711e-05,
74
+ "loss": 0.1944,
75
+ "step": 44
76
+ },
77
+ {
78
+ "epoch": 0.5,
79
+ "learning_rate": 4.6332046332046336e-05,
80
+ "loss": 0.4792,
81
+ "step": 48
82
+ },
83
+ {
84
+ "epoch": 0.54,
85
+ "learning_rate": 4.555984555984556e-05,
86
+ "loss": 0.1135,
87
+ "step": 52
88
+ },
89
+ {
90
+ "epoch": 0.58,
91
+ "learning_rate": 4.478764478764479e-05,
92
+ "loss": 0.1958,
93
+ "step": 56
94
+ },
95
+ {
96
+ "epoch": 0.62,
97
+ "learning_rate": 4.401544401544402e-05,
98
+ "loss": 0.0708,
99
+ "step": 60
100
+ },
101
+ {
102
+ "epoch": 0.67,
103
+ "learning_rate": 4.324324324324325e-05,
104
+ "loss": 0.4999,
105
+ "step": 64
106
+ },
107
+ {
108
+ "epoch": 0.71,
109
+ "learning_rate": 4.247104247104247e-05,
110
+ "loss": 0.257,
111
+ "step": 68
112
+ },
113
+ {
114
+ "epoch": 0.75,
115
+ "learning_rate": 4.16988416988417e-05,
116
+ "loss": 0.1863,
117
+ "step": 72
118
+ },
119
+ {
120
+ "epoch": 0.79,
121
+ "learning_rate": 4.092664092664093e-05,
122
+ "loss": 0.2244,
123
+ "step": 76
124
+ },
125
+ {
126
+ "epoch": 0.83,
127
+ "learning_rate": 4.015444015444015e-05,
128
+ "loss": 0.4371,
129
+ "step": 80
130
+ },
131
+ {
132
+ "epoch": 0.88,
133
+ "learning_rate": 3.938223938223938e-05,
134
+ "loss": 0.1398,
135
+ "step": 84
136
+ },
137
+ {
138
+ "epoch": 0.92,
139
+ "learning_rate": 3.861003861003861e-05,
140
+ "loss": 0.3429,
141
+ "step": 88
142
+ },
143
+ {
144
+ "epoch": 0.96,
145
+ "learning_rate": 3.783783783783784e-05,
146
+ "loss": 0.4481,
147
+ "step": 92
148
+ },
149
+ {
150
+ "epoch": 1.0,
151
+ "learning_rate": 3.7065637065637065e-05,
152
+ "loss": 0.117,
153
+ "step": 96
154
+ },
155
+ {
156
+ "epoch": 1.0,
157
+ "eval_accuracy": 0.9427083333333334,
158
+ "eval_auc": 0.9909929462832339,
159
+ "eval_f1": 0.9411764705882354,
160
+ "eval_loss": 0.15437577664852142,
161
+ "eval_precision": 0.9565217391304348,
162
+ "eval_recall": 0.9263157894736842,
163
+ "eval_runtime": 59.3659,
164
+ "eval_samples_per_second": 3.234,
165
+ "eval_steps_per_second": 0.202,
166
+ "step": 96
167
+ },
168
+ {
169
+ "epoch": 1.04,
170
+ "learning_rate": 3.6293436293436295e-05,
171
+ "loss": 0.3791,
172
+ "step": 100
173
+ },
174
+ {
175
+ "epoch": 1.08,
176
+ "learning_rate": 3.5521235521235524e-05,
177
+ "loss": 0.1039,
178
+ "step": 104
179
+ },
180
+ {
181
+ "epoch": 1.12,
182
+ "learning_rate": 3.4749034749034754e-05,
183
+ "loss": 0.0458,
184
+ "step": 108
185
+ },
186
+ {
187
+ "epoch": 1.17,
188
+ "learning_rate": 3.397683397683398e-05,
189
+ "loss": 0.0149,
190
+ "step": 112
191
+ },
192
+ {
193
+ "epoch": 1.21,
194
+ "learning_rate": 3.3204633204633207e-05,
195
+ "loss": 0.1961,
196
+ "step": 116
197
+ },
198
+ {
199
+ "epoch": 1.25,
200
+ "learning_rate": 3.2432432432432436e-05,
201
+ "loss": 0.2281,
202
+ "step": 120
203
+ },
204
+ {
205
+ "epoch": 1.29,
206
+ "learning_rate": 3.166023166023166e-05,
207
+ "loss": 0.1105,
208
+ "step": 124
209
+ },
210
+ {
211
+ "epoch": 1.33,
212
+ "learning_rate": 3.088803088803089e-05,
213
+ "loss": 0.3031,
214
+ "step": 128
215
+ },
216
+ {
217
+ "epoch": 1.38,
218
+ "learning_rate": 3.011583011583012e-05,
219
+ "loss": 0.3272,
220
+ "step": 132
221
+ },
222
+ {
223
+ "epoch": 1.42,
224
+ "learning_rate": 2.9343629343629348e-05,
225
+ "loss": 0.1091,
226
+ "step": 136
227
+ },
228
+ {
229
+ "epoch": 1.46,
230
+ "learning_rate": 2.857142857142857e-05,
231
+ "loss": 0.0637,
232
+ "step": 140
233
+ },
234
+ {
235
+ "epoch": 1.5,
236
+ "learning_rate": 2.77992277992278e-05,
237
+ "loss": 0.3734,
238
+ "step": 144
239
+ },
240
+ {
241
+ "epoch": 1.54,
242
+ "learning_rate": 2.702702702702703e-05,
243
+ "loss": 0.5033,
244
+ "step": 148
245
+ },
246
+ {
247
+ "epoch": 1.58,
248
+ "learning_rate": 2.6254826254826253e-05,
249
+ "loss": 0.1018,
250
+ "step": 152
251
+ },
252
+ {
253
+ "epoch": 1.62,
254
+ "learning_rate": 2.5482625482625483e-05,
255
+ "loss": 0.7375,
256
+ "step": 156
257
+ },
258
+ {
259
+ "epoch": 1.67,
260
+ "learning_rate": 2.4710424710424712e-05,
261
+ "loss": 0.1537,
262
+ "step": 160
263
+ },
264
+ {
265
+ "epoch": 1.71,
266
+ "learning_rate": 2.393822393822394e-05,
267
+ "loss": 0.1889,
268
+ "step": 164
269
+ },
270
+ {
271
+ "epoch": 1.75,
272
+ "learning_rate": 2.3166023166023168e-05,
273
+ "loss": 0.3122,
274
+ "step": 168
275
+ },
276
+ {
277
+ "epoch": 1.79,
278
+ "learning_rate": 2.2393822393822394e-05,
279
+ "loss": 0.1504,
280
+ "step": 172
281
+ },
282
+ {
283
+ "epoch": 1.83,
284
+ "learning_rate": 2.1621621621621624e-05,
285
+ "loss": 0.3638,
286
+ "step": 176
287
+ },
288
+ {
289
+ "epoch": 1.88,
290
+ "learning_rate": 2.084942084942085e-05,
291
+ "loss": 0.0842,
292
+ "step": 180
293
+ },
294
+ {
295
+ "epoch": 1.92,
296
+ "learning_rate": 2.0077220077220077e-05,
297
+ "loss": 0.1984,
298
+ "step": 184
299
+ },
300
+ {
301
+ "epoch": 1.96,
302
+ "learning_rate": 1.9305019305019306e-05,
303
+ "loss": 0.1108,
304
+ "step": 188
305
+ },
306
+ {
307
+ "epoch": 2.0,
308
+ "learning_rate": 1.8532818532818533e-05,
309
+ "loss": 0.1461,
310
+ "step": 192
311
+ },
312
+ {
313
+ "epoch": 2.0,
314
+ "eval_accuracy": 0.953125,
315
+ "eval_auc": 0.9920781334780249,
316
+ "eval_f1": 0.9508196721311476,
317
+ "eval_loss": 0.17911452054977417,
318
+ "eval_precision": 0.9886363636363636,
319
+ "eval_recall": 0.9157894736842105,
320
+ "eval_runtime": 60.1067,
321
+ "eval_samples_per_second": 3.194,
322
+ "eval_steps_per_second": 0.2,
323
+ "step": 192
324
+ },
325
+ {
326
+ "epoch": 2.04,
327
+ "learning_rate": 1.7760617760617762e-05,
328
+ "loss": 0.0114,
329
+ "step": 196
330
+ },
331
+ {
332
+ "epoch": 2.08,
333
+ "learning_rate": 1.698841698841699e-05,
334
+ "loss": 0.1118,
335
+ "step": 200
336
+ },
337
+ {
338
+ "epoch": 2.12,
339
+ "learning_rate": 1.6216216216216218e-05,
340
+ "loss": 0.1376,
341
+ "step": 204
342
+ },
343
+ {
344
+ "epoch": 2.17,
345
+ "learning_rate": 1.5444015444015444e-05,
346
+ "loss": 0.0606,
347
+ "step": 208
348
+ },
349
+ {
350
+ "epoch": 2.21,
351
+ "learning_rate": 1.4671814671814674e-05,
352
+ "loss": 0.1765,
353
+ "step": 212
354
+ },
355
+ {
356
+ "epoch": 2.25,
357
+ "learning_rate": 1.38996138996139e-05,
358
+ "loss": 0.2129,
359
+ "step": 216
360
+ },
361
+ {
362
+ "epoch": 2.29,
363
+ "learning_rate": 1.3127413127413127e-05,
364
+ "loss": 0.0257,
365
+ "step": 220
366
+ },
367
+ {
368
+ "epoch": 2.33,
369
+ "learning_rate": 1.2355212355212356e-05,
370
+ "loss": 0.0493,
371
+ "step": 224
372
+ },
373
+ {
374
+ "epoch": 2.38,
375
+ "learning_rate": 1.1583011583011584e-05,
376
+ "loss": 0.1198,
377
+ "step": 228
378
+ },
379
+ {
380
+ "epoch": 2.42,
381
+ "learning_rate": 1.0810810810810812e-05,
382
+ "loss": 0.0078,
383
+ "step": 232
384
+ },
385
+ {
386
+ "epoch": 2.46,
387
+ "learning_rate": 1.0038610038610038e-05,
388
+ "loss": 0.11,
389
+ "step": 236
390
+ },
391
+ {
392
+ "epoch": 2.5,
393
+ "learning_rate": 9.266409266409266e-06,
394
+ "loss": 0.0264,
395
+ "step": 240
396
+ },
397
+ {
398
+ "epoch": 2.54,
399
+ "learning_rate": 8.494208494208494e-06,
400
+ "loss": 0.2191,
401
+ "step": 244
402
+ },
403
+ {
404
+ "epoch": 2.58,
405
+ "learning_rate": 7.722007722007722e-06,
406
+ "loss": 0.3105,
407
+ "step": 248
408
+ },
409
+ {
410
+ "epoch": 2.62,
411
+ "learning_rate": 6.94980694980695e-06,
412
+ "loss": 0.0933,
413
+ "step": 252
414
+ },
415
+ {
416
+ "epoch": 2.67,
417
+ "learning_rate": 6.177606177606178e-06,
418
+ "loss": 0.2103,
419
+ "step": 256
420
+ },
421
+ {
422
+ "epoch": 2.71,
423
+ "learning_rate": 5.405405405405406e-06,
424
+ "loss": 0.0115,
425
+ "step": 260
426
+ },
427
+ {
428
+ "epoch": 2.75,
429
+ "learning_rate": 4.633204633204633e-06,
430
+ "loss": 0.0261,
431
+ "step": 264
432
+ },
433
+ {
434
+ "epoch": 2.79,
435
+ "learning_rate": 3.861003861003861e-06,
436
+ "loss": 0.3301,
437
+ "step": 268
438
+ },
439
+ {
440
+ "epoch": 2.83,
441
+ "learning_rate": 3.088803088803089e-06,
442
+ "loss": 0.1125,
443
+ "step": 272
444
+ },
445
+ {
446
+ "epoch": 2.88,
447
+ "learning_rate": 2.3166023166023166e-06,
448
+ "loss": 0.0756,
449
+ "step": 276
450
+ },
451
+ {
452
+ "epoch": 2.92,
453
+ "learning_rate": 1.5444015444015445e-06,
454
+ "loss": 0.0775,
455
+ "step": 280
456
+ },
457
+ {
458
+ "epoch": 2.96,
459
+ "learning_rate": 7.722007722007723e-07,
460
+ "loss": 0.0029,
461
+ "step": 284
462
+ },
463
+ {
464
+ "epoch": 3.0,
465
+ "learning_rate": 0.0,
466
+ "loss": 0.0068,
467
+ "step": 288
468
+ },
469
+ {
470
+ "epoch": 3.0,
471
+ "eval_accuracy": 0.9739583333333334,
472
+ "eval_auc": 0.9932718393922951,
473
+ "eval_f1": 0.9732620320855614,
474
+ "eval_loss": 0.15279646217823029,
475
+ "eval_precision": 0.9891304347826086,
476
+ "eval_recall": 0.9578947368421052,
477
+ "eval_runtime": 59.2078,
478
+ "eval_samples_per_second": 3.243,
479
+ "eval_steps_per_second": 0.203,
480
+ "step": 288
481
+ }
482
+ ],
483
+ "logging_steps": 4,
484
+ "max_steps": 288,
485
+ "num_input_tokens_seen": 0,
486
+ "num_train_epochs": 3,
487
+ "save_steps": 500,
488
+ "total_flos": 1.8050816016698573e+17,
489
+ "train_batch_size": 8,
490
+ "trial_name": null,
491
+ "trial_params": null
492
+ }
checkpoint-288/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:96af38b966796e9d1f3214ac7a3556c7b8370436ef566b1e53c59f8bc4ed47ff
3
+ size 4728
config.json ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "umm-maybe/AI-image-detector",
3
+ "_num_labels": 2,
4
+ "architectures": [
5
+ "SwinForImageClassification"
6
+ ],
7
+ "attention_probs_dropout_prob": 0.0,
8
+ "depths": [
9
+ 2,
10
+ 2,
11
+ 18,
12
+ 2
13
+ ],
14
+ "drop_path_rate": 0.1,
15
+ "embed_dim": 128,
16
+ "encoder_stride": 32,
17
+ "hidden_act": "gelu",
18
+ "hidden_dropout_prob": 0.0,
19
+ "hidden_size": 1024,
20
+ "id2label": {
21
+ "0": "cartoon",
22
+ "1": "not_cartoon"
23
+ },
24
+ "image_size": 224,
25
+ "initializer_range": 0.02,
26
+ "label2id": {
27
+ "cartoon": 0,
28
+ "not_cartoon": 1
29
+ },
30
+ "layer_norm_eps": 1e-05,
31
+ "max_length": 128,
32
+ "mlp_ratio": 4.0,
33
+ "model_type": "swin",
34
+ "num_channels": 3,
35
+ "num_heads": [
36
+ 4,
37
+ 8,
38
+ 16,
39
+ 32
40
+ ],
41
+ "num_layers": 4,
42
+ "out_features": [
43
+ "stage4"
44
+ ],
45
+ "out_indices": [
46
+ 4
47
+ ],
48
+ "padding": "max_length",
49
+ "patch_size": 4,
50
+ "path_norm": true,
51
+ "problem_type": "single_label_classification",
52
+ "qkv_bias": true,
53
+ "stage_names": [
54
+ "stem",
55
+ "stage1",
56
+ "stage2",
57
+ "stage3",
58
+ "stage4"
59
+ ],
60
+ "torch_dtype": "float32",
61
+ "transformers_version": "4.37.0",
62
+ "use_absolute_embeddings": false,
63
+ "window_size": 7
64
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b27d9cb81864aedd5fd7b853826b5feed5fd568206ae20dd78a1998c31ab6572
3
+ size 347498816
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_normalize": true,
3
+ "do_rescale": true,
4
+ "do_resize": true,
5
+ "image_mean": [
6
+ 0.485,
7
+ 0.456,
8
+ 0.406
9
+ ],
10
+ "image_processor_type": "ViTImageProcessor",
11
+ "image_std": [
12
+ 0.229,
13
+ 0.224,
14
+ 0.225
15
+ ],
16
+ "resample": 3,
17
+ "rescale_factor": 0.00392156862745098,
18
+ "size": {
19
+ "height": 224,
20
+ "width": 224
21
+ }
22
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:96af38b966796e9d1f3214ac7a3556c7b8370436ef566b1e53c59f8bc4ed47ff
3
+ size 4728
training_params.json ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "data_path": "CartoonOrNotV2/autotrain-data",
3
+ "model": "umm-maybe/AI-image-detector",
4
+ "username": "TTNVXX",
5
+ "lr": 5e-05,
6
+ "epochs": 3,
7
+ "batch_size": 8,
8
+ "warmup_ratio": 0.1,
9
+ "gradient_accumulation": 1,
10
+ "optimizer": "adamw_torch",
11
+ "scheduler": "linear",
12
+ "weight_decay": 0.0,
13
+ "max_grad_norm": 1.0,
14
+ "seed": 42,
15
+ "train_split": "train",
16
+ "valid_split": "validation",
17
+ "logging_steps": -1,
18
+ "project_name": "CartoonOrNotV2",
19
+ "auto_find_batch_size": false,
20
+ "mixed_precision": null,
21
+ "save_total_limit": 1,
22
+ "save_strategy": "epoch",
23
+ "push_to_hub": true,
24
+ "repo_id": "TTNVXX/CartoonOrNotV2",
25
+ "evaluation_strategy": "epoch",
26
+ "image_column": "autotrain_image",
27
+ "target_column": "autotrain_label",
28
+ "log": "none"
29
+ }