Skullly committed on
Commit
54e85c3
·
verified ·
1 Parent(s): 3452a9d

End of training

Browse files
README.md ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: google/efficientnet-b2
4
+ tags:
5
+ - generated_from_trainer
6
+ datasets:
7
+ - image_folder
8
+ metrics:
9
+ - accuracy
10
+ model-index:
11
+ - name: results
12
+ results:
13
+ - task:
14
+ name: Image Classification
15
+ type: image-classification
16
+ dataset:
17
+ name: image_folder
18
+ type: image_folder
19
+ config: default
20
+ split: train
21
+ args: default
22
+ metrics:
23
+ - name: Accuracy
24
+ type: accuracy
25
+ value: 0.5296428571428572
26
+ ---
27
+
28
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
29
+ should probably proofread and complete it, then remove this comment. -->
30
+
31
+ # results
32
+
33
+ This model is a fine-tuned version of [google/efficientnet-b2](https://huggingface.co/google/efficientnet-b2) on the image_folder dataset.
34
+ It achieves the following results on the evaluation set:
35
+ - Loss: 0.6920
36
+ - Accuracy: 0.5296
37
+
38
+ ## Model description
39
+
40
+ More information needed
41
+
42
+ ## Intended uses & limitations
43
+
44
+ More information needed
45
+
46
+ ## Training and evaluation data
47
+
48
+ More information needed
49
+
50
+ ## Training procedure
51
+
52
+ ### Training hyperparameters
53
+
54
+ The following hyperparameters were used during training:
55
+ - learning_rate: 3e-05
56
+ - train_batch_size: 32
57
+ - eval_batch_size: 32
58
+ - seed: 42
59
+ - gradient_accumulation_steps: 8
60
+ - total_train_batch_size: 256
61
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
62
+ - lr_scheduler_type: cosine
63
+ - lr_scheduler_warmup_steps: 1000
64
+ - num_epochs: 3
65
+
66
+ ### Training results
67
+
68
+
69
+
70
+ ### Framework versions
71
+
72
+ - Transformers 4.37.0
73
+ - Pytorch 2.1.2
74
+ - Datasets 2.1.0
75
+ - Tokenizers 0.15.1
all_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 3.0,
3
+ "eval_accuracy": 0.5296428571428572,
4
+ "eval_loss": 0.6920002102851868,
5
+ "eval_runtime": 16.5991,
6
+ "eval_samples_per_second": 168.683,
7
+ "eval_steps_per_second": 5.301
8
+ }
config.json ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "google/efficientnet-b2",
3
+ "architectures": [
4
+ "EfficientNetForImageClassification"
5
+ ],
6
+ "batch_norm_eps": 0.001,
7
+ "batch_norm_momentum": 0.99,
8
+ "depth_coefficient": 1.2,
9
+ "depth_divisor": 8,
10
+ "depthwise_padding": [
11
+ 5,
12
+ 8,
13
+ 16
14
+ ],
15
+ "drop_connect_rate": 0.2,
16
+ "dropout_rate": 0.3,
17
+ "expand_ratios": [
18
+ 1,
19
+ 6,
20
+ 6,
21
+ 6,
22
+ 6,
23
+ 6,
24
+ 6
25
+ ],
26
+ "hidden_act": "swish",
27
+ "hidden_dim": 1408,
28
+ "id2label": {
29
+ "0": "fake",
30
+ "1": "real"
31
+ },
32
+ "image_size": 260,
33
+ "in_channels": [
34
+ 32,
35
+ 16,
36
+ 24,
37
+ 40,
38
+ 80,
39
+ 112,
40
+ 192
41
+ ],
42
+ "initializer_range": 0.02,
43
+ "kernel_sizes": [
44
+ 3,
45
+ 3,
46
+ 5,
47
+ 3,
48
+ 5,
49
+ 5,
50
+ 3
51
+ ],
52
+ "label2id": {
53
+ "fake": 0,
54
+ "real": 1
55
+ },
56
+ "model_type": "efficientnet",
57
+ "num_block_repeats": [
58
+ 1,
59
+ 2,
60
+ 2,
61
+ 3,
62
+ 3,
63
+ 4,
64
+ 1
65
+ ],
66
+ "num_channels": 3,
67
+ "num_hidden_layers": 64,
68
+ "out_channels": [
69
+ 16,
70
+ 24,
71
+ 40,
72
+ 80,
73
+ 112,
74
+ 192,
75
+ 320
76
+ ],
77
+ "pooling_type": "mean",
78
+ "problem_type": "single_label_classification",
79
+ "squeeze_expansion_ratio": 0.25,
80
+ "strides": [
81
+ 1,
82
+ 2,
83
+ 2,
84
+ 2,
85
+ 1,
86
+ 2,
87
+ 1
88
+ ],
89
+ "torch_dtype": "float32",
90
+ "transformers_version": "4.37.0",
91
+ "width_coefficient": 1.1
92
+ }
eval_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 3.0,
3
+ "eval_accuracy": 0.5296428571428572,
4
+ "eval_loss": 0.6920002102851868,
5
+ "eval_runtime": 16.5991,
6
+ "eval_samples_per_second": 168.683,
7
+ "eval_steps_per_second": 5.301
8
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:113b1f93afc40ce8930ba1b2bdfdb189d65070825b902ca4181f9f5c37405585
3
+ size 31151640
preprocessor_config.json ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "crop_size": {
3
+ "height": 289,
4
+ "width": 289
5
+ },
6
+ "do_center_crop": false,
7
+ "do_normalize": true,
8
+ "do_rescale": true,
9
+ "do_resize": true,
10
+ "image_mean": [
11
+ 0.485,
12
+ 0.456,
13
+ 0.406
14
+ ],
15
+ "image_processor_type": "EfficientNetImageProcessor",
16
+ "image_std": [
17
+ 0.47853944,
18
+ 0.4732864,
19
+ 0.47434163
20
+ ],
21
+ "include_top": true,
22
+ "resample": 0,
23
+ "rescale_factor": 0.00392156862745098,
24
+ "rescale_offset": false,
25
+ "size": {
26
+ "height": 260,
27
+ "width": 260
28
+ }
29
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4452a709e678d159397ebc2aebbbe0855700306bb9899a4bb1bcfa7c99358e4b
3
+ size 4664