Developer9215 committed
Commit 6c1c19a · verified · Parent: 8316a85

hyuns9215
README.md ADDED
@@ -0,0 +1,65 @@
+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: google/vit-base-patch16-224-in21k
+ tags:
+ - image-classification
+ - ViT
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: vit-base-beans-demo-v5
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # vit-base-beans-demo-v5
+
+ This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.1687
+ - Accuracy: 0.9609
+
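+ A minimal inference sketch (the Hub id below is an assumption; substitute the actual repo id or a local checkpoint path):
+
+ ```python
+ # Minimal inference sketch; the model id is a placeholder, not a
+ # confirmed Hub location.
+ from transformers import pipeline
+
+ classifier = pipeline(
+     "image-classification",
+     model="Developer9215/vit-base-beans-demo-v5",  # hypothetical id
+ )
+
+ # Labels come from config.json: angular_leaf_spot, bean_rust, healthy.
+ print(classifier("bean_leaf.jpg"))  # hypothetical input image
+ ```
+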
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a `TrainingArguments` sketch follows the list):
+ - learning_rate: 0.0002
+ - train_batch_size: 16
+ - eval_batch_size: 16
+ - seed: 42
+ - optimizer: AdamW (torch implementation) with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - num_epochs: 4
+ - mixed_precision_training: Native AMP
+
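+ These values map onto `TrainingArguments` roughly as follows; this is a reconstruction from the card and `trainer_state.json`, not the exact training script:
+
+ ```python
+ # Sketch of TrainingArguments matching the values above; output_dir and
+ # the best-model settings are inferred from trainer_state.json.
+ from transformers import TrainingArguments
+
+ args = TrainingArguments(
+     output_dir="./vit-base-beans-demo-v5",
+     learning_rate=2e-4,
+     per_device_train_batch_size=16,
+     per_device_eval_batch_size=16,
+     num_train_epochs=4,
+     optim="adamw_torch",
+     lr_scheduler_type="linear",
+     seed=42,
+     fp16=True,                # "Native AMP" mixed precision
+     logging_steps=10,
+     eval_strategy="steps",
+     eval_steps=100,
+     save_steps=100,
+     load_best_model_at_end=True,
+     metric_for_best_model="accuracy",
+ )
+ ```
+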
+ ### Training results
+
+ | Training Loss | Epoch  | Step | Validation Loss | Accuracy |
+ |:-------------:|:------:|:----:|:---------------:|:--------:|
+ | 0.0537        | 1.5385 | 100  | 0.0554          | 0.9850   |
+ | 0.0129        | 3.0769 | 200  | 0.0136          | 1.0      |
+
+
+ ### Framework versions
+
+ - Transformers 4.54.1
+ - Pytorch 2.6.0+cu124
+ - Datasets 4.0.0
+ - Tokenizers 0.21.4
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+     "epoch": 4.0,
+     "eval_accuracy": 0.9609375,
+     "eval_loss": 0.1686757206916809,
+     "eval_runtime": 2.0246,
+     "eval_samples_per_second": 63.222,
+     "eval_steps_per_second": 3.951,
+     "total_flos": 3.205097416476426e+17,
+     "train_loss": 0.11894229616110141,
+     "train_runtime": 102.8707,
+     "train_samples_per_second": 40.206,
+     "train_steps_per_second": 2.527
+ }
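
The reported figures are internally consistent: eval_samples_per_second × eval_runtime recovers a 128-image evaluation split, and 0.9609375 × 128 = 123 correct predictions. A quick check:

```python
# Cross-check the metrics in all_results.json against each other.
eval_runtime = 2.0246
eval_samples_per_second = 63.222
eval_accuracy = 0.9609375

n_samples = round(eval_runtime * eval_samples_per_second)
print(n_samples)                         # 128
print(round(eval_accuracy * n_samples))  # 123 of 128 correct
```
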
config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "angular_leaf_spot",
+     "1": "bean_rust",
+     "2": "healthy"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "angular_leaf_spot": "0",
+     "bean_rust": "1",
+     "healthy": "2"
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "pooler_act": "tanh",
+   "pooler_output_size": 768,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.54.1"
+ }
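
The config describes a standard ViT-Base encoder (12 layers, 12 heads, hidden size 768, 16×16 patches) with a three-way classification head. A minimal loading sketch, assuming the files sit in a local checkout:

```python
# Load the architecture defined by config.json; the local path is an
# assumption -- point it wherever this repo is checked out.
from transformers import AutoConfig, AutoModelForImageClassification

config = AutoConfig.from_pretrained("./vit-base-beans-demo-v5")
print(config.id2label)  # {0: 'angular_leaf_spot', 1: 'bean_rust', 2: 'healthy'}

model = AutoModelForImageClassification.from_pretrained("./vit-base-beans-demo-v5")
print(f"{model.num_parameters():,}")  # ~85.8M params (float32 -> ~343 MB on disk)
```
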
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6dbcf6452b1305897b0a9b4f7ec56b673fcec85dec84fb55af4d7accb5581bcc
+ size 343227052
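
This is a Git LFS pointer rather than the weights themselves; it records only the blob's SHA-256 and size. A sketch for verifying a downloaded copy against the pointer:

```python
# Verify a fetched model.safetensors against the LFS pointer's digest.
import hashlib

EXPECTED = "6dbcf6452b1305897b0a9b4f7ec56b673fcec85dec84fb55af4d7accb5581bcc"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "checksum mismatch"
print("OK")  # the pointer also records size 343227052 bytes
```
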
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "do_convert_rgb": null,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTFeatureExtractor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
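
Read together, these settings resize images to 224×224 (resample 2 is PIL bilinear), rescale pixels by 1/255, then normalize each channel with mean 0.5 and std 0.5, mapping inputs to [-1, 1]. A sketch of applying the processor, path again assumed local:

```python
# Preprocess one image exactly as preprocessor_config.json specifies.
from PIL import Image
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("./vit-base-beans-demo-v5")
image = Image.open("bean_leaf.jpg").convert("RGB")  # hypothetical file

inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
# After rescale + normalize, pixel values lie in [-1.0, 1.0].
```
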
runs/Aug01_01-18-02_ecd917246513/events.out.tfevents.1754011086.ecd917246513.2031.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8d1c2ee090a42f17513ee8c8ba187d803e007e7d57e78a3ef147fc4aee7fc78
+ size 11515
runs/Aug01_01-18-02_ecd917246513/events.out.tfevents.1754011352.ecd917246513.2031.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbc293e50274818854b7125113635c44df3e81fbf84289cc404e162416eea0b6
+ size 411
test_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 4.0,
+     "eval_accuracy": 0.9609375,
+     "eval_loss": 0.1686757206916809,
+     "eval_runtime": 2.0246,
+     "eval_samples_per_second": 63.222,
+     "eval_steps_per_second": 3.951
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 4.0,
+     "total_flos": 3.205097416476426e+17,
+     "train_loss": 0.11894229616110141,
+     "train_runtime": 102.8707,
+     "train_samples_per_second": 40.206,
+     "train_steps_per_second": 2.527
+ }
trainer_state.json ADDED
@@ -0,0 +1,243 @@
+ {
+   "best_global_step": 200,
+   "best_metric": 1.0,
+   "best_model_checkpoint": "./vit-base-beans-demo-v5/checkpoint-200",
+   "epoch": 4.0,
+   "eval_steps": 100,
+   "global_step": 260,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.15384615384615385,
+       "grad_norm": 1.957282304763794,
+       "learning_rate": 0.0001930769230769231,
+       "loss": 0.8039,
+       "step": 10
+     },
+     {
+       "epoch": 0.3076923076923077,
+       "grad_norm": 1.0513660907745361,
+       "learning_rate": 0.0001853846153846154,
+       "loss": 0.3269,
+       "step": 20
+     },
+     {
+       "epoch": 0.46153846153846156,
+       "grad_norm": 2.8595218658447266,
+       "learning_rate": 0.0001776923076923077,
+       "loss": 0.2376,
+       "step": 30
+     },
+     {
+       "epoch": 0.6153846153846154,
+       "grad_norm": 0.8380834460258484,
+       "learning_rate": 0.00017,
+       "loss": 0.2117,
+       "step": 40
+     },
+     {
+       "epoch": 0.7692307692307693,
+       "grad_norm": 4.57429838180542,
+       "learning_rate": 0.0001623076923076923,
+       "loss": 0.1837,
+       "step": 50
+     },
+     {
+       "epoch": 0.9230769230769231,
+       "grad_norm": 4.2446722984313965,
+       "learning_rate": 0.00015461538461538464,
+       "loss": 0.3205,
+       "step": 60
+     },
+     {
+       "epoch": 1.0769230769230769,
+       "grad_norm": 0.22835291922092438,
+       "learning_rate": 0.00014692307692307693,
+       "loss": 0.1658,
+       "step": 70
+     },
+     {
+       "epoch": 1.2307692307692308,
+       "grad_norm": 2.5722620487213135,
+       "learning_rate": 0.00013923076923076923,
+       "loss": 0.0715,
+       "step": 80
+     },
+     {
+       "epoch": 1.3846153846153846,
+       "grad_norm": 0.15161257982254028,
+       "learning_rate": 0.00013153846153846156,
+       "loss": 0.0435,
+       "step": 90
+     },
+     {
+       "epoch": 1.5384615384615383,
+       "grad_norm": 3.3087499141693115,
+       "learning_rate": 0.00012384615384615385,
+       "loss": 0.0537,
+       "step": 100
+     },
+     {
+       "epoch": 1.5384615384615383,
+       "eval_accuracy": 0.9849624060150376,
+       "eval_loss": 0.055375177413225174,
+       "eval_runtime": 2.6053,
+       "eval_samples_per_second": 51.05,
+       "eval_steps_per_second": 3.454,
+       "step": 100
+     },
+     {
+       "epoch": 1.6923076923076923,
+       "grad_norm": 0.14113253355026245,
+       "learning_rate": 0.00011615384615384617,
+       "loss": 0.1055,
+       "step": 110
+     },
+     {
+       "epoch": 1.8461538461538463,
+       "grad_norm": 0.1897106021642685,
+       "learning_rate": 0.00010846153846153846,
+       "loss": 0.1263,
+       "step": 120
+     },
+     {
+       "epoch": 2.0,
+       "grad_norm": 0.11339154094457626,
+       "learning_rate": 0.00010076923076923077,
+       "loss": 0.1044,
+       "step": 130
+     },
+     {
+       "epoch": 2.1538461538461537,
+       "grad_norm": 0.28828829526901245,
+       "learning_rate": 9.307692307692309e-05,
+       "loss": 0.0738,
+       "step": 140
+     },
+     {
+       "epoch": 2.3076923076923075,
+       "grad_norm": 0.11714714765548706,
+       "learning_rate": 8.538461538461538e-05,
+       "loss": 0.0301,
+       "step": 150
+     },
+     {
+       "epoch": 2.4615384615384617,
+       "grad_norm": 0.08417778462171555,
+       "learning_rate": 7.76923076923077e-05,
+       "loss": 0.0157,
+       "step": 160
+     },
+     {
+       "epoch": 2.6153846153846154,
+       "grad_norm": 0.0785544291138649,
+       "learning_rate": 7e-05,
+       "loss": 0.0143,
+       "step": 170
+     },
+     {
+       "epoch": 2.769230769230769,
+       "grad_norm": 0.9593146443367004,
+       "learning_rate": 6.23076923076923e-05,
+       "loss": 0.0422,
+       "step": 180
+     },
+     {
+       "epoch": 2.9230769230769234,
+       "grad_norm": 0.06379880756139755,
+       "learning_rate": 5.461538461538461e-05,
+       "loss": 0.0228,
+       "step": 190
+     },
+     {
+       "epoch": 3.076923076923077,
+       "grad_norm": 0.06561139225959778,
+       "learning_rate": 4.692307692307693e-05,
+       "loss": 0.0129,
+       "step": 200
+     },
+     {
+       "epoch": 3.076923076923077,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.013629596680402756,
+       "eval_runtime": 1.2116,
+       "eval_samples_per_second": 109.77,
+       "eval_steps_per_second": 7.428,
+       "step": 200
+     },
+     {
+       "epoch": 3.230769230769231,
+       "grad_norm": 0.06040000915527344,
+       "learning_rate": 3.923076923076923e-05,
+       "loss": 0.032,
+       "step": 210
+     },
+     {
+       "epoch": 3.3846153846153846,
+       "grad_norm": 0.0607382096350193,
+       "learning_rate": 3.153846153846154e-05,
+       "loss": 0.0114,
+       "step": 220
+     },
+     {
+       "epoch": 3.5384615384615383,
+       "grad_norm": 0.06130724027752876,
+       "learning_rate": 2.384615384615385e-05,
+       "loss": 0.0111,
+       "step": 230
+     },
+     {
+       "epoch": 3.6923076923076925,
+       "grad_norm": 0.06276917457580566,
+       "learning_rate": 1.6153846153846154e-05,
+       "loss": 0.0108,
+       "step": 240
+     },
+     {
+       "epoch": 3.8461538461538463,
+       "grad_norm": 0.06522306054830551,
+       "learning_rate": 8.461538461538462e-06,
+       "loss": 0.0216,
+       "step": 250
+     },
+     {
+       "epoch": 4.0,
+       "grad_norm": 0.05368223413825035,
+       "learning_rate": 7.692307692307694e-07,
+       "loss": 0.039,
+       "step": 260
+     },
+     {
+       "epoch": 4.0,
+       "step": 260,
+       "total_flos": 3.205097416476426e+17,
+       "train_loss": 0.11894229616110141,
+       "train_runtime": 102.8707,
+       "train_samples_per_second": 40.206,
+       "train_steps_per_second": 2.527
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 260,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 4,
+   "save_steps": 100,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 3.205097416476426e+17,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
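
The logged learning rates trace the linear scheduler exactly: with no warmup, the lr in effect during global step s is base_lr × (total_steps − (s − 1)) / total_steps, and each log entry records the value from the step just taken. A quick reconstruction:

```python
# Reconstruct the logged learning_rate values from the linear schedule
# (base lr 2e-4, 260 total steps, zero warmup).
base_lr, total_steps = 2e-4, 260

for step in (10, 100, 200, 260):
    lr = base_lr * (total_steps - (step - 1)) / total_steps
    print(step, f"{lr:.10g}")
# 10  0.0001930769231   <- matches the step-10 log entry
# 100 0.0001238461538
# 200 4.692307692e-05
# 260 7.692307692e-07
```
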
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:033db351715120211df7eb8574de5df785bf5f0d9b71671034b0cb90048f9ac5
+ size 5368