tayyab786 committed
Commit 37da195 · 1 Parent(s): fd24936

Upload folder using huggingface_hub
checkpoint-6690/config.json ADDED
@@ -0,0 +1,132 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224-in21k",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "Doddpathre",
+     "1": "Arali",
+     "2": "Avacado",
+     "3": "Pomegranate",
+     "4": "Lemon",
+     "5": "Honge",
+     "6": "Mango",
+     "7": "Geranium",
+     "8": "Gauva",
+     "9": "Mint",
+     "10": "Palak(Spinach)",
+     "11": "Betel",
+     "12": "Coriender",
+     "13": "Catharanthus",
+     "14": "Betel_Nut",
+     "15": "Henna",
+     "16": "Jackfruit",
+     "17": "Curry_Leaf",
+     "18": "Tulsi",
+     "19": "Basale",
+     "20": "Brahmi",
+     "21": "Ganike",
+     "22": "Aloevera",
+     "23": "Curry",
+     "24": "Tulasi",
+     "25": "Tamarind",
+     "26": "Amla",
+     "27": "Nithyapushpa",
+     "28": "Hibiscus",
+     "29": "Neem",
+     "30": "Pappaya",
+     "31": "Ekka",
+     "32": "Raktachandini",
+     "33": "Doddapatre",
+     "34": "Nagadali",
+     "35": "Castor",
+     "36": "Amruta_Balli",
+     "37": "Guava",
+     "38": "Bamboo",
+     "39": "Ashwagandha",
+     "40": "Insulin",
+     "41": "Pepper",
+     "42": "Rose",
+     "43": "Wood_sorel",
+     "44": "Seethapala",
+     "45": "Lemon_grass",
+     "46": "Jasmine",
+     "47": "Bhrami",
+     "48": "Papaya",
+     "49": "Nooni",
+     "50": "Sapota",
+     "51": "Ashoka"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "Aloevera": 22,
+     "Amla": 26,
+     "Amruta_Balli": 36,
+     "Arali": 1,
+     "Ashoka": 51,
+     "Ashwagandha": 39,
+     "Avacado": 2,
+     "Bamboo": 38,
+     "Basale": 19,
+     "Betel": 11,
+     "Betel_Nut": 14,
+     "Bhrami": 47,
+     "Brahmi": 20,
+     "Castor": 35,
+     "Catharanthus": 13,
+     "Coriender": 12,
+     "Curry": 23,
+     "Curry_Leaf": 17,
+     "Doddapatre": 33,
+     "Doddpathre": 0,
+     "Ekka": 31,
+     "Ganike": 21,
+     "Gauva": 8,
+     "Geranium": 7,
+     "Guava": 37,
+     "Henna": 15,
+     "Hibiscus": 28,
+     "Honge": 5,
+     "Insulin": 40,
+     "Jackfruit": 16,
+     "Jasmine": 46,
+     "Lemon": 4,
+     "Lemon_grass": 45,
+     "Mango": 6,
+     "Mint": 9,
+     "Nagadali": 34,
+     "Neem": 29,
+     "Nithyapushpa": 27,
+     "Nooni": 49,
+     "Palak(Spinach)": 10,
+     "Papaya": 48,
+     "Pappaya": 30,
+     "Pepper": 41,
+     "Pomegranate": 3,
+     "Raktachandini": 32,
+     "Rose": 42,
+     "Sapota": 50,
+     "Seethapala": 44,
+     "Tamarind": 25,
+     "Tulasi": 24,
+     "Tulsi": 18,
+     "Wood_sorel": 43
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.0"
+ }
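This config defines a standard ViT-Base encoder (12 layers, 12 heads, hidden size 768, 16×16 patches) fine-tuned from google/vit-base-patch16-224-in21k with a 52-way classification head over the plant labels in id2label. A minimal loading sketch, not part of the commit, assuming the repo has been cloned so the checkpoint-6690 directory is on disk:

```python
# Minimal sketch (assumes a local clone): load the fine-tuned classifier
# from the checkpoint directory added in this commit.
from transformers import ViTForImageClassification

model = ViTForImageClassification.from_pretrained("checkpoint-6690")

# num_labels is derived from id2label in config.json; string keys become
# ints when the config is loaded.
print(model.config.num_labels)    # 52
print(model.config.id2label[29])  # "Neem"
```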
checkpoint-6690/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6123066b760dab46c263edca4d2b9e779e356eca96f506578d6f70ab0441143
+ size 686876037
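Like the other binary files in this commit, optimizer.pt is stored via Git LFS: only this three-line pointer lives in git, recording the spec version, the sha256 oid of the real object, and its byte size (here ~687 MB, roughly twice the 343 MB model weights, as expected for AdamW's two moment buffers). A hedged sketch for checking a downloaded object against its pointer; verify_lfs_object is a hypothetical helper, not part of the repo:

```python
# Hypothetical helper: verify a downloaded LFS object against the oid and
# size recorded in its pointer file.
import hashlib
import os

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file's byte size and sha256 match the LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == expected_oid

# Values copied from the optimizer.pt pointer above.
ok = verify_lfs_object(
    "checkpoint-6690/optimizer.pt",
    "d6123066b760dab46c263edca4d2b9e779e356eca96f506578d6f70ab0441143",
    686876037,
)
```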
checkpoint-6690/preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
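This preprocessor config specifies the standard ViT pipeline: resize to 224×224 with bilinear resampling (resample: 2 is PIL's BILINEAR code), rescale pixel values by 1/255 (0.00392156862745098), then normalize each channel with mean and std 0.5. A minimal usage sketch, assuming a local clone; "leaf.jpg" is a placeholder input image:

```python
# Minimal preprocessing sketch (assumes a local clone and a sample image).
from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("checkpoint-6690")
image = Image.open("leaf.jpg").convert("RGB")  # placeholder image path
inputs = processor(images=image, return_tensors="pt")

# Resized, rescaled, and normalized per the config above.
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```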
checkpoint-6690/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2260320ca894b275d8322e37961fbb12cef8c5200ae4ed2ef939a52b3aa9cf9d
+ size 343422509
checkpoint-6690/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0c7fea4d850b4851a1da61d9fc8818b483c1951b244b085ee2453183c5223e1
+ size 14575
checkpoint-6690/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d52ee7fdc399d5377aeff037fc7da32f92f6f36eb0780e5135892bb5ede30e5
+ size 627
checkpoint-6690/trainer_state.json ADDED
@@ -0,0 +1,367 @@
+ {
+   "best_metric": 0.26971209049224854,
+   "best_model_checkpoint": "medicinal_plants_image_detection/checkpoint-6690",
+   "epoch": 30.0,
+   "eval_steps": 500,
+   "global_step": 6690,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.5465768799102132,
+       "eval_loss": 3.5867183208465576,
+       "eval_runtime": 45.1056,
+       "eval_samples_per_second": 39.507,
+       "eval_steps_per_second": 4.944,
+       "step": 223
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.7951739618406285,
+       "eval_loss": 3.1091668605804443,
+       "eval_runtime": 45.701,
+       "eval_samples_per_second": 38.993,
+       "eval_steps_per_second": 4.88,
+       "step": 446
+     },
+     {
+       "epoch": 2.24,
+       "learning_rate": 9.322289156626508e-06,
+       "loss": 3.4702,
+       "step": 500
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.867003367003367,
+       "eval_loss": 2.6939425468444824,
+       "eval_runtime": 45.2769,
+       "eval_samples_per_second": 39.358,
+       "eval_steps_per_second": 4.925,
+       "step": 669
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.8967452300785634,
+       "eval_loss": 2.3589229583740234,
+       "eval_runtime": 45.4351,
+       "eval_samples_per_second": 39.221,
+       "eval_steps_per_second": 4.908,
+       "step": 892
+     },
+     {
+       "epoch": 4.48,
+       "learning_rate": 8.569277108433736e-06,
+       "loss": 2.46,
+       "step": 1000
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.9298540965207632,
+       "eval_loss": 2.0559611320495605,
+       "eval_runtime": 44.9121,
+       "eval_samples_per_second": 39.678,
+       "eval_steps_per_second": 4.965,
+       "step": 1115
+     },
+     {
+       "epoch": 6.0,
+       "eval_accuracy": 0.9421997755331089,
+       "eval_loss": 1.809415340423584,
+       "eval_runtime": 45.302,
+       "eval_samples_per_second": 39.336,
+       "eval_steps_per_second": 4.923,
+       "step": 1338
+     },
+     {
+       "epoch": 6.73,
+       "learning_rate": 7.816265060240964e-06,
+       "loss": 1.7651,
+       "step": 1500
+     },
+     {
+       "epoch": 7.0,
+       "eval_accuracy": 0.9551066217732884,
+       "eval_loss": 1.586814045906067,
+       "eval_runtime": 45.3442,
+       "eval_samples_per_second": 39.299,
+       "eval_steps_per_second": 4.918,
+       "step": 1561
+     },
+     {
+       "epoch": 8.0,
+       "eval_accuracy": 0.9635241301907969,
+       "eval_loss": 1.4009339809417725,
+       "eval_runtime": 44.804,
+       "eval_samples_per_second": 39.773,
+       "eval_steps_per_second": 4.977,
+       "step": 1784
+     },
+     {
+       "epoch": 8.97,
+       "learning_rate": 7.063253012048194e-06,
+       "loss": 1.2734,
+       "step": 2000
+     },
+     {
+       "epoch": 9.0,
+       "eval_accuracy": 0.9730639730639731,
+       "eval_loss": 1.2280750274658203,
+       "eval_runtime": 45.0108,
+       "eval_samples_per_second": 39.59,
+       "eval_steps_per_second": 4.954,
+       "step": 2007
+     },
+     {
+       "epoch": 10.0,
+       "eval_accuracy": 0.9781144781144782,
+       "eval_loss": 1.0812939405441284,
+       "eval_runtime": 44.9147,
+       "eval_samples_per_second": 39.675,
+       "eval_steps_per_second": 4.965,
+       "step": 2230
+     },
+     {
+       "epoch": 11.0,
+       "eval_accuracy": 0.9809203142536476,
+       "eval_loss": 0.9537368416786194,
+       "eval_runtime": 44.1874,
+       "eval_samples_per_second": 40.328,
+       "eval_steps_per_second": 5.047,
+       "step": 2453
+     },
+     {
+       "epoch": 11.21,
+       "learning_rate": 6.310240963855422e-06,
+       "loss": 0.922,
+       "step": 2500
+     },
+     {
+       "epoch": 12.0,
+       "eval_accuracy": 0.9837261503928171,
+       "eval_loss": 0.8425608277320862,
+       "eval_runtime": 44.1161,
+       "eval_samples_per_second": 40.393,
+       "eval_steps_per_second": 5.055,
+       "step": 2676
+     },
+     {
+       "epoch": 13.0,
+       "eval_accuracy": 0.9865319865319865,
+       "eval_loss": 0.7477293610572815,
+       "eval_runtime": 44.309,
+       "eval_samples_per_second": 40.218,
+       "eval_steps_per_second": 5.033,
+       "step": 2899
+     },
+     {
+       "epoch": 13.45,
+       "learning_rate": 5.5572289156626515e-06,
+       "loss": 0.6738,
+       "step": 3000
+     },
+     {
+       "epoch": 14.0,
+       "eval_accuracy": 0.9887766554433222,
+       "eval_loss": 0.6639252305030823,
+       "eval_runtime": 43.9081,
+       "eval_samples_per_second": 40.585,
+       "eval_steps_per_second": 5.079,
+       "step": 3122
+     },
+     {
+       "epoch": 15.0,
+       "eval_accuracy": 0.9887766554433222,
+       "eval_loss": 0.5889111757278442,
+       "eval_runtime": 44.6347,
+       "eval_samples_per_second": 39.924,
+       "eval_steps_per_second": 4.996,
+       "step": 3345
+     },
+     {
+       "epoch": 15.7,
+       "learning_rate": 4.80421686746988e-06,
+       "loss": 0.4993,
+       "step": 3500
+     },
+     {
+       "epoch": 16.0,
+       "eval_accuracy": 0.98989898989899,
+       "eval_loss": 0.5284363031387329,
+       "eval_runtime": 44.3114,
+       "eval_samples_per_second": 40.215,
+       "eval_steps_per_second": 5.033,
+       "step": 3568
+     },
+     {
+       "epoch": 17.0,
+       "eval_accuracy": 0.9910213243546577,
+       "eval_loss": 0.4793274402618408,
+       "eval_runtime": 44.0084,
+       "eval_samples_per_second": 40.492,
+       "eval_steps_per_second": 5.067,
+       "step": 3791
+     },
+     {
+       "epoch": 17.94,
+       "learning_rate": 4.051204819277109e-06,
+       "loss": 0.3821,
+       "step": 4000
+     },
+     {
+       "epoch": 18.0,
+       "eval_accuracy": 0.9915824915824916,
+       "eval_loss": 0.441709041595459,
+       "eval_runtime": 43.8673,
+       "eval_samples_per_second": 40.623,
+       "eval_steps_per_second": 5.084,
+       "step": 4014
+     },
+     {
+       "epoch": 19.0,
+       "eval_accuracy": 0.9910213243546577,
+       "eval_loss": 0.40644627809524536,
+       "eval_runtime": 43.927,
+       "eval_samples_per_second": 40.567,
+       "eval_steps_per_second": 5.077,
+       "step": 4237
+     },
+     {
+       "epoch": 20.0,
+       "eval_accuracy": 0.9910213243546577,
+       "eval_loss": 0.3786466419696808,
+       "eval_runtime": 43.9386,
+       "eval_samples_per_second": 40.557,
+       "eval_steps_per_second": 5.075,
+       "step": 4460
+     },
+     {
+       "epoch": 20.18,
+       "learning_rate": 3.2981927710843376e-06,
+       "loss": 0.3078,
+       "step": 4500
+     },
+     {
+       "epoch": 21.0,
+       "eval_accuracy": 0.9910213243546577,
+       "eval_loss": 0.35503095388412476,
+       "eval_runtime": 43.9717,
+       "eval_samples_per_second": 40.526,
+       "eval_steps_per_second": 5.071,
+       "step": 4683
+     },
+     {
+       "epoch": 22.0,
+       "eval_accuracy": 0.98989898989899,
+       "eval_loss": 0.3350999057292938,
+       "eval_runtime": 43.9545,
+       "eval_samples_per_second": 40.542,
+       "eval_steps_per_second": 5.073,
+       "step": 4906
+     },
+     {
+       "epoch": 22.42,
+       "learning_rate": 2.5451807228915665e-06,
+       "loss": 0.2597,
+       "step": 5000
+     },
+     {
+       "epoch": 23.0,
+       "eval_accuracy": 0.9927048260381593,
+       "eval_loss": 0.319038063287735,
+       "eval_runtime": 44.4102,
+       "eval_samples_per_second": 40.126,
+       "eval_steps_per_second": 5.021,
+       "step": 5129
+     },
+     {
+       "epoch": 24.0,
+       "eval_accuracy": 0.9927048260381593,
+       "eval_loss": 0.30528706312179565,
+       "eval_runtime": 43.8791,
+       "eval_samples_per_second": 40.612,
+       "eval_steps_per_second": 5.082,
+       "step": 5352
+     },
+     {
+       "epoch": 24.66,
+       "learning_rate": 1.7921686746987955e-06,
+       "loss": 0.2276,
+       "step": 5500
+     },
+     {
+       "epoch": 25.0,
+       "eval_accuracy": 0.9921436588103255,
+       "eval_loss": 0.2928442060947418,
+       "eval_runtime": 43.9358,
+       "eval_samples_per_second": 40.559,
+       "eval_steps_per_second": 5.076,
+       "step": 5575
+     },
+     {
+       "epoch": 26.0,
+       "eval_accuracy": 0.9921436588103255,
+       "eval_loss": 0.2847427427768707,
+       "eval_runtime": 44.1076,
+       "eval_samples_per_second": 40.401,
+       "eval_steps_per_second": 5.056,
+       "step": 5798
+     },
+     {
+       "epoch": 26.91,
+       "learning_rate": 1.0391566265060242e-06,
+       "loss": 0.208,
+       "step": 6000
+     },
+     {
+       "epoch": 27.0,
+       "eval_accuracy": 0.9921436588103255,
+       "eval_loss": 0.278621643781662,
+       "eval_runtime": 223.3741,
+       "eval_samples_per_second": 7.978,
+       "eval_steps_per_second": 0.998,
+       "step": 6021
+     },
+     {
+       "epoch": 28.0,
+       "eval_accuracy": 0.9927048260381593,
+       "eval_loss": 0.2740115225315094,
+       "eval_runtime": 44.8032,
+       "eval_samples_per_second": 39.774,
+       "eval_steps_per_second": 4.977,
+       "step": 6244
+     },
+     {
+       "epoch": 29.0,
+       "eval_accuracy": 0.9927048260381593,
+       "eval_loss": 0.27092334628105164,
+       "eval_runtime": 44.5356,
+       "eval_samples_per_second": 40.013,
+       "eval_steps_per_second": 5.007,
+       "step": 6467
+     },
+     {
+       "epoch": 29.15,
+       "learning_rate": 2.8614457831325304e-07,
+       "loss": 0.1961,
+       "step": 6500
+     },
+     {
+       "epoch": 30.0,
+       "eval_accuracy": 0.9927048260381593,
+       "eval_loss": 0.26971209049224854,
+       "eval_runtime": 45.6479,
+       "eval_samples_per_second": 39.038,
+       "eval_steps_per_second": 4.885,
+       "step": 6690
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 6690,
+   "num_train_epochs": 30,
+   "save_steps": 500,
+   "total_flos": 1.6569009834178314e+19,
+   "trial_name": null,
+   "trial_params": null
+ }
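trainer_state.json records 30 epochs (6690 steps) of fine-tuning: eval accuracy climbs from 0.547 after epoch 1 to 0.993 by epoch 23 and plateaus, the learning rate decays linearly from ~9.3e-6 toward zero, and the best eval loss (0.2697) lands on this final checkpoint. A small sketch, not part of the commit, for pulling the accuracy curve out of log_history:

```python
# Read trainer_state.json and print the eval curve recorded in log_history.
import json

with open("checkpoint-6690/trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries carry eval_* keys; the interleaved entries at steps
# 500, 1000, ... carry training loss and learning rate instead.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']:>5}: "
              f"acc={entry['eval_accuracy']:.4f} loss={entry['eval_loss']:.4f}")

print("best:", state["best_metric"], "at", state["best_model_checkpoint"])
```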
checkpoint-6690/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1a73b795ad1d960babcef6b054956509efa2db208a53fcf22ddd27e4cbcb494
+ size 4027
config.json ADDED
@@ -0,0 +1,132 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224-in21k",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "Doddpathre",
+     "1": "Arali",
+     "2": "Avacado",
+     "3": "Pomegranate",
+     "4": "Lemon",
+     "5": "Honge",
+     "6": "Mango",
+     "7": "Geranium",
+     "8": "Gauva",
+     "9": "Mint",
+     "10": "Palak(Spinach)",
+     "11": "Betel",
+     "12": "Coriender",
+     "13": "Catharanthus",
+     "14": "Betel_Nut",
+     "15": "Henna",
+     "16": "Jackfruit",
+     "17": "Curry_Leaf",
+     "18": "Tulsi",
+     "19": "Basale",
+     "20": "Brahmi",
+     "21": "Ganike",
+     "22": "Aloevera",
+     "23": "Curry",
+     "24": "Tulasi",
+     "25": "Tamarind",
+     "26": "Amla",
+     "27": "Nithyapushpa",
+     "28": "Hibiscus",
+     "29": "Neem",
+     "30": "Pappaya",
+     "31": "Ekka",
+     "32": "Raktachandini",
+     "33": "Doddapatre",
+     "34": "Nagadali",
+     "35": "Castor",
+     "36": "Amruta_Balli",
+     "37": "Guava",
+     "38": "Bamboo",
+     "39": "Ashwagandha",
+     "40": "Insulin",
+     "41": "Pepper",
+     "42": "Rose",
+     "43": "Wood_sorel",
+     "44": "Seethapala",
+     "45": "Lemon_grass",
+     "46": "Jasmine",
+     "47": "Bhrami",
+     "48": "Papaya",
+     "49": "Nooni",
+     "50": "Sapota",
+     "51": "Ashoka"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "Aloevera": 22,
+     "Amla": 26,
+     "Amruta_Balli": 36,
+     "Arali": 1,
+     "Ashoka": 51,
+     "Ashwagandha": 39,
+     "Avacado": 2,
+     "Bamboo": 38,
+     "Basale": 19,
+     "Betel": 11,
+     "Betel_Nut": 14,
+     "Bhrami": 47,
+     "Brahmi": 20,
+     "Castor": 35,
+     "Catharanthus": 13,
+     "Coriender": 12,
+     "Curry": 23,
+     "Curry_Leaf": 17,
+     "Doddapatre": 33,
+     "Doddpathre": 0,
+     "Ekka": 31,
+     "Ganike": 21,
+     "Gauva": 8,
+     "Geranium": 7,
+     "Guava": 37,
+     "Henna": 15,
+     "Hibiscus": 28,
+     "Honge": 5,
+     "Insulin": 40,
+     "Jackfruit": 16,
+     "Jasmine": 46,
+     "Lemon": 4,
+     "Lemon_grass": 45,
+     "Mango": 6,
+     "Mint": 9,
+     "Nagadali": 34,
+     "Neem": 29,
+     "Nithyapushpa": 27,
+     "Nooni": 49,
+     "Palak(Spinach)": 10,
+     "Papaya": 48,
+     "Pappaya": 30,
+     "Pepper": 41,
+     "Pomegranate": 3,
+     "Raktachandini": 32,
+     "Rose": 42,
+     "Sapota": 50,
+     "Seethapala": 44,
+     "Tamarind": 25,
+     "Tulasi": 24,
+     "Tulsi": 18,
+     "Wood_sorel": 43
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.0"
+ }
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2260320ca894b275d8322e37961fbb12cef8c5200ae4ed2ef939a52b3aa9cf9d
+ size 343422509
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1a73b795ad1d960babcef6b054956509efa2db208a53fcf22ddd27e4cbcb494
+ size 4027
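Together, the root-level config.json, preprocessor_config.json, and pytorch_model.bin (mirroring checkpoint-6690) form a complete, loadable classifier. An end-to-end inference sketch, assuming the repo is cloned and run from its root; "leaf.jpg" is a placeholder input:

```python
# End-to-end sketch: classify one image with the root-level model files.
# Assumes this script runs from the repo root; "leaf.jpg" is a placeholder.
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

model = ViTForImageClassification.from_pretrained(".")
processor = ViTImageProcessor.from_pretrained(".")

image = Image.open("leaf.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

pred = logits.argmax(-1).item()
print(model.config.id2label[pred])  # e.g. "Tulsi"
```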