divyagupta committed
Commit 0fcc0d4 · 1 Parent(s): 029ddad

Upload folder using huggingface_hub

checkpoint-6690/config.json ADDED
@@ -0,0 +1,132 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224-in21k",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "Aloevera",
+     "1": "Jackfruit",
+     "2": "Honge",
+     "3": "Avacado",
+     "4": "Nithyapushpa",
+     "5": "Curry",
+     "6": "Lemon_grass",
+     "7": "Pappaya",
+     "8": "Guava",
+     "9": "Tulsi",
+     "10": "Betel",
+     "11": "Henna",
+     "12": "Hibiscus",
+     "13": "Arali",
+     "14": "Nooni",
+     "15": "Basale",
+     "16": "Ashoka",
+     "17": "Wood_sorel",
+     "18": "Lemon",
+     "19": "Bhrami",
+     "20": "Neem",
+     "21": "Nagadali",
+     "22": "Palak(Spinach)",
+     "23": "Curry_Leaf",
+     "24": "Doddpathre",
+     "25": "Rose",
+     "26": "Insulin",
+     "27": "Gauva",
+     "28": "Coriender",
+     "29": "Papaya",
+     "30": "Brahmi",
+     "31": "Pepper",
+     "32": "Seethapala",
+     "33": "Jasmine",
+     "34": "Ashwagandha",
+     "35": "Raktachandini",
+     "36": "Ekka",
+     "37": "Bamboo",
+     "38": "Amla",
+     "39": "Mango",
+     "40": "Betel_Nut",
+     "41": "Tamarind",
+     "42": "Amruta_Balli",
+     "43": "Mint",
+     "44": "Tulasi",
+     "45": "Geranium",
+     "46": "Castor",
+     "47": "Pomegranate",
+     "48": "Doddapatre",
+     "49": "Ganike",
+     "50": "Sapota",
+     "51": "Catharanthus"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "Aloevera": 0,
+     "Amla": 38,
+     "Amruta_Balli": 42,
+     "Arali": 13,
+     "Ashoka": 16,
+     "Ashwagandha": 34,
+     "Avacado": 3,
+     "Bamboo": 37,
+     "Basale": 15,
+     "Betel": 10,
+     "Betel_Nut": 40,
+     "Bhrami": 19,
+     "Brahmi": 30,
+     "Castor": 46,
+     "Catharanthus": 51,
+     "Coriender": 28,
+     "Curry": 5,
+     "Curry_Leaf": 23,
+     "Doddapatre": 48,
+     "Doddpathre": 24,
+     "Ekka": 36,
+     "Ganike": 49,
+     "Gauva": 27,
+     "Geranium": 45,
+     "Guava": 8,
+     "Henna": 11,
+     "Hibiscus": 12,
+     "Honge": 2,
+     "Insulin": 26,
+     "Jackfruit": 1,
+     "Jasmine": 33,
+     "Lemon": 18,
+     "Lemon_grass": 6,
+     "Mango": 39,
+     "Mint": 43,
+     "Nagadali": 21,
+     "Neem": 20,
+     "Nithyapushpa": 4,
+     "Nooni": 14,
+     "Palak(Spinach)": 22,
+     "Papaya": 29,
+     "Pappaya": 7,
+     "Pepper": 31,
+     "Pomegranate": 47,
+     "Raktachandini": 35,
+     "Rose": 25,
+     "Sapota": 50,
+     "Seethapala": 32,
+     "Tamarind": 41,
+     "Tulasi": 44,
+     "Tulsi": 9,
+     "Wood_sorel": 17
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.33.2"
+ }
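This config wires a ViT-Base encoder (12 layers, 12 heads, hidden size 768, 16×16 patches on 224×224 inputs), fine-tuned from google/vit-base-patch16-224-in21k, into a 52-way single-label classifier; note that the label set keeps several near-duplicate spellings (e.g. "Guava"/"Gauva", "Papaya"/"Pappaya", "Tulsi"/"Tulasi") as distinct class ids. A minimal sketch of loading the checkpoint and reading the label map, assuming the local directory layout shown in this commit:

```python
from transformers import ViTForImageClassification

# Load from the checkpoint directory added in this commit (path assumed local).
model = ViTForImageClassification.from_pretrained(
    "medicinal_plants_image_detection/checkpoint-6690"
)

print(model.config.num_labels)    # 52, derived from id2label
print(model.config.id2label[0])   # "Aloevera" (keys become ints on load)
```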
checkpoint-6690/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89e6315f55f8002a225fcc726975e8f470d37e532a4f71e2261ea2eb0e28745b
+ size 686876037
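The binary files in this commit are stored as Git LFS pointer files: three lines giving the spec version, the SHA-256 of the real blob, and its byte size. At 686,876,037 bytes the optimizer state is roughly twice the size of the 343 MB model weights, consistent with AdamW keeping two moment tensors per parameter. A sketch of resolving a pointer to the actual file via huggingface_hub, with the repo id assumed for illustration:

```python
from huggingface_hub import hf_hub_download

# hf_hub_download fetches the real LFS blob and returns a local cache path.
local_path = hf_hub_download(
    repo_id="divyagupta/medicinal_plants_image_detection",  # assumed repo id
    filename="checkpoint-6690/optimizer.pt",
)
print(local_path)
```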
checkpoint-6690/preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
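The preprocessor resizes to 224×224 with bilinear resampling (2 is PIL's BILINEAR enum value), rescales pixels by 1/255 (0.00392…), and normalizes each channel with mean 0.5 and std 0.5, mapping inputs into [-1, 1]. A sketch of applying it, where the image filename is a placeholder:

```python
from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained(
    "medicinal_plants_image_detection/checkpoint-6690"  # assumed local path
)
image = Image.open("leaf.jpg").convert("RGB")  # placeholder input image
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```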
checkpoint-6690/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a17430298ed46a4b84221069f7fc2d13f38b8de74c65ca5dd935ea36c4d65f0
+ size 343422509
checkpoint-6690/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0c7fea4d850b4851a1da61d9fc8818b483c1951b244b085ee2453183c5223e1
+ size 14575
checkpoint-6690/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d52ee7fdc399d5377aeff037fc7da32f92f6f36eb0780e5135892bb5ede30e5
+ size 627
checkpoint-6690/trainer_state.json ADDED
@@ -0,0 +1,367 @@
+ {
+   "best_metric": 0.2728825807571411,
+   "best_model_checkpoint": "medicinal_plants_image_detection/checkpoint-6690",
+   "epoch": 30.0,
+   "eval_steps": 500,
+   "global_step": 6690,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.5084175084175084,
+       "eval_loss": 3.579941511154175,
+       "eval_runtime": 46.4248,
+       "eval_samples_per_second": 38.385,
+       "eval_steps_per_second": 4.803,
+       "step": 223
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.787317620650954,
+       "eval_loss": 3.106814384460449,
+       "eval_runtime": 46.2058,
+       "eval_samples_per_second": 38.567,
+       "eval_steps_per_second": 4.826,
+       "step": 446
+     },
+     {
+       "epoch": 2.24,
+       "learning_rate": 9.322289156626508e-06,
+       "loss": 3.4743,
+       "step": 500
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.8597081930415263,
+       "eval_loss": 2.69696044921875,
+       "eval_runtime": 46.2796,
+       "eval_samples_per_second": 38.505,
+       "eval_steps_per_second": 4.819,
+       "step": 669
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.8922558922558923,
+       "eval_loss": 2.3514912128448486,
+       "eval_runtime": 51.6673,
+       "eval_samples_per_second": 34.49,
+       "eval_steps_per_second": 4.316,
+       "step": 892
+     },
+     {
+       "epoch": 4.48,
+       "learning_rate": 8.569277108433736e-06,
+       "loss": 2.4679,
+       "step": 1000
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.9219977553310886,
+       "eval_loss": 2.057647228240967,
+       "eval_runtime": 45.3221,
+       "eval_samples_per_second": 39.319,
+       "eval_steps_per_second": 4.92,
+       "step": 1115
+     },
+     {
+       "epoch": 6.0,
+       "eval_accuracy": 0.9438832772166106,
+       "eval_loss": 1.7974578142166138,
+       "eval_runtime": 45.3049,
+       "eval_samples_per_second": 39.333,
+       "eval_steps_per_second": 4.922,
+       "step": 1338
+     },
+     {
+       "epoch": 6.73,
+       "learning_rate": 7.816265060240964e-06,
+       "loss": 1.7869,
+       "step": 1500
+     },
+     {
+       "epoch": 7.0,
+       "eval_accuracy": 0.9612794612794613,
+       "eval_loss": 1.5717405080795288,
+       "eval_runtime": 45.5037,
+       "eval_samples_per_second": 39.162,
+       "eval_steps_per_second": 4.901,
+       "step": 1561
+     },
+     {
+       "epoch": 8.0,
+       "eval_accuracy": 0.9646464646464646,
+       "eval_loss": 1.3833696842193604,
+       "eval_runtime": 45.5141,
+       "eval_samples_per_second": 39.153,
+       "eval_steps_per_second": 4.9,
+       "step": 1784
+     },
+     {
+       "epoch": 8.97,
+       "learning_rate": 7.063253012048194e-06,
+       "loss": 1.2912,
+       "step": 2000
+     },
+     {
+       "epoch": 9.0,
+       "eval_accuracy": 0.9725028058361391,
+       "eval_loss": 1.2191885709762573,
+       "eval_runtime": 45.4202,
+       "eval_samples_per_second": 39.234,
+       "eval_steps_per_second": 4.91,
+       "step": 2007
+     },
+     {
+       "epoch": 10.0,
+       "eval_accuracy": 0.9764309764309764,
+       "eval_loss": 1.072811245918274,
+       "eval_runtime": 45.5474,
+       "eval_samples_per_second": 39.124,
+       "eval_steps_per_second": 4.896,
+       "step": 2230
+     },
+     {
+       "epoch": 11.0,
+       "eval_accuracy": 0.978675645342312,
+       "eval_loss": 0.9478395581245422,
+       "eval_runtime": 45.4902,
+       "eval_samples_per_second": 39.173,
+       "eval_steps_per_second": 4.902,
+       "step": 2453
+     },
+     {
+       "epoch": 11.21,
+       "learning_rate": 6.310240963855422e-06,
+       "loss": 0.9408,
+       "step": 2500
+     },
+     {
+       "epoch": 12.0,
+       "eval_accuracy": 0.9814814814814815,
+       "eval_loss": 0.8372712135314941,
+       "eval_runtime": 45.141,
+       "eval_samples_per_second": 39.476,
+       "eval_steps_per_second": 4.94,
+       "step": 2676
+     },
+     {
+       "epoch": 13.0,
+       "eval_accuracy": 0.9848484848484849,
+       "eval_loss": 0.738318145275116,
+       "eval_runtime": 46.7789,
+       "eval_samples_per_second": 38.094,
+       "eval_steps_per_second": 4.767,
+       "step": 2899
+     },
+     {
+       "epoch": 13.45,
+       "learning_rate": 5.5572289156626515e-06,
+       "loss": 0.6876,
+       "step": 3000
+     },
+     {
+       "epoch": 14.0,
+       "eval_accuracy": 0.9831649831649831,
+       "eval_loss": 0.6616793870925903,
+       "eval_runtime": 46.7114,
+       "eval_samples_per_second": 38.149,
+       "eval_steps_per_second": 4.774,
+       "step": 3122
+     },
+     {
+       "epoch": 15.0,
+       "eval_accuracy": 0.9870931537598204,
+       "eval_loss": 0.5913097858428955,
+       "eval_runtime": 47.664,
+       "eval_samples_per_second": 37.387,
+       "eval_steps_per_second": 4.679,
+       "step": 3345
+     },
+     {
+       "epoch": 15.7,
+       "learning_rate": 4.80421686746988e-06,
+       "loss": 0.5136,
+       "step": 3500
+     },
+     {
+       "epoch": 16.0,
+       "eval_accuracy": 0.9870931537598204,
+       "eval_loss": 0.5324914455413818,
+       "eval_runtime": 46.3828,
+       "eval_samples_per_second": 38.419,
+       "eval_steps_per_second": 4.808,
+       "step": 3568
+     },
+     {
+       "epoch": 17.0,
+       "eval_accuracy": 0.9887766554433222,
+       "eval_loss": 0.479704886674881,
+       "eval_runtime": 46.0713,
+       "eval_samples_per_second": 38.679,
+       "eval_steps_per_second": 4.84,
+       "step": 3791
+     },
+     {
+       "epoch": 17.94,
+       "learning_rate": 4.051204819277109e-06,
+       "loss": 0.3943,
+       "step": 4000
+     },
+     {
+       "epoch": 18.0,
+       "eval_accuracy": 0.989337822671156,
+       "eval_loss": 0.4396406412124634,
+       "eval_runtime": 45.7686,
+       "eval_samples_per_second": 38.935,
+       "eval_steps_per_second": 4.872,
+       "step": 4014
+     },
+     {
+       "epoch": 19.0,
+       "eval_accuracy": 0.989337822671156,
+       "eval_loss": 0.4080045521259308,
+       "eval_runtime": 45.7582,
+       "eval_samples_per_second": 38.944,
+       "eval_steps_per_second": 4.873,
+       "step": 4237
+     },
+     {
+       "epoch": 20.0,
+       "eval_accuracy": 0.98989898989899,
+       "eval_loss": 0.37790489196777344,
+       "eval_runtime": 45.6986,
+       "eval_samples_per_second": 38.995,
+       "eval_steps_per_second": 4.88,
+       "step": 4460
+     },
+     {
+       "epoch": 20.18,
+       "learning_rate": 3.2981927710843376e-06,
+       "loss": 0.3172,
+       "step": 4500
+     },
+     {
+       "epoch": 21.0,
+       "eval_accuracy": 0.9887766554433222,
+       "eval_loss": 0.3557441234588623,
+       "eval_runtime": 45.6747,
+       "eval_samples_per_second": 39.015,
+       "eval_steps_per_second": 4.882,
+       "step": 4683
+     },
+     {
+       "epoch": 22.0,
+       "eval_accuracy": 0.98989898989899,
+       "eval_loss": 0.3380172848701477,
+       "eval_runtime": 45.0718,
+       "eval_samples_per_second": 39.537,
+       "eval_steps_per_second": 4.948,
+       "step": 4906
+     },
+     {
+       "epoch": 22.42,
+       "learning_rate": 2.5451807228915665e-06,
+       "loss": 0.2682,
+       "step": 5000
+     },
+     {
+       "epoch": 23.0,
+       "eval_accuracy": 0.9887766554433222,
+       "eval_loss": 0.322950541973114,
+       "eval_runtime": 45.6123,
+       "eval_samples_per_second": 39.068,
+       "eval_steps_per_second": 4.889,
+       "step": 5129
+     },
+     {
+       "epoch": 24.0,
+       "eval_accuracy": 0.98989898989899,
+       "eval_loss": 0.3070899248123169,
+       "eval_runtime": 45.7915,
+       "eval_samples_per_second": 38.916,
+       "eval_steps_per_second": 4.87,
+       "step": 5352
+     },
+     {
+       "epoch": 24.66,
+       "learning_rate": 1.7921686746987955e-06,
+       "loss": 0.2353,
+       "step": 5500
+     },
+     {
+       "epoch": 25.0,
+       "eval_accuracy": 0.9910213243546577,
+       "eval_loss": 0.2948002815246582,
+       "eval_runtime": 46.3588,
+       "eval_samples_per_second": 38.439,
+       "eval_steps_per_second": 4.81,
+       "step": 5575
+     },
+     {
+       "epoch": 26.0,
+       "eval_accuracy": 0.9887766554433222,
+       "eval_loss": 0.29044070839881897,
+       "eval_runtime": 45.4782,
+       "eval_samples_per_second": 39.184,
+       "eval_steps_per_second": 4.903,
+       "step": 5798
+     },
+     {
+       "epoch": 26.91,
+       "learning_rate": 1.0391566265060242e-06,
+       "loss": 0.2151,
+       "step": 6000
+     },
+     {
+       "epoch": 27.0,
+       "eval_accuracy": 0.98989898989899,
+       "eval_loss": 0.28158509731292725,
+       "eval_runtime": 45.7196,
+       "eval_samples_per_second": 38.977,
+       "eval_steps_per_second": 4.878,
+       "step": 6021
+     },
+     {
+       "epoch": 28.0,
+       "eval_accuracy": 0.989337822671156,
+       "eval_loss": 0.27782607078552246,
+       "eval_runtime": 45.8013,
+       "eval_samples_per_second": 38.907,
+       "eval_steps_per_second": 4.869,
+       "step": 6244
+     },
+     {
+       "epoch": 29.0,
+       "eval_accuracy": 0.98989898989899,
+       "eval_loss": 0.27393096685409546,
+       "eval_runtime": 45.4997,
+       "eval_samples_per_second": 39.165,
+       "eval_steps_per_second": 4.901,
+       "step": 6467
+     },
+     {
+       "epoch": 29.15,
+       "learning_rate": 2.8614457831325304e-07,
+       "loss": 0.2032,
+       "step": 6500
+     },
+     {
+       "epoch": 30.0,
+       "eval_accuracy": 0.989337822671156,
+       "eval_loss": 0.2728825807571411,
+       "eval_runtime": 45.5942,
+       "eval_samples_per_second": 39.084,
+       "eval_steps_per_second": 4.891,
+       "step": 6690
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 6690,
+   "num_train_epochs": 30,
+   "save_steps": 500,
+   "total_flos": 1.6569009834178314e+19,
+   "trial_name": null,
+   "trial_params": null
+ }
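trainer_state.json records eval metrics after every epoch and training loss every 500 steps (223 steps per epoch × 30 epochs = 6690). Eval accuracy climbs from 50.8% after epoch 1 to a peak of 99.10% at epoch 25, and best_metric (0.2728…) is the epoch-30 eval loss, which is why checkpoint-6690 is flagged as the best checkpoint. A sketch of pulling the eval curve out of the file:

```python
import json

with open("checkpoint-6690/trainer_state.json") as f:
    state = json.load(f)

# Keep only per-epoch eval records; the 500-step entries carry training loss.
evals = [e for e in state["log_history"] if "eval_accuracy" in e]
best = max(evals, key=lambda e: e["eval_accuracy"])
print(best["epoch"], best["eval_accuracy"])  # 25.0 0.9910213243546577
```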
checkpoint-6690/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd4f3514f17a397e002d618bb4c685094a71f6570a36f5bfe4984529d10ef39b
+ size 4027
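Unlike the weight files, training_args.bin (about 4 KB) is a pickled TrainingArguments object saved with torch.save. Once the LFS blob is fetched, it can be inspected directly, assuming transformers is installed so the pickle can resolve its class:

```python
import torch

# weights_only=False (the default on older torch) because this is an
# arbitrary pickled object, not a tensor state dict.
args = torch.load("checkpoint-6690/training_args.bin", weights_only=False)
print(args.num_train_epochs, args.logging_steps, args.save_steps)
```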
config.json ADDED
@@ -0,0 +1,132 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224-in21k",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "Aloevera",
+     "1": "Jackfruit",
+     "2": "Honge",
+     "3": "Avacado",
+     "4": "Nithyapushpa",
+     "5": "Curry",
+     "6": "Lemon_grass",
+     "7": "Pappaya",
+     "8": "Guava",
+     "9": "Tulsi",
+     "10": "Betel",
+     "11": "Henna",
+     "12": "Hibiscus",
+     "13": "Arali",
+     "14": "Nooni",
+     "15": "Basale",
+     "16": "Ashoka",
+     "17": "Wood_sorel",
+     "18": "Lemon",
+     "19": "Bhrami",
+     "20": "Neem",
+     "21": "Nagadali",
+     "22": "Palak(Spinach)",
+     "23": "Curry_Leaf",
+     "24": "Doddpathre",
+     "25": "Rose",
+     "26": "Insulin",
+     "27": "Gauva",
+     "28": "Coriender",
+     "29": "Papaya",
+     "30": "Brahmi",
+     "31": "Pepper",
+     "32": "Seethapala",
+     "33": "Jasmine",
+     "34": "Ashwagandha",
+     "35": "Raktachandini",
+     "36": "Ekka",
+     "37": "Bamboo",
+     "38": "Amla",
+     "39": "Mango",
+     "40": "Betel_Nut",
+     "41": "Tamarind",
+     "42": "Amruta_Balli",
+     "43": "Mint",
+     "44": "Tulasi",
+     "45": "Geranium",
+     "46": "Castor",
+     "47": "Pomegranate",
+     "48": "Doddapatre",
+     "49": "Ganike",
+     "50": "Sapota",
+     "51": "Catharanthus"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "Aloevera": 0,
+     "Amla": 38,
+     "Amruta_Balli": 42,
+     "Arali": 13,
+     "Ashoka": 16,
+     "Ashwagandha": 34,
+     "Avacado": 3,
+     "Bamboo": 37,
+     "Basale": 15,
+     "Betel": 10,
+     "Betel_Nut": 40,
+     "Bhrami": 19,
+     "Brahmi": 30,
+     "Castor": 46,
+     "Catharanthus": 51,
+     "Coriender": 28,
+     "Curry": 5,
+     "Curry_Leaf": 23,
+     "Doddapatre": 48,
+     "Doddpathre": 24,
+     "Ekka": 36,
+     "Ganike": 49,
+     "Gauva": 27,
+     "Geranium": 45,
+     "Guava": 8,
+     "Henna": 11,
+     "Hibiscus": 12,
+     "Honge": 2,
+     "Insulin": 26,
+     "Jackfruit": 1,
+     "Jasmine": 33,
+     "Lemon": 18,
+     "Lemon_grass": 6,
+     "Mango": 39,
+     "Mint": 43,
+     "Nagadali": 21,
+     "Neem": 20,
+     "Nithyapushpa": 4,
+     "Nooni": 14,
+     "Palak(Spinach)": 22,
+     "Papaya": 29,
+     "Pappaya": 7,
+     "Pepper": 31,
+     "Pomegranate": 47,
+     "Raktachandini": 35,
+     "Rose": 25,
+     "Sapota": 50,
+     "Seethapala": 32,
+     "Tamarind": 41,
+     "Tulasi": 44,
+     "Tulsi": 9,
+     "Wood_sorel": 17
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.33.2"
+ }
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a17430298ed46a4b84221069f7fc2d13f38b8de74c65ca5dd935ea36c4d65f0
+ size 343422509
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd4f3514f17a397e002d618bb4c685094a71f6570a36f5bfe4984529d10ef39b
+ size 4027
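The root-level config.json, preprocessor_config.json, and pytorch_model.bin duplicate the best checkpoint's files so the repo can be loaded directly. Putting the pieces together, a sketch of end-to-end inference against this commit, where the repo id and image path are illustrative assumptions:

```python
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

repo = "divyagupta/medicinal_plants_image_detection"  # assumed repo id
processor = ViTImageProcessor.from_pretrained(repo)
model = ViTForImageClassification.from_pretrained(repo).eval()

image = Image.open("sample_leaf.jpg").convert("RGB")  # placeholder input
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])  # predicted plant name
```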