vignesh456 committed on
Commit
242e5dc
·
verified ·
1 Parent(s): 8d62829

Upload 4 files

Browse files
100-epoch with regularization.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec769e8790691bbf2f5445b3a338095db7af87939df0f323eb01b91234daf5b3
3
+ size 2554400
cnn_train.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
3
+
4
+ import warnings
5
+ warnings.filterwarnings('ignore')
6
+
7
+ import tensorflow as tf
8
+ from keras.models import Sequential
9
+ from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
10
+ from keras.callbacks import EarlyStopping
11
+ import numpy as np
12
+
13
+ np.random.seed(1337)
14
+ classifier = Sequential()
15
+
16
+ classifier.add(Conv2D(32, (3, 3), input_shape=(128, 128, 3), activation='relu'))
17
+ classifier.add(MaxPooling2D(pool_size=(2, 2)))
18
+ classifier.add(Conv2D(16, (3, 3), activation='relu'))
19
+ classifier.add(MaxPooling2D(pool_size=(2, 2)))
20
+ classifier.add(Conv2D(8, (3, 3), activation='relu'))
21
+ classifier.add(MaxPooling2D(pool_size=(2, 2)))
22
+ classifier.add(Flatten())
23
+ classifier.add(Dense(128, activation='relu'))
24
+ classifier.add(Dropout(0.5))
25
+ classifier.add(Dense(10, activation='softmax'))
26
+
27
+ classifier.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
28
+ print(classifier.summary())
29
+
30
+ train_dir = '/home/vignesh/tomato_data/train'
31
+ val_dir = '/home/vignesh/tomato_data/val'
32
+
33
+ train_data_raw = tf.keras.utils.image_dataset_from_directory(
34
+ train_dir,
35
+ labels='inferred',
36
+ label_mode='categorical',
37
+ image_size=(128, 128),
38
+ batch_size=32,
39
+ shuffle=True
40
+ )
41
+ class_names = train_data_raw.class_names # Get class names before mapping
42
+ train_data = train_data_raw.map(lambda x, y: (x / 255.0, y)).prefetch(tf.data.AUTOTUNE)
43
+
44
+ val_data = tf.keras.utils.image_dataset_from_directory(
45
+ val_dir,
46
+ labels='inferred',
47
+ label_mode='categorical',
48
+ image_size=(128, 128),
49
+ batch_size=32,
50
+ shuffle=False
51
+ )
52
+ val_data = val_data.map(lambda x, y: (x / 255.0, y)).prefetch(tf.data.AUTOTUNE)
53
+
54
+ print({name: idx for idx, name in enumerate(class_names)})
55
+
56
+ # Early stopping callback
57
+ early_stop = EarlyStopping(monitor='val_loss', patience=10,restore_best_weights=True)
58
+
59
+ classifier.fit(
60
+ train_data,
61
+ epochs=30,
62
+ validation_data=val_data,
63
+ )
64
+
65
+ classifier.save('keras_potato_trained_model(2.h5')
66
+ print('Saved trained model as %s ' % 'keras_potato_trained_model.h5')
requirements.txt CHANGED
@@ -1,3 +1,5 @@
1
- altair
2
- pandas
3
- streamlit
 
 
 
1
+ streamlit>=1.20.0
2
+ numpy>=1.23.0
3
+ pillow>=9.0.0
4
+ tensorflow>=2.10.0,<2.16.0
5
+ h5py>=3.7.0
streamlit.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
import tensorflow as tf
import numpy as np
from PIL import Image

st.title('🍅 Simple Tomato Leaf Disease Classifier')


@st.cache_resource
def load_model():
    """Load the trained Keras classifier once; Streamlit caches it across reruns."""
    return tf.keras.models.load_model('100-epoch with regularization.h5')


model = load_model()

# Class names (update if your classes are different)
class_names = [
    'Tomato___Bacterial_spot',
    'Tomato___Early_blight',
    'Tomato___Late_blight',
    'Tomato___Leaf_Mold',
    'Tomato___Septoria_leaf_spot',
    'Tomato___Spider_mites Two-spotted_spider_mite',
    'Tomato___Target_Spot',
    'Tomato___Tomato_Yellow_Leaf_Curl_Virus',
    'Tomato___Tomato_mosaic_virus',
    'Tomato___healthy'
]

uploaded_file = st.file_uploader('Upload a tomato leaf image', type=['jpg', 'jpeg', 'png'])

if uploaded_file is not None:
    # Display the upload, then preprocess the same way the model was trained:
    # RGB, 128x128, pixel values scaled to [0, 1], plus a leading batch axis.
    pil_img = Image.open(uploaded_file).convert('RGB')
    st.image(pil_img, caption='Uploaded Image', use_column_width=True)
    scaled = np.array(pil_img.resize((128, 128))) / 255.0
    batch = scaled[np.newaxis, ...]
    probs = model.predict(batch)
    top_idx = np.argmax(probs, axis=1)[0]
    st.success(f'Predicted Class: {class_names[top_idx]}')