Pushp123 commited on
Commit
f0ab91f
·
verified ·
1 Parent(s): 9640146

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +256 -0
app.py ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Mount Google Drive so the dataset under MyDrive is reachable from this Colab runtime.
from google.colab import drive

drive.mount("/content/drive")

#Data Preprocessing

import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from PIL import Image

# Set image size and batch size
IMAGE_SIZE = (224, 224)  # matches the ResNet50 input size used later in this file
BATCH_SIZE = 32

# Paths to your dataset
TRAIN_PATH = '/content/drive/MyDrive/archive/dataset'

# Data generator for loading and preprocessing images
# Pixels are rescaled to [0, 1]; 15% of the images are held out for validation.
datagen = ImageDataGenerator(rescale=1./255, validation_split=0.15)

# NOTE(review): class_mode='binary' assumes exactly two class sub-folders
# under TRAIN_PATH (e.g. covid/ and healthy/) — confirm the directory layout.
train_data = datagen.flow_from_directory(
    TRAIN_PATH,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='binary',
    subset='training' # Set as training data
)

val_data = datagen.flow_from_directory(
    TRAIN_PATH,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='binary',
    subset='validation' # Set as validation data
)
38
+
39
#CNN Model Setup (Transfer Learning)

import tensorflow as tf
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Dropout
from tensorflow.keras.models import Model

# Define the input shape
input_shape = (224, 224, 3)

# Load ResNet50 with input shape and without the top layer
# NOTE(review): the data pipeline rescales inputs to [0, 1], while ImageNet
# ResNet50 weights were trained with preprocess_input-style preprocessing —
# the frozen features may still transfer, but confirm this mismatch is intended.
base_model = ResNet50(weights='imagenet', include_top=False, input_shape=input_shape)

# Freeze the layers in the base model
base_model.trainable = False

# Add custom layers on top
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(128, activation='relu')(x)
x = Dropout(0.5)(x)  # regularizes the small trainable head
predictions = Dense(1, activation='sigmoid')(x)  # single sigmoid unit -> binary score

# Define the model
model = Model(inputs=base_model.input, outputs=predictions)

# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Model summary
model.summary()
70
+
71
#Training the Model

# Train the model
# Only the new head trains (the ResNet50 base was frozen above); `history`
# records per-epoch loss/accuracy used for the plots below.
history = model.fit(
    train_data,
    validation_data=val_data,
    epochs=10, # Adjust epochs as needed
    verbose=1
)

import matplotlib.pyplot as plt

# Plot the training and validation accuracy
plt.figure(figsize=(12, 6))

# Accuracy plot
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Model Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.grid(True)

# Loss plot
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Model Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(loc='upper right')
plt.grid(True)

# Show the plot
plt.tight_layout()
plt.show()
109
+
110
+ #Explainable AI Integration (Grad-CAM)
111
+
112
+ import numpy as np
113
+ import tensorflow as tf
114
+ import matplotlib.pyplot as plt
115
+ from tensorflow.keras.models import Model
116
+ from PIL import Image
117
+
118
def make_gradcam_heatmap(img_array, model, last_conv_layer_name):
    """Compute a Grad-CAM heatmap for a single-sigmoid-output classifier.

    Parameters:
        img_array: batched input, shape (1, H, W, C), already preprocessed.
        model: the trained Keras model.
        last_conv_layer_name: name of the last convolutional layer to explain.

    Returns:
        A 2D numpy array normalized to [0, 1] (all zeros if no positive
        activation survives the ReLU).
    """
    # model.inputs is already a list — the original wrapped it as
    # [model.inputs], which nests a list inside a list.
    grad_model = Model(
        inputs=model.inputs,
        outputs=[model.get_layer(last_conv_layer_name).output, model.output]
    )

    # Record operations for automatic differentiation
    with tf.GradientTape() as tape:
        conv_outputs, predictions = grad_model(img_array)
        loss = predictions[:, 0]  # Assuming binary classification (0 = Healthy, 1 = COVID-19)

    # Compute gradients of the score w.r.t. the conv feature maps
    grads = tape.gradient(loss, conv_outputs)
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))

    conv_outputs = conv_outputs[0]
    heatmap = tf.reduce_mean(tf.multiply(pooled_grads, conv_outputs), axis=-1)
    heatmap = np.maximum(heatmap, 0)
    # Bug fix: the original divided by np.max(heatmap) unconditionally,
    # producing NaNs when the heatmap is all-zero.
    max_val = np.max(heatmap)
    if max_val > 0:
        heatmap = heatmap / max_val  # Normalize between 0 and 1
    return heatmap
137
+
138
def display_gradcam(img_path, heatmap, alpha=0.4):
    """Show the original X-ray side-by-side with the Grad-CAM overlay.

    Parameters:
        img_path: path to the image file on disk.
        heatmap: 2D array in [0, 1] from make_gradcam_heatmap.
        alpha: opacity of the heatmap overlay.
    """
    base = Image.open(img_path)
    base = base.resize((224, 224))  # match the model's input resolution

    # Scale the [0, 1] heatmap to 8-bit and upsample it to the image size.
    scaled = np.uint8(255 * heatmap)
    scaled = Image.fromarray(scaled).resize((base.size), Image.LANCZOS)
    scaled = np.array(scaled)

    # Left panel: plain image. Right panel: same image with jet-coloured overlay.
    fig, axes = plt.subplots(1, 2, figsize=(10, 5))
    axes[0].imshow(base)
    axes[1].imshow(base)
    axes[1].imshow(scaled, cmap='jet', alpha=alpha)
    plt.show()
152
+
153
# Load and preprocess the image
def preprocess_image(image_path):
    """Load an image and prepare it as a (1, 224, 224, 3) float array in [0, 1].

    Parameters:
        image_path: path to the image file on disk.

    Returns:
        A batched numpy array suitable for model.predict.
    """
    img = Image.open(image_path)
    # Bug fix: force 3 channels — grayscale X-rays would otherwise produce a
    # (224, 224) array, which the (224, 224, 3) model input cannot accept.
    img = img.convert('RGB')
    img = img.resize((224, 224)) # Resize to match the input shape of the model
    img = np.array(img) / 255.0 # Normalize pixel values between 0 and 1
    img = np.expand_dims(img, axis=0) # Add batch dimension
    return img
160
+
161
# Path to the image
# NOTE(review): hard-coded sample from the 'covid' class folder — confirm the
# file exists in the mounted Drive before running.
img_path = '/content/drive/MyDrive/archive/dataset/covid/01E392EE-69F9-4E33-BFCE-E5C968654078.jpeg'

# Preprocess the image
img_array = preprocess_image(img_path)

# Get the heatmap
heatmap = make_gradcam_heatmap(img_array, model, 'conv5_block3_out') # Replace with your last conv layer's name

# Display the original image with the Grad-CAM heatmap overlay
display_gradcam(img_path, heatmap)
172
+
173
#Evaluation

# Evaluate model on validation data
# NOTE(review): this reuses the validation split as the "test" set — the
# reported accuracy is not from truly held-out data.
test_loss, test_acc = model.evaluate(val_data, verbose=2)
print(f'Test Accuracy: {test_acc:.2f}')
178
+
179
+
180
+ # UI for the model
181
+
182
+
183
+ import gradio as gr
184
+ import numpy as np
185
+ from PIL import Image
186
+ import tensorflow as tf
187
+ from tensorflow.keras.models import Model
188
+ import matplotlib.pyplot as plt
189
+ import cv2 # For color mapping the heatmap
190
+
191
# Define the Grad-CAM function
def make_gradcam_heatmap(img_array, model, last_conv_layer_name):
    """Return a [0, 1] Grad-CAM heatmap for a single-sigmoid-output model.

    NOTE(review): this redefines the identically-named function earlier in the
    file; consider keeping only one copy.
    """
    grad_model = Model([model.inputs], [model.get_layer(last_conv_layer_name).output, model.output])
    with tf.GradientTape() as tape:
        conv_outputs, predictions = grad_model(img_array)
        loss = predictions[:, 0] # For binary classification
    grads = tape.gradient(loss, conv_outputs)
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
    conv_outputs = conv_outputs[0]
    heatmap = tf.reduce_mean(tf.multiply(pooled_grads, conv_outputs), axis=-1)
    heatmap = np.maximum(heatmap, 0) # ReLU activation to make it non-negative
    # Bug fix: guard the normalization — an all-zero heatmap previously
    # divided by zero and returned NaNs.
    max_val = np.max(heatmap)
    if max_val > 0:
        heatmap = heatmap / max_val # Normalize between 0 and 1
    return heatmap
204
+
205
# Function to overlay the heatmap on the original image
def apply_heatmap_to_image(img, heatmap):
    """Blend a [0, 1] Grad-CAM heatmap over a PIL image (60/40 weighting).

    Parameters:
        img: PIL.Image to overlay onto.
        heatmap: 2D array in [0, 1].

    Returns:
        A new PIL.Image with the jet-coloured heatmap blended in.
    """
    # Resize heatmap to match image size (cv2.resize takes (width, height))
    heatmap = cv2.resize(heatmap, (img.size[0], img.size[1]))

    # Convert heatmap to RGB (apply 'jet' colormap)
    heatmap_colored = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)

    # Convert to RGB mode (since OpenCV uses BGR)
    heatmap_colored = cv2.cvtColor(heatmap_colored, cv2.COLOR_BGR2RGB)

    # Bug fix: force the base image to 3 channels so the weighted sum below
    # cannot fail on grayscale or RGBA images (shape mismatch in the original).
    base = np.array(img.convert('RGB'))

    # Overlay the heatmap on the original image
    overlay = base * 0.6 + heatmap_colored * 0.4
    overlay = np.clip(overlay, 0, 255).astype('uint8')
    return Image.fromarray(overlay)
220
+
221
# Define the prediction and explainability function
def predict_and_explain(img):
    """Classify an uploaded X-ray and explain the decision with Grad-CAM.

    Parameters:
        img: numpy array handed over by the Gradio image input.

    Returns:
        (result, confidence_text, heatmap_img): prediction label, formatted
        confidence string, and the PIL image with the heatmap overlay.
    """
    # Bug fix: force RGB — grayscale or RGBA uploads would otherwise break
    # the (224, 224, 3) model input.
    img = Image.fromarray(img).convert('RGB').resize((224, 224)) # Resize image for the model
    img_array = np.array(img) / 255.0 # Normalize pixel values
    img_array = np.expand_dims(img_array, axis=0) # Add batch dimension

    # Get the prediction
    prediction = model.predict(img_array)
    confidence = float(prediction[0][0])
    result = "COVID-19 Positive" if confidence > 0.5 else "Healthy"

    # Generate the Grad-CAM heatmap
    last_conv_layer_name = 'conv5_block3_out' # Update with the actual last convolution layer name
    heatmap = make_gradcam_heatmap(img_array, model, last_conv_layer_name)

    # Apply heatmap on the image
    heatmap_img = apply_heatmap_to_image(img, heatmap)

    # Display confidence and heatmap
    confidence_text = f"Confidence: {confidence:.2f}"
    return result, confidence_text, heatmap_img
242
+
243
# Gradio interface
def create_interface():
    """Assemble and return the Gradio UI wrapping predict_and_explain."""
    output_widgets = [
        gr.Textbox(label="Prediction"),
        gr.Textbox(label="Confidence"),
        gr.Image(label="Heatmap"),
    ]
    return gr.Interface(
        fn=predict_and_explain,
        inputs=gr.Image(type="numpy"),
        outputs=output_widgets,
        title="COVID-19 X-ray Classification with Explainability",
        description="Upload an X-ray image to predict if the patient has COVID-19, see the confidence score, and view the Grad-CAM heatmap.",
    )
253
+
254
# Launch the interface
# Builds the Gradio app defined above and starts serving it.
gr_interface = create_interface()
gr_interface.launch()