Upload 3 files
Browse files- app.py +108 -0
- requirements.txt +6 -0
- train_model.py +92 -0
app.py
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# app.py
|
| 2 |
+
|
| 3 |
+
import streamlit as st
|
| 4 |
+
import tensorflow as tf
|
| 5 |
+
from tensorflow.keras.preprocessing import image
|
| 6 |
+
import numpy as np
|
| 7 |
+
from PIL import Image
|
| 8 |
+
import cv2 # OpenCV for easy image processing (Make sure it's in requirements.txt)
|
| 9 |
+
|
| 10 |
+
# --- CONFIGURATION ---
MODEL_PATH = 'model.keras'  # Path to the trained model saved by train_model.py
IMAGE_SIZE = (224, 224)  # Input size the model was trained with (VGG16 default)
# NOTE(review): index order assumes flow_from_directory assigned 0 -> "no tumor"
# directory, 1 -> "tumor" directory (alphabetical) — confirm against
# train_generator.class_indices from training.
CLASS_NAMES = ['No Tumor', 'Tumor']  # 0: No Tumor, 1: Tumor
|
| 14 |
+
|
| 15 |
+
# --- Helper Function: Load Model ---
# Cached with st.cache_resource so Streamlit loads the model only once
# per process instead of on every script rerun.
@st.cache_resource
def load_model():
    """Load the trained Keras model from MODEL_PATH.

    Returns the loaded model, or None if loading fails (the error is
    surfaced in the UI via st.error).
    """
    try:
        return tf.keras.models.load_model(MODEL_PATH)
    except Exception as e:
        st.error(f"Error loading model: {e}")
        return None
|
| 25 |
+
|
| 26 |
+
# --- Helper Function: Preprocess Image ---
def preprocess_image(img):
    """Turn an uploaded PIL image into a normalized model input batch.

    Returns a float array of shape (1, 224, 224, 3) with pixel values
    scaled to [0, 1], matching the rescaling used during training.
    """
    # PIL image -> RGB numpy array (drops alpha / converts grayscale).
    rgb = np.array(img.convert('RGB'))

    # Resize to the training input size; IMAGE_SIZE is square, so the
    # (width, height) ordering of cv2.resize is irrelevant here.
    resized = cv2.resize(rgb, IMAGE_SIZE)

    # Normalize to [0, 1] and add the leading batch dimension:
    # (224, 224, 3) -> (1, 224, 224, 3).
    return np.expand_dims(resized / 255.0, axis=0)
|
| 40 |
+
|
| 41 |
+
# --- Streamlit UI Start ---

# 1. Page Title (Requirement a)
st.title("🧠 Brain Tumor Detection System (AI Powered)")

# Load the model once (cached by st.cache_resource).
model = load_model()

if model is None:
    st.warning("Model file 'model.keras' not found. Please run 'train_model.py' first and place the file here.")
else:
    # 2. Image Upload Section (Requirement b)
    uploaded_file = st.file_uploader(
        "Upload a Brain MRI Image (.jpg, .png) for Classification:",
        type=["jpg", "png", "jpeg"]
    )

    if uploaded_file is not None:
        # Display image preview (Requirement b).
        image_preview = Image.open(uploaded_file)
        # use_container_width replaces use_column_width, which is
        # deprecated and removed in recent Streamlit releases.
        st.image(image_preview, caption='Uploaded MRI Image', use_container_width=True)
        st.write("")

        # 3. Prediction Button (Requirement c)
        if st.button("🔬 Detect Tumor"):
            st.write("Processing...")

            # --- Prediction Logic ---
            try:
                # 1. Preprocess the image into a (1, 224, 224, 3) batch.
                processed_img = preprocess_image(image_preview)

                # 2. Predict. The model's single sigmoid output is read
                #    as the probability of class 1 ('Tumor').
                prediction = model.predict(processed_img)

                # 3. Interpret the result, e.g. [[0.98]] or [[0.02]].
                confidence_score = prediction[0][0] * 100

                if confidence_score > 50:
                    # Model is more confident in 'Tumor' (class 1).
                    result_index = 1
                else:
                    # Model favors 'No Tumor' (class 0); report the
                    # confidence in that class: 100 - P(Tumor).
                    result_index = 0
                    confidence_score = 100 - confidence_score

                # NOTE(review): assumes class index 1 == 'Tumor' — confirm
                # against train_generator.class_indices from training.
                predicted_class = CLASS_NAMES[result_index]

                # 4. Output Section (Requirement d)
                st.subheader("✅ Prediction Results")

                if predicted_class == 'Tumor':
                    st.error(f"**Prediction:** **{predicted_class}** - Immediate medical consultation is advised.")
                else:
                    st.success(f"**Prediction:** **{predicted_class}** - Looks clear.")

                # Display Confidence Score
                st.info(f"**Confidence Score:** **{confidence_score:.2f}%**")

            except Exception as e:
                st.error(f"An error occurred during prediction: {e}")

    else:
        st.info("Please upload an image file to begin detection.")

# --- End of Streamlit App ---
|
requirements.txt
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
tensorflow
|
| 2 |
+
keras
|
| 3 |
+
streamlit
|
| 4 |
+
numpy
|
| 5 |
+
Pillow
|
| 6 |
+
opencv-python-headless  # headless build: avoids missing libGL on server/cloud deployments
|
train_model.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# train_model.py

import tensorflow as tf
from tensorflow.keras.applications import VGG16
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout

# --- Step 1: Define Data Paths and Parameters ---
IMAGE_SIZE = (224, 224)  # VGG16 needs 224x224 input
BATCH_SIZE = 32
EPOCHS = 5  # Increase to 10-15 for better results

# Update this path to your dataset folder (one subdirectory per class).
DATA_DIR = 'dataset'

# --- Step 2: Data Augmentation and Loading ---
# Augmentation is applied to TRAINING data only. The original code reused
# this generator for validation, which applied shear/zoom/flip to
# validation images and distorted the reported validation accuracy.
train_datagen = ImageDataGenerator(
    rescale=1./255,  # Normalize pixels to 0-1
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    validation_split=0.2  # Hold out 20% of the data for validation
)

# Rescale-only generator for validation. It uses the same
# validation_split fraction, so the train/validation partition (which is
# determined by file position) stays consistent between the generators.
val_datagen = ImageDataGenerator(
    rescale=1./255,
    validation_split=0.2
)

train_generator = train_datagen.flow_from_directory(
    DATA_DIR,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='binary',  # Binary classification: one label per image
    subset='training'
)

validation_generator = val_datagen.flow_from_directory(
    DATA_DIR,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='binary',
    subset='validation'  # Held-out validation data
)

# Class indices are assigned alphabetically by directory name. Print the
# mapping so the inference app's CLASS_NAMES order can be verified.
print("Class indices:", train_generator.class_indices)
print("Data Generators Ready!")

# --- Step 3: Build the Transfer Learning Model (VGG16) ---

# Load pre-trained VGG16 model (without the top classification layers).
base_model = VGG16(
    weights='imagenet',
    include_top=False,  # Drop the ImageNet classifier head
    input_shape=(224, 224, 3)
)

# Freeze base layers (requirement: don't train VGG16 weights).
base_model.trainable = False

# Create the Sequential model: frozen VGG16 base + a new classifier head.
model = Sequential([
    base_model,  # VGG16 base
    Flatten(),  # Flatten the convolutional feature maps
    Dense(512, activation='relu'),  # Custom layer 1
    Dropout(0.5),  # Regularization to prevent overfitting
    Dense(1, activation='sigmoid')  # 1 neuron for binary classification (Tumor/No Tumor)
])

# --- Step 4: Compile and Train the Model ---
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',  # Binary cross-entropy for 2 classes
    metrics=['accuracy']
)

model.summary()

print("Model Training Started...")
# max(1, ...) guards against zero steps when a subset contains fewer
# samples than a single batch (floor division would otherwise yield 0).
history = model.fit(
    train_generator,
    steps_per_epoch=max(1, train_generator.samples // BATCH_SIZE),
    epochs=EPOCHS,
    validation_data=validation_generator,
    validation_steps=max(1, validation_generator.samples // BATCH_SIZE)
)

print("Model Training Complete!")

# --- Step 5: Save the Model (Requirement) ---
MODEL_FILE_NAME = 'model.keras'
model.save(MODEL_FILE_NAME)
print(f"Model saved as: {MODEL_FILE_NAME}")

# Optional: Print final accuracy
print(f"Final Validation Accuracy: {history.history['val_accuracy'][-1]:.4f}")
|