Upload 6 files
Browse files- .gitattributes +1 -0
- Notebook/InceptionV3_Brain_Tumor_MRI.ipynb +0 -0
- Results/InceptionV3_Brain_Tumor_MRI.mp4 +3 -0
- app/main.py +65 -0
- app/model.py +176 -0
- requirements.txt +7 -0
- saved_model/InceptionV3_Brain_Tumor_MRI.h5 +3 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
Results/InceptionV3_Brain_Tumor_MRI.mp4 filter=lfs diff=lfs merge=lfs -text
|
Notebook/InceptionV3_Brain_Tumor_MRI.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Results/InceptionV3_Brain_Tumor_MRI.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1ff3674007c28ddaa0e4920e55fe6232bb0fdaf0d4b0bfb00b9909798eb2077f
|
| 3 |
+
size 17354075
|
app/main.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------
# File: app/main.py
# ---------------------------
# Imports grouped per PEP 8: stdlib, third-party, then local.
import io
import traceback

import numpy as np
from fastapi import FastAPI, UploadFile, File, Query
from fastapi.responses import JSONResponse, StreamingResponse
from PIL import Image

# Import the model utilities (prediction + Grad-CAM visualization).
from app.model import predict, gradcam, CLASS_NAMES

app = FastAPI(title="Brain Tumor MRI Classifier (InceptionV3 + Grad-CAM)")
@app.post("/predict")
async def predict_image(file: UploadFile = File(...)):
    """Classify an uploaded MRI image.

    Returns JSON with the predicted class label, its confidence, and the
    full per-class probability distribution.
    """
    try:
        contents = await file.read()
        try:
            pil_img = Image.open(io.BytesIO(contents)).convert("RGB")
        except OSError as e:
            # Pillow raises UnidentifiedImageError (an OSError subclass) for
            # payloads that are not decodable images -- that is a client
            # error, so answer 400 rather than an opaque 500.
            return JSONResponse({"error": f"Invalid image file: {e}"}, status_code=400)
        label, confidence, probs = predict(pil_img)
        return JSONResponse({
            "predicted_label": label,
            "confidence": round(confidence, 3),
            "probabilities": {k: round(v, 6) for k, v in probs.items()}
        })
    except Exception as e:
        # NOTE(review): returning the traceback to the client is handy for
        # local debugging but leaks implementation details -- strip the
        # "trace" field before deploying publicly.
        tb = traceback.format_exc()
        return JSONResponse({"error": str(e), "trace": tb}, status_code=500)
@app.post("/gradcam")
async def gradcam_image(file: UploadFile = File(...), interpolant: float = Query(0.5, ge=0.0, le=1.0)):
    """
    Returns a PNG image (overlay) produced by gradcam().
    `interpolant` controls mixing (0..1).
    """
    try:
        contents = await file.read()
        try:
            pil_img = Image.open(io.BytesIO(contents)).convert("RGB")
        except OSError as e:
            # Pillow raises UnidentifiedImageError (an OSError subclass) for
            # non-image payloads -- report it as a 400 client error, not a 500.
            return JSONResponse({"error": f"Invalid image file: {e}"}, status_code=400)

        # Compute overlay (this calls the optimized gradcam in model.py)
        overlay = gradcam(pil_img, interpolant=float(interpolant))

        # Ensure uint8 RGB; promote a grayscale result to 3 channels so
        # PIL encodes a color PNG.
        overlay = np.asarray(overlay).astype("uint8")
        if overlay.ndim == 2:
            overlay = np.stack([overlay] * 3, axis=-1)

        # Convert to PNG bytes and stream them back.
        buf = io.BytesIO()
        Image.fromarray(overlay).save(buf, format="PNG")
        buf.seek(0)
        return StreamingResponse(buf, media_type="image/png")

    except Exception as e:
        # NOTE(review): traceback in the response aids debugging but leaks
        # internals; remove before production.
        tb = traceback.format_exc()
        return JSONResponse({"error": str(e), "trace": tb}, status_code=500)
# Optional health endpoint
@app.get("/health")
async def health():
    """Liveness probe; also reports the class labels this service predicts."""
    return {"status": "ok", "classes": CLASS_NAMES}

# ---------------------------
# End of app/main.py
# ---------------------------
app/model.py
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------
# File: app/model.py
# ---------------------------
import os

import numpy as np
from PIL import Image
import tensorflow as tf
import cv2
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D

# ---------- GPU setup ----------
# Enable memory growth so TF does not pre-allocate all GPU memory up front.
gpus = tf.config.list_physical_devices("GPU")
if gpus:
    try:
        for g in gpus:
            tf.config.experimental.set_memory_growth(g, True)
    except Exception as e:
        # Memory growth can only be set before the GPUs are initialized;
        # if that already happened, warn and continue with TF defaults.
        print("Warning: Could not set memory growth:", e)

print("Num GPUs Available:", len(gpus))
print("TensorFlow version:", tf.__version__)

# -------- Load model --------
MODEL_PATH = os.getenv("MODEL_PATH", "saved_model/InceptionV3_Brain_Tumor_MRI.h5")
print("Loading model from:", MODEL_PATH)
model = tf.keras.models.load_model(MODEL_PATH)
model.trainable = False  # inference-only service

# -------- Find last conv layer and build grad_model once --------
# Grad-CAM needs the activations of the deepest Conv2D layer.
last_conv_layer = next(
    (layer for layer in reversed(model.layers) if isinstance(layer, Conv2D)), None
)
if last_conv_layer is None:
    raise RuntimeError("No Conv2D layer found in the model; cannot build Grad-CAM.")

# The layer object came straight out of model.layers, so the original
# model.get_layer(last_conv_layer.name) round-trip was redundant.
target_layer = last_conv_layer
grad_model = Model(inputs=model.inputs, outputs=[target_layer.output, model.output])
print("Built grad_model with target layer:", target_layer.name)

# -------- Labels --------
# Order must match the training-time class ordering.
CLASS_NAMES = ["glioma", "meningioma", "notumor", "pituitary"]
# -------- Preprocessing --------
# NOTE(review): InceptionV3's canonical input is 299x299, yet this service
# resizes to 512x512 -- presumably the saved model was trained at that
# resolution; confirm against the training notebook before changing it.
def preprocess_image_pil(img: Image.Image, target_size=(512, 512)):
    """
    Accepts PIL.Image, returns float32 numpy array shaped (1,H,W,3) with values in [0,1].
    """
    rgb = img.convert("RGB").resize(target_size, resample=Image.BILINEAR)
    batch = np.asarray(rgb, dtype="float32") / 255.0
    # Prepend the batch dimension expected by the model.
    return batch[np.newaxis, ...]
def pil_to_tf_tensor(img: Image.Image, target_size=(512, 512)):
    """
    Convert PIL image to a TF tensor float32 (1,H,W,3) scaled to [0,1].
    Uses TF ops to allow better GPU pipeline.
    """
    batch = preprocess_image_pil(img, target_size=target_size)
    return tf.convert_to_tensor(batch, dtype=tf.float32)
# -------- Prediction helper --------
def predict(img: Image.Image):
    """
    Classify a PIL image.

    Returns (label, confidence, prob_dict) where prob_dict maps each class
    name to its probability.
    """
    batch = preprocess_image_pil(img)  # numpy (1,H,W,3)
    # Calling the model directly avoids model.predict() overhead for a
    # single image and works for most Keras models.
    probs = model(batch, training=False).numpy()[0]
    best = int(np.argmax(probs))
    prob_dict = {name: float(probs[i]) for i, name in enumerate(CLASS_NAMES)}
    return CLASS_NAMES[best], float(probs[best]), prob_dict
# --------- Compiled Grad-CAM compute function ---------
# tf.function-compiled: computes the target conv layer's activations and
# their gradients w.r.t. the score of `class_index` in one traced graph.
@tf.function
def _compute_conv_and_grads(img_input, class_index):
    with tf.GradientTape() as tape:
        conv_outputs, preds = grad_model(img_input)
        # grad_model may return its outputs as a list/tuple -> unwrap.
        if isinstance(preds, (list, tuple)):
            preds = preds[0]
        # Index inside the tape context so the op is recorded.
        class_score = preds[:, class_index]
    grads = tape.gradient(class_score, conv_outputs)
    return conv_outputs, grads, preds
def compute_gradcam_overlay(img: Image.Image, interpolant=0.5, target_size=(512, 512)):
    """
    Produce a Grad-CAM overlay for *img*.

    Steps:
      - build the model input tensor
      - cheap forward pass to pick the predicted class index
      - compiled grad function to get conv features + gradients
      - compute heatmap and alpha-blend it onto the resized original

    Args:
        img: input PIL image.
        interpolant: blend factor; 1.0 -> pure original, 0.0 -> pure heatmap.
            Values outside [0, 1] are clamped (robustness fix).
        target_size: (W, H) the model input is resized to.

    Returns:
        overlay as uint8 HxWx3 numpy array. If no gradient path exists,
        returns the resized original image unchanged.
    """
    # Robustness: direct callers may pass out-of-range blend factors; the
    # HTTP layer already enforces [0, 1], so this is backward compatible.
    interpolant = float(np.clip(interpolant, 0.0, 1.0))

    # Build tensor
    input_tf = pil_to_tf_tensor(img, target_size=target_size)  # (1,H,W,3), float32

    # Fast predict to get class index (cheap forward pass)
    preds = model(input_tf, training=False)
    class_idx = int(np.argmax(preds.numpy()[0]))

    # Use compiled function to compute conv features and grads for that class
    conv_out, grads, _ = _compute_conv_and_grads(
        input_tf, tf.constant(class_idx, dtype=tf.int64)
    )

    # Convert to numpy and handle shapes robustly
    conv_out_np = conv_out.numpy()
    grads_np = grads.numpy() if grads is not None else None

    H = input_tf.shape[1]
    W = input_tf.shape[2]

    if grads_np is None:
        # Fallback: gradients None -> return original image (no heatmap).
        original_img = np.array(img.resize((W, H))).astype("uint8")
        if original_img.ndim == 2:
            original_img = np.stack([original_img] * 3, axis=-1)
        return original_img

    # Drop the batch dimension: (1,Hf,Wf,C) -> (Hf,Wf,C)
    if conv_out_np.ndim == 4 and conv_out_np.shape[0] == 1:
        conv_out_np = conv_out_np[0]
    if grads_np.ndim == 4 and grads_np.shape[0] == 1:
        grads_np = grads_np[0]

    # Grad-CAM: per-channel weights = gradients averaged over space
    pooled_grads = np.mean(grads_np, axis=(0, 1))  # shape (C,)

    # Weighted sum of conv feature maps, ReLU, then normalize to [0,1]
    heatmap = np.sum(conv_out_np * pooled_grads[np.newaxis, np.newaxis, :], axis=-1)
    heatmap = np.maximum(heatmap, 0.0)
    max_val = np.max(heatmap) if heatmap.size else 0.0
    if max_val > 0:
        heatmap = heatmap / (max_val + 1e-9)
    else:
        heatmap = np.zeros_like(heatmap, dtype=np.float32)

    # Resize heatmap to the model-input size and colorize it
    original_img = np.array(img.resize((W, H))).astype("float32")
    if original_img.ndim == 2:
        original_img = np.stack([original_img] * 3, axis=-1)

    heatmap_resized = cv2.resize((heatmap * 255.0).astype("uint8"), (W, H))
    heatmap_color = cv2.applyColorMap(heatmap_resized, cv2.COLORMAP_JET)  # BGR
    heatmap_color = cv2.cvtColor(heatmap_color, cv2.COLOR_BGR2RGB).astype("float32")

    # Ensure original image is in uint8 0..255
    orig_uint8 = np.clip(original_img, 0, 255).astype("uint8")

    # Combine: interpolant * original + (1 - interpolant) * heatmap_color
    overlay = np.clip(
        orig_uint8.astype("float32") * interpolant
        + heatmap_color * (1.0 - interpolant),
        0, 255,
    ).astype("uint8")
    return overlay
# Expose functions for main.py
# Consistency fix: "gradcam" added -- it is the name app/main.py imports,
# so it belongs in the declared public API.
__all__ = ["model", "grad_model", "predict", "compute_gradcam_overlay", "gradcam", "CLASS_NAMES"]

# Backwards-compatible function name expected by main.py
def gradcam(img: Image.Image, interpolant=0.5):
    """Alias for compute_gradcam_overlay (name expected by app/main.py)."""
    return compute_gradcam_overlay(img, interpolant=interpolant)

# ---------------------------
# End of app/model.py
# ---------------------------
requirements.txt
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi
|
| 2 |
+
uvicorn
|
| 3 |
+
tensorflow
|
| 4 |
+
numpy
|
| 5 |
+
python-multipart
|
| 6 |
+
pillow
|
| 7 |
+
opencv-python
|
saved_model/InceptionV3_Brain_Tumor_MRI.h5
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bf2329f8276083fe7883defc1e647555c6dee8a161b83698ca53dbe379a2c271
|
| 3 |
+
size 100823584
|