|
|
import os |
|
|
import traceback |
|
|
import torch |
|
|
from flask import Flask, render_template, request |
|
|
from PIL import Image |
|
|
import numpy as np |
|
|
import cv2 |
|
|
from werkzeug.utils import secure_filename |
|
|
|
|
|
|
|
|
|
|
|
from gradcam import GradCAM, model, classes, get_model |
|
|
|
|
|
from torchvision import transforms |
|
|
|
|
|
# Flask application serving the classifier UI. Uploaded images and the
# generated Grad-CAM overlays are written under static/uploads so the
# rendered templates can reference them as static assets.
app = Flask(__name__)


# Destination directory for user uploads and CAM overlays; created eagerly
# so file.save() never fails on a missing path.
UPLOAD_FOLDER = "static/uploads"


os.makedirs(UPLOAD_FOLDER, exist_ok=True)


app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER


# Whitelist of accepted image extensions (compared case-insensitively
# by allowed_file()).
ALLOWED_EXT = {"png", "jpg", "jpeg", "bmp"}
|
|
|
|
|
# Preprocessing pipeline for inference: fixed 224x224 input, tensor
# conversion, and normalization with the standard ImageNet channel
# mean/std (matches torchvision models pretrained on ImageNet).
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406],
                         [0.229, 0.224, 0.225])
])
|
|
|
|
|
def allowed_file(filename):
    """Return True when *filename* carries an extension listed in ALLOWED_EXT.

    The check is case-insensitive and only the text after the final dot
    counts as the extension.
    """
    if "." not in filename:
        return False
    extension = filename.rsplit(".", 1)[1].lower()
    return extension in ALLOWED_EXT
|
|
|
|
|
@app.route('/')
def index():
    """Serve the landing page containing the image-upload form."""
    home_page = render_template('index.html')
    return home_page
|
|
|
|
|
@app.route('/predict', methods=['POST'])
def predict():
    """Classify an uploaded image and render a Grad-CAM overlay for it.

    Expects a multipart POST with an ``image`` file field. On success,
    renders ``result.html`` with the predicted class, confidence, and the
    filenames of the saved upload and its CAM overlay. Returns a plain
    400 response for invalid uploads and ``error.html`` with status 500
    when the model is unavailable or inference fails.
    """
    global model
    # Lazily (re)load the model if the import-time load in gradcam failed.
    if model is None:
        model = get_model(reload=True)
        if model is None:
            err = (
                "Model is not available. Please upload a valid `model.pth` to the Space "
                "or check the application logs for details."
            )
            return render_template('error.html', error_message=err), 500

    if 'image' not in request.files:
        return "No image uploaded", 400

    file = request.files['image']
    if file.filename == '':
        return "No selected image", 400

    if not allowed_file(file.filename):
        return "Unsupported file type", 400

    # Sanitize the client-supplied name before touching the filesystem.
    filename = secure_filename(file.filename)
    img_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    file.save(img_path)

    try:
        image = Image.open(img_path).convert("RGB")
        input_tensor = transform(image).unsqueeze(0)

        # Run inference on whatever device the model already lives on;
        # fall back to a best-guess device if the model has no parameters.
        try:
            model_device = next(model.parameters()).device
        except Exception:
            model_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        input_tensor = input_tensor.to(model_device)

        with torch.no_grad():
            output = model(input_tensor)
        pred_idx = int(torch.argmax(output, dim=1).item())
        confidence = float(torch.softmax(output, dim=1)[0][pred_idx].item())

        # Choose the deepest conv block for Grad-CAM. `denseblock4` targets
        # DenseNet-style models; fall back progressively for other
        # architectures that still expose a `features` attribute.
        try:
            target_layer = getattr(model.features, "denseblock4", None)
            if target_layer is None:
                target_layer = model.features[-1]
        except Exception:
            target_layer = model.features

        gradcam = GradCAM(model, target_layer=target_layer)

        # gradcam.generate runs its own forward/backward pass, so the
        # earlier no_grad inference does not interfere with it.
        cam_map, probs, returned_idx = gradcam.generate(input_tensor, class_idx=pred_idx)

        # Build the overlay: resized original blended with a JET heatmap.
        # cam_map is assumed to be float in [0, 1] at 224x224 — matches the
        # resize below; TODO confirm against gradcam.generate.
        orig_np = np.array(image.resize((224, 224))).astype(np.uint8)

        heatmap = np.uint8(255 * cam_map)
        heatmap_color = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
        # applyColorMap returns BGR; convert so blending happens in RGB.
        heatmap_color = cv2.cvtColor(heatmap_color, cv2.COLOR_BGR2RGB)

        overlay = (0.6 * orig_np.astype(np.float32) + 0.4 * heatmap_color.astype(np.float32))
        overlay = np.clip(overlay, 0, 255).astype(np.uint8)

        # BUG FIX: the overlay filename was the broken literal
        # "cam_(unknown)", so every request wrote to the same file and
        # clobbered previous results. Derive it from the sanitized upload
        # name so each upload keeps its own CAM image.
        cam_filename = f"cam_{filename}"
        cam_path = os.path.join(app.config['UPLOAD_FOLDER'], cam_filename)

        # imwrite expects BGR; overlay is RGB at this point.
        cv2.imwrite(cam_path, cv2.cvtColor(overlay, cv2.COLOR_RGB2BGR))

        return render_template(
            'result.html',
            prediction=classes[pred_idx] if pred_idx < len(classes) else str(pred_idx),
            confidence=f"{confidence * 100:.2f}%",
            uploaded_image=filename,
            cam_image=cam_filename
        )

    except Exception as e:
        # Log the full traceback server-side but show only the message to
        # the user.
        tb = traceback.format_exc()
        print("Error during prediction:", e)
        print(tb)
        return render_template('error.html', error_message=str(e)), 500
|
|
|
|
|
if __name__ == '__main__':
    # The hosting platform (e.g. Hugging Face Spaces) injects PORT; fall
    # back to 7860 for local runs.
    port = int(os.environ.get("PORT", 7860))

    # SECURITY FIX: debug was hard-coded to True while binding 0.0.0.0,
    # which exposes the Werkzeug interactive debugger (arbitrary code
    # execution) to anyone who can reach the server. Debug mode is now an
    # explicit opt-in via the FLASK_DEBUG environment variable.
    debug_mode = os.environ.get("FLASK_DEBUG", "0").lower() in {"1", "true", "yes"}

    app.run(host="0.0.0.0", port=port, debug=debug_mode)
|
|
|