Abs6187 committed
Commit 2494ccd · verified · 1 Parent(s): e64dd2d

Update app.py

Files changed (1): app.py (+26 -105)
app.py CHANGED
@@ -1,8 +1,7 @@
 import os
-# --- FIX: Disable GPU to prevent CUDA initialization errors ---
 os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
 
-from flask import Flask, render_template, request, jsonify, session, redirect, url_for, send_from_directory
+from flask import Flask, render_template, request, jsonify, redirect, url_for, send_from_directory
 from flask_pymongo import PyMongo
 from flask_bcrypt import Bcrypt
 import tensorflow as tf
@@ -17,30 +16,20 @@ import uuid
 import secrets
 import logging
 
-# -------------------- Setup & Config --------------------
-
-# Load environment variables
 load_dotenv()
 
 app = Flask(__name__)
 
-# Configurations
 app.config["MONGO_URI"] = os.getenv("MONGODB_URI") or os.getenv("MONGO_URI")
-# Keep your format, just ensure it's set once per process
 app.config['SECRET_KEY'] = os.getenv("SECRET_KEY") or secrets.token_hex(16)
-
-# Slightly safer cookie defaults without changing your session usage
 app.config.setdefault("SESSION_COOKIE_HTTPONLY", True)
 app.config.setdefault("SESSION_COOKIE_SAMESITE", "Lax")
 
 GEMINI_API_KEY = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")
 
-# Basic logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger("app")
 
-# Initialize extensions
-# If MONGO_URI missing, still construct PyMongo but avoid immediate use crashes
 try:
     if app.config["MONGO_URI"]:
         mongo = PyMongo(app, tlsCAFile=certifi.where())
@@ -49,24 +38,20 @@ try:
         mongo = PyMongo(app, tlsCAFile=certifi.where())
 except Exception as e:
     logger.error(f"Mongo initialization error: {e}")
-    # Keep an object to avoid NameError later
     mongo = None
 
 bcrypt = Bcrypt(app)
 
-# Configure Gemini
-gemini_model = "gemini-2.0-flash"
+gemini_model = None
 if GEMINI_API_KEY:
     try:
         genai.configure(api_key=GEMINI_API_KEY)
-        # Keep your model name
         gemini_model = genai.GenerativeModel('gemini-2.0-flash')
     except Exception as e:
         logger.error(f"Gemini initialization error: {e}")
 else:
     logger.warning("GEMINI_API_KEY/GOOGLE_API_KEY not set. /chat will return a friendly error.")
 
-# --- Model Configuration ---
 MODEL_CONFIG = {
     "Pneumonia": {
         "path": "model/best_pneumonia_model.h5",
@@ -102,11 +87,9 @@ MODEL_CONFIG = {
     }
 }
 
-# --- Model Loading ---
 models = {}
 
 def load_all_models():
-    """Loads all models from the 'model' directory based on MODEL_CONFIG."""
     for name, config in MODEL_CONFIG.items():
         try:
             model_path = config["path"]
@@ -118,15 +101,11 @@ def load_all_models():
         except Exception as e:
             logger.error(f"Error loading model {name}: {e}")
 
-# Load models on application startup
 load_all_models()
 
-# --- Image Preprocessing ---
 def preprocess_image(img_path, target_size=(224, 224)):
-    """Preprocesses the image for model prediction."""
     img = image.load_img(img_path, target_size=target_size)
     img_array = image.img_to_array(img)
-    # Handle grayscale or alpha automatically by broadcasting if needed
     if img_array.ndim == 2:
         img_array = np.stack([img_array]*3, axis=-1)
     elif img_array.shape[-1] == 4:
@@ -135,102 +114,68 @@ def preprocess_image(img_path, target_size=(224, 224)):
     img_array = img_array.astype("float32") / 255.0
     return img_array
 
-# --- Grad-CAM Utilities ---
 def _safe_get_layer(model, layer_name):
-    """Return layer if exists; else None."""
     try:
         return model.get_layer(layer_name)
     except Exception:
         return None
 
 def find_last_conv_layer(model):
-    """Finds the name of the last convolutional layer in a model."""
-    logger.info("--- DEBUG: Searching for last convolutional layer ---")
     for layer in reversed(model.layers):
         if isinstance(layer, (tf.keras.layers.Conv2D, tf.keras.layers.DepthwiseConv2D)):
-            # 4D output: (batch, h, w, channels)
             try:
                 out_shape = layer.output_shape
             except Exception:
                 out_shape = None
             if out_shape and len(out_shape) == 4:
-                logger.info(f"Found candidate last conv layer: {layer.name}")
                 return layer.name
     raise ValueError("Could not automatically find a convolutional layer in the model.")
 
 def get_gradcam_heatmap(model, img_array, last_conv_layer_name, pred_index=None):
-    """Generates a Grad-CAM heatmap."""
-    # If configured layer isn't present, auto-detect
     if not _safe_get_layer(model, last_conv_layer_name):
         last_conv_layer_name = find_last_conv_layer(model)
-
     conv_layer = model.get_layer(last_conv_layer_name)
-    grad_model = tf.keras.models.Model(
-        [model.inputs], [conv_layer.output, model.output]
-    )
-
+    grad_model = tf.keras.models.Model([model.inputs], [conv_layer.output, model.output])
     with tf.GradientTape() as tape:
         conv_outputs, preds = grad_model(img_array, training=False)
-
         if isinstance(preds, (list, tuple)):
             preds = preds[0]
-
-        # Ensure preds is a tensor
         preds = tf.convert_to_tensor(preds)
-
-        # If model is binary with single logit/sigmoid output
         if preds.shape.rank is not None and preds.shape[-1] == 1:
             class_channel = preds[:, 0]
         else:
             if pred_index is None:
                 pred_index = tf.argmax(preds[0])
             class_channel = preds[:, pred_index]
-
     grads = tape.gradient(class_channel, conv_outputs)
     if grads is None:
-        # Fallback: no gradient (e.g., custom layers). Return uniform zeros heatmap.
         heatmap = tf.zeros(conv_outputs.shape[1:3], dtype=tf.float32)
         return heatmap.numpy()
-
     pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
     conv_outputs = conv_outputs[0]
     heatmap = tf.tensordot(conv_outputs, pooled_grads, axes=(2, 0))
-
     heatmap = tf.maximum(heatmap, 0)
     denom = tf.math.reduce_max(heatmap)
     heatmap = heatmap / (denom + 1e-8)
     return heatmap.numpy()
 
 def save_gradcam_image(img_path, heatmap, output_path, threshold=0.6, alpha=0.4):
-    """
-    Saves the Grad-CAM image by highlighting only the most important areas
-    with light red spots.
-    """
     img = cv2.imread(img_path)
     if img is None:
         raise ValueError("Failed to read image with OpenCV.")
-    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # Work with RGB
-
+    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
     heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
-
-    # Create a mask where the heatmap is above the threshold
     mask = heatmap > threshold
-
-    # Create a red overlay
     overlay = np.zeros_like(img, dtype=np.uint8)
-    overlay[mask] = [255, 0, 0] # Red color for highlighted spots
-
-    # Blend the original image with the red overlay using the mask
+    overlay[mask] = [255, 0, 0]
     superimposed_img = cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0)
-
-    # Areas outside the mask should be the original image
     superimposed_img[~mask] = img[~mask]
-
     superimposed_img = cv2.cvtColor(superimposed_img, cv2.COLOR_RGB2BGR)
     cv2.imwrite(output_path, superimposed_img)
     return output_path
 
-# -------------------- Routes --------------------
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+TEST_IMAGES_DIR = os.path.join(BASE_DIR, 'testimages')
 
 @app.route("/")
 def home():
@@ -240,44 +185,48 @@ def home():
 def serve_tmp_file(filename):
     return send_from_directory('/tmp', filename)
 
+@app.route('/testimages/<path:filename>')
+def serve_test_image(filename):
+    return send_from_directory(TEST_IMAGES_DIR, filename)
+
+@app.route('/example_images')
+def example_images():
+    try:
+        files = []
+        if os.path.isdir(TEST_IMAGES_DIR):
+            for f in os.listdir(TEST_IMAGES_DIR):
+                lf = f.lower()
+                if lf.endswith(('.png', '.jpg', '.jpeg')):
+                    files.append(url_for('serve_test_image', filename=f))
+        return jsonify({"images": files})
+    except Exception as e:
+        logger.error(f"example_images error: {e}")
+        return jsonify({"images": []})
+
 @app.route('/login', methods=['GET', 'POST'])
 def login():
-    # Authentication removed; redirect to main app
     return redirect(url_for('index'))
 
 @app.route('/signup', methods=['GET', 'POST'])
 def signup():
-    # Authentication removed; redirect to main app
     return redirect(url_for('index'))
 
 @app.route('/index')
 def index():
-    # Publicly accessible index
     return render_template('index.html')
 
 @app.route('/logout')
 def logout():
-    # Authentication removed; redirect to main app
     return redirect(url_for('index'))
 
 def _postprocess_binary_prediction(raw):
-    """
-    Normalize binary outputs across shapes:
-    - (1,) or (N,) : sigmoid probabilities
-    - (1,1) or (N,1) : sigmoid probabilities
-    logits also supported (auto-sigmoid)
-    Returns probability in [0,1].
-    """
     arr = np.array(raw, dtype=np.float32)
     arr = np.squeeze(arr)
-    # If scalar, keep it
     if arr.ndim == 0:
         prob = float(arr)
-        # Heuristic: if obviously a logit (|x|>1 and not in [0,1]), apply sigmoid
         if prob < 0.0 or prob > 1.0:
             prob = float(1.0 / (1.0 + np.exp(-prob)))
         return min(max(prob, 0.0), 1.0)
-    # If 1D vector, take first
     prob = float(arr[0])
     if prob < 0.0 or prob > 1.0:
         prob = float(1.0 / (1.0 + np.exp(-prob)))
@@ -287,33 +236,23 @@ def _postprocess_binary_prediction(raw):
 def predict():
     if "file" not in request.files:
         return jsonify({"error": "No file part"}), 400
-
     file = request.files["file"]
     model_name = request.form.get("model")
-
     if not file or file.filename == "":
         return jsonify({"error": "No selected file"}), 400
-
     if model_name not in models:
         return jsonify({"error": "Invalid model selected"}), 400
-
     try:
         filename = f"{uuid.uuid4()}_{file.filename}"
         filepath = os.path.join("/tmp", filename)
         file.save(filepath)
-
         model_config = MODEL_CONFIG[model_name]
         model = models[model_name]
         labels = model_config["labels"]
         input_size = model_config.get("input_size", (224, 224))
-
         img_array = preprocess_image(filepath, target_size=input_size)
         prediction = model.predict(img_array, verbose=0)
-
-        # Ensure numpy array
         prediction = np.array(prediction)
-
-        # Binary case (2 labels) with single neuron output (logit or sigmoid)
         if len(labels) == 2 and prediction.ndim >= 1 and prediction.shape[-1] in (1,) and prediction.size >= 1:
             prob_pos = _postprocess_binary_prediction(prediction)
             if prob_pos >= 0.5:
@@ -325,12 +264,10 @@ def predict():
                 predicted_label = labels[0]
                 confidence = 1.0 - prob_pos
         else:
-            # Multi-class: softmax or logits
             if prediction.ndim == 2:
                 vec = prediction[0]
             else:
                 vec = prediction.reshape(-1)
-            # If appears to be logits, apply softmax for confidence; otherwise trust as probs
             if np.any(vec < 0) or np.any(vec > 1) or not np.isclose(np.sum(vec), 1.0, atol=1e-3):
                 exps = np.exp(vec - np.max(vec))
                 probs = exps / (np.sum(exps) + 1e-8)
@@ -339,25 +276,16 @@
             predicted_index = int(np.argmax(probs))
             predicted_label = labels[predicted_index]
             confidence = float(np.max(probs))
-
         gradcam_url = None
         try:
-            logger.info(f"--- Generating Grad-CAM for model: {model_name} ---")
             last_conv_layer_name = MODEL_CONFIG[model_name].get('last_conv_layer') or ""
             heatmap = get_gradcam_heatmap(model, img_array, last_conv_layer_name, pred_index=predicted_index)
-
             gradcam_filename = f"gradcam_{filename}"
             gradcam_filepath = os.path.join("/tmp", gradcam_filename)
             save_gradcam_image(filepath, heatmap, gradcam_filepath)
             gradcam_url = url_for('serve_tmp_file', filename=gradcam_filename)
-            logger.info("--- Successfully generated Grad-CAM image ---")
         except Exception as e:
-            logger.error(f"--- Grad-CAM Generation FAILED for model: {model_name} --- Error: {e}")
-            try:
-                model.summary(print_fn=lambda x: logger.info(x))
-            except Exception:
-                pass
-
+            logger.error(f"Grad-CAM error: {e}")
         return jsonify({
             "original_image": url_for('serve_tmp_file', filename=filename),
             "gradcam_image": gradcam_url,
@@ -374,8 +302,6 @@ def chat():
     data = request.get_json(silent=True) or {}
     user_message = data.get("message", "")
     prediction_context = data.get("context") or {}
-
-    # Guard against missing keys
     model_used = prediction_context.get('model_used', 'Unknown Model')
    pred_label = prediction_context.get('prediction', 'Unknown')
     conf = prediction_context.get('confidence', 0.0)
@@ -383,7 +309,6 @@
         conf_pct = float(conf) * 100.0
     except Exception:
         conf_pct = 0.0
-
     prompt = f"""
 You are a helpful medical assistant chatbot.
 A medical image was analyzed with the following results:
@@ -394,20 +319,16 @@ The user's question is: "{user_message}"
 Based on this context, provide a helpful and informative response.
 Do not provide a diagnosis. Advise the user to consult a medical professional.
 """
-
     try:
         if gemini_model is None:
             return jsonify({"error": "Gemini API not configured. Set GEMINI_API_KEY in environment."}), 500
         response = gemini_model.generate_content(prompt)
-        # Some SDKs return .text; guard if attribute missing
         text = getattr(response, "text", None)
         if not text:
-            # Try to stringify safely
            text = str(response)
         return jsonify({"response": text})
     except Exception as e:
         return jsonify({"error": str(e)}), 500
 
 if __name__ == "__main__":
-    # Keep your debug flag as-is
     app.run(debug=True)
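For quick manual verification, here is a minimal client sketch (not part of the commit) that exercises the new /example_images route and the existing /predict route. The server address, the local image path, and the commented response shapes are assumptions based on the routes shown in the diff above.

```python
# Hypothetical local test client for the routes touched by this commit.
# Assumes the Flask dev server is running at the address below.
import requests

BASE = "http://127.0.0.1:5000"  # assumed host/port

# New route added in this commit: lists images found in the testimages/ folder.
resp = requests.get(f"{BASE}/example_images")
print(resp.json())  # expected shape: {"images": ["/testimages/...", ...]}

# Existing /predict route: multipart upload with "file" and "model" form fields.
# "Pneumonia" is one of the keys defined in MODEL_CONFIG.
with open("sample_xray.jpg", "rb") as fh:  # hypothetical local image
    resp = requests.post(
        f"{BASE}/predict",
        files={"file": fh},
        data={"model": "Pneumonia"},
    )
print(resp.json())  # includes the "original_image" and "gradcam_image" URLs
```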