Hammad712 commited on
Commit
fdbb1bf
·
verified ·
1 Parent(s): b9d1861

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +201 -103
app.py CHANGED
@@ -1,50 +1,55 @@
1
  #!/usr/bin/env python3
2
  """
3
- Streamlit Brain MRI Tumor Detection App (updated, safe startup + LLM safety)
4
- - Monkeypatches torch.classes.__path__ before importing streamlit to avoid a Streamlit <-> PyTorch watcher crash.
 
 
 
5
  - Returns probabilistic model output (label + confidence).
6
- - Adds a visible medical disclaimer.
7
- - Adds robust error handling around the Groq (Deepseek R1) call and ensures the LLM output contains a safety sentence.
8
- - Keeps your original UI/CSS with small UX improvements.
9
  """
10
 
11
  import os
12
  import logging
13
  import traceback
 
 
 
14
 
15
  # ------------------ Safe startup: import torch first and monkeypatch ------------------
16
  # This avoids Streamlit's file-watcher triggering PyTorch C++ registry introspection errors.
17
  try:
18
- import torch
19
- # Force a benign __path__ so Streamlit's watcher won't attempt unsafe introspection.
20
  try:
21
- # If torch.classes exists, ensure __path__ is present and is a harmless list.
22
  if hasattr(torch, "classes"):
23
- # Some torch builds may already have __path__; overwrite safely.
24
  torch.classes.__path__ = []
25
  except Exception as _e:
26
- # If something goes wrong, don't crash the app at module import time.
27
  logging.warning("Failed to set torch.classes.__path__: %s", _e)
28
  except Exception as e:
29
- # If torch can't be imported at all, we still continue so Streamlit can display an error to the user.
30
- # Log it; later we'll surface a friendly message in the UI.
31
  logging.error("Unable to import torch at startup: %s\n%s", e, traceback.format_exc())
32
- torch = None
33
 
34
- # ------------------ Now safe to import Streamlit and other packages ------------------
35
- import streamlit as st
36
- from PIL import Image
37
- from io import BytesIO
38
- import base64
39
- from transformers import ViTForImageClassification, ViTImageProcessor
40
- from groq import Groq
41
- import numpy as np
42
- import torch.nn.functional as F
 
 
 
43
 
44
  # ------------------ Logging ------------------
45
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
46
 
47
- # ------------------ Page config + CSS ------------------
48
  st.set_page_config(layout="wide", page_title="Brain MRI Tumor Detection")
49
  combined_css = """
50
  .main, .sidebar .sidebar-content { background-color: #1c1c1c; color: #f0f2f6; }
@@ -54,8 +59,8 @@ combined_css = """
54
  .title {
55
  font-size: 3rem;
56
  font-weight: bold;
57
- display: flex;
58
- align-items: center;
59
  justify-content: center;
60
  }
61
  .colorful-text {
@@ -78,24 +83,23 @@ combined_css = """
78
  """
79
  st.markdown(f"<style>{combined_css}</style>", unsafe_allow_html=True)
80
 
81
- # ------------------ App header ------------------
82
  st.markdown(
83
  '<div class="title"><span class="colorful-text">Brain MRI</span> <span class="black-white-text">Tumor Detection</span></div>',
84
- unsafe_allow_html=True
85
  )
86
  st.markdown(
87
  '<div class="custom-text">Upload an MRI image to detect a brain tumor and receive next steps and recommendations.</div>',
88
- unsafe_allow_html=True
89
  )
90
 
91
- # Medical disclaimer (visible)
92
  st.markdown(
93
  "<div class='disclaimer'>⚠️ This app is experimental and informational only. It is NOT a medical diagnosis. "
94
  "If you have health concerns, consult a licensed medical professional. In emergencies call your local emergency number.</div>",
95
- unsafe_allow_html=True
96
  )
97
 
98
- # ------------------ Model loading with graceful errors ------------------
99
  repository_id = "EnDevSols/brainmri-vit-model"
100
 
101
  model = None
@@ -103,23 +107,21 @@ feature_extractor = None
103
  model_load_error = None
104
 
105
  try:
106
- # Only attempt to load model if torch was imported successfully
107
  if torch is None:
108
  raise RuntimeError("Torch is not available in this environment.")
109
- # Model loading can be slow; catch errors and show a friendly message later.
110
  model = ViTForImageClassification.from_pretrained(repository_id)
111
  feature_extractor = ViTImageProcessor.from_pretrained(repository_id)
112
- logging.info("Model and feature extractor loaded successfully.")
113
  except Exception as e:
114
  model_load_error = str(e)
115
- logging.error("Failed to load model or feature extractor: %s\n%s", e, traceback.format_exc())
116
 
117
- # ------------------ Prediction function (returns label + confidence) ------------------
118
- def predict(image):
119
  """
120
  Given a PIL image, returns (diagnosis_label, confidence_float).
121
- 'diagnosis_label' is "Yes" (tumor) or "No" (no tumor).
122
- 'confidence_float' is between 0 and 1.
123
  """
124
  if model is None or feature_extractor is None:
125
  raise RuntimeError("Model is not loaded.")
@@ -138,26 +140,28 @@ def predict(image):
138
  diagnosis = label_map.get(predicted_idx, "Unknown")
139
  return diagnosis, confidence
140
 
141
- # ------------------ Deepseek (Groq) helper ------------------
142
- def get_assistance_from_deepseek(diagnosis_text):
143
  """
144
- Calls Groq Deepseek R1 with a safety-first prompt.
145
- Returns a string. On error, returns a conservative fallback message.
146
  """
 
 
 
 
 
147
  api_key = os.getenv("API_KEY")
148
  if not api_key:
149
- logging.error("API_KEY environment variable not found for Groq client.")
150
- return ("No assistance available because the Deepseek API key is not configured. "
151
- "Please set the API_KEY environment variable.")
152
  try:
153
  client = Groq(api_key=api_key)
154
  except Exception as e:
155
- logging.error("Failed to instantiate Groq client: %s\n%s", e, traceback.format_exc())
156
- return ("Assistance temporarily unavailable (failed to initialize model client). "
157
- "Please try again later or contact support.")
158
 
159
- # Safer prompt: require the assistant to include a clinician referral sentence
160
- safety_sentence = "This information is informational only — seek evaluation from a licensed medical professional."
161
  prompt = (
162
  f"You are a cautious, safety-first medical assistant. Given the model-diagnosis below, "
163
  "provide general, non-prescriptive information a patient could use to understand options. "
@@ -170,20 +174,26 @@ def get_assistance_from_deepseek(diagnosis_text):
170
 
171
  messages = [
172
  {"role": "system", "content": "You are a careful medical assistant always advising a user to consult a clinician."},
173
- {"role": "user", "content": prompt}
174
  ]
175
 
 
 
 
 
 
 
176
  try:
177
  completion = client.chat.completions.create(
178
  model="deepseek-r1-distill-llama-70b",
179
  messages=messages,
180
  temperature=0.6,
181
- max_completion_tokens=1024,
182
  top_p=0.95,
183
  stream=False,
184
  stop=None,
185
  )
186
- # Try different response shapes safely
187
  assistance_text = ""
188
  try:
189
  assistance_text = completion.choices[0].message.content
@@ -193,79 +203,167 @@ def get_assistance_from_deepseek(diagnosis_text):
193
  except Exception:
194
  assistance_text = str(completion)
195
 
196
- # Ensure the required safety sentence is present
197
  if safety_sentence not in assistance_text:
198
  assistance_text = safety_sentence + "\n\n" + assistance_text
199
-
200
  return assistance_text
201
  except Exception as e:
202
- logging.error("Deepseek Groq call failed: %s\n%s", e, traceback.format_exc())
203
- return ("Assistance is temporarily unavailable due to an error contacting the assistance model. "
204
- "Please consult a licensed medical professional for evaluation. If you are experiencing severe or life-threatening symptoms, seek emergency care immediately.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
205
 
206
- # ------------------ Streamlit UI: image upload + inference flow ------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
207
  uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
208
 
209
  if uploaded_file is not None:
 
210
  try:
211
  image = Image.open(uploaded_file)
212
  except Exception as e:
213
- st.error(f"Failed to open the uploaded file as an image: {e}")
214
- image = None
215
-
216
  if image is not None:
217
- # Display resized thumbnail
218
- resized_image = image.resize((150, 150))
219
- buffered = BytesIO()
220
- resized_image.save(buffered, format="JPEG")
221
- img_str = base64.b64encode(buffered.getvalue()).decode()
222
- st.markdown(
223
- f"<div style='text-align: center;'><img src='data:image/jpeg;base64,{img_str}' alt='Uploaded Image' width='300'></div>",
224
- unsafe_allow_html=True
225
- )
 
 
 
226
 
227
- # Check model loaded
228
  if model_load_error:
229
- st.error("The model failed to load at startup. See logs for details.")
230
  st.code(model_load_error)
231
  else:
232
- st.write("")
233
- st.write("Processing the image...")
 
 
 
 
 
 
 
 
 
234
 
235
- # Run prediction with try/except
236
- try:
237
- diagnosis, confidence = predict(image)
238
- st.markdown("### Diagnosis (model prediction):")
239
- st.write(f"**{diagnosis}** (confidence: **{confidence:.2%}**)")
240
- st.markdown("_Model output is probabilistic and not a clinical diagnosis._", unsafe_allow_html=True)
241
- except Exception as e:
242
- st.error("Prediction failed: " + str(e))
243
- logging.error("Prediction error: %s\n%s", e, traceback.format_exc())
244
- diagnosis = None
245
- confidence = None
246
-
247
- # If we have a diagnosis, call Deepseek for additional guidance (with spinner)
248
  if diagnosis is not None:
249
  with st.spinner("Fetching additional guidance based on your diagnosis..."):
250
- assistance = get_assistance_from_deepseek(f"Diagnosis: {diagnosis} (confidence {confidence:.2%})")
251
  st.markdown("### Next Steps and Recommendations:")
252
- # Use st.write which keeps newlines and formatting reasonable.
253
  st.write(assistance)
254
 
255
- # ------------------ If no file uploaded, show sample placeholder / instructions ------------------
256
  if uploaded_file is None:
257
- st.markdown("<div class='small-muted'>Upload a brain MRI image (jpg/png) to get a model prediction and informational next steps. </div>", unsafe_allow_html=True)
258
 
259
- # ------------------ Helpful debug / info for developers (hidden by default) ------------------
260
  with st.expander("Developer info / Troubleshooting"):
261
- st.markdown("**Model repository**: " + repository_id)
262
- st.markdown("**Torch available**: " + ("Yes" if torch is not None else "No"))
263
- st.markdown("**Model loaded**: " + ("Yes" if model is not None else "No"))
 
 
264
  if model_load_error:
 
265
  st.code(model_load_error)
266
- st.markdown("**Environment**:")
267
- st.write({
268
- "CUDA available": torch.cuda.is_available() if torch is not None else False,
269
- "API_KEY set for Groq": bool(os.getenv("API_KEY"))
270
- })
271
  st.markdown("**Notes:**\n- This app is for informational use only. Do not use it as a replacement for professional medical advice.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  #!/usr/bin/env python3
2
  """
3
+ Streamlit Brain MRI Tumor Detection App (complete updated version)
4
+
5
+ Features:
6
+ - Monkeypatches torch.classes.__path__ before importing Streamlit to avoid Streamlit <-> PyTorch watcher crash.
7
+ - Loads a ViT model for image classification (repository: EnDevSols/brainmri-vit-model).
8
  - Returns probabilistic model output (label + confidence).
9
+ - Adds visible medical disclaimer and safety-first LLM prompting.
10
+ - Robust, defensive Groq (Deepseek R1) integration with detailed error logging and a quick-test button.
11
+ - Developer expander shows helpful debug info (no secrets printed).
12
  """
13
 
14
  import os
15
  import logging
16
  import traceback
17
+ import json
18
+ from io import BytesIO
19
+ import base64
20
 
21
  # ------------------ Safe startup: import torch first and monkeypatch ------------------
22
  # This avoids Streamlit's file-watcher triggering PyTorch C++ registry introspection errors.
23
  try:
24
+ import torch # noqa: E402
 
25
  try:
26
+ # Ensure torch.classes.__path__ exists and is a harmless list to prevent Streamlit introspection issues
27
  if hasattr(torch, "classes"):
 
28
  torch.classes.__path__ = []
29
  except Exception as _e:
 
30
  logging.warning("Failed to set torch.classes.__path__: %s", _e)
31
  except Exception as e:
 
 
32
  logging.error("Unable to import torch at startup: %s\n%s", e, traceback.format_exc())
33
+ torch = None # keep variable so rest of app can check availability
34
 
35
+ # ------------------ Now safe to import Streamlit and other libs ------------------
36
+ import streamlit as st # noqa: E402
37
+ from PIL import Image # noqa: E402
38
+ import numpy as np # noqa: E402
39
+ import torch.nn.functional as F # noqa: E402
40
+ from transformers import ViTForImageClassification, ViTImageProcessor # noqa: E402
41
+
42
+ # Groq client (Deepseek R1). Keep import near usage in case environment doesn't have it.
43
+ try:
44
+ from groq import Groq # noqa: E402
45
+ except Exception:
46
+ Groq = None
47
 
48
  # ------------------ Logging ------------------
49
+ logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
50
+ logger = logging.getLogger(__name__)
51
 
52
+ # ------------------ Streamlit page config and CSS ------------------
53
  st.set_page_config(layout="wide", page_title="Brain MRI Tumor Detection")
54
  combined_css = """
55
  .main, .sidebar .sidebar-content { background-color: #1c1c1c; color: #f0f2f6; }
 
59
  .title {
60
  font-size: 3rem;
61
  font-weight: bold;
62
+ display: flex;
63
+ align-items: center;
64
  justify-content: center;
65
  }
66
  .colorful-text {
 
83
  """
84
  st.markdown(f"<style>{combined_css}</style>", unsafe_allow_html=True)
85
 
86
+ # ------------------ UI header ------------------
87
  st.markdown(
88
  '<div class="title"><span class="colorful-text">Brain MRI</span> <span class="black-white-text">Tumor Detection</span></div>',
89
+ unsafe_allow_html=True,
90
  )
91
  st.markdown(
92
  '<div class="custom-text">Upload an MRI image to detect a brain tumor and receive next steps and recommendations.</div>',
93
+ unsafe_allow_html=True,
94
  )
95
 
 
96
  st.markdown(
97
  "<div class='disclaimer'>⚠️ This app is experimental and informational only. It is NOT a medical diagnosis. "
98
  "If you have health concerns, consult a licensed medical professional. In emergencies call your local emergency number.</div>",
99
+ unsafe_allow_html=True,
100
  )
101
 
102
+ # ------------------ Model loading (graceful) ------------------
103
  repository_id = "EnDevSols/brainmri-vit-model"
104
 
105
  model = None
 
107
  model_load_error = None
108
 
109
  try:
 
110
  if torch is None:
111
  raise RuntimeError("Torch is not available in this environment.")
112
+ # Attempt to load the model and feature extractor (this may be slow)
113
  model = ViTForImageClassification.from_pretrained(repository_id)
114
  feature_extractor = ViTImageProcessor.from_pretrained(repository_id)
115
+ logger.info("Model and feature extractor loaded successfully.")
116
  except Exception as e:
117
  model_load_error = str(e)
118
+ logger.exception("Failed to load model or feature extractor: %s", e)
119
 
120
+ # ------------------ Prediction function ------------------
121
+ def predict(image: Image.Image):
122
  """
123
  Given a PIL image, returns (diagnosis_label, confidence_float).
124
+ diagnosis_label is "Yes" or "No". confidence_float is between 0 and 1.
 
125
  """
126
  if model is None or feature_extractor is None:
127
  raise RuntimeError("Model is not loaded.")
 
140
  diagnosis = label_map.get(predicted_idx, "Unknown")
141
  return diagnosis, confidence
142
 
143
+ # ------------------ Defensive Groq helper with detailed logging ------------------
144
+ def get_assistance_from_deepseek(diagnosis_text: str, max_completion_tokens: int = 512):
145
  """
146
+ Calls Groq Deepseek R1 with a safety-first prompt. Returns a string message.
147
+ On failure, returns a safe fallback message and logs detailed info (without exposing secrets).
148
  """
149
+ safety_sentence = "This information is informational only — seek evaluation from a licensed medical professional."
150
+ if Groq is None:
151
+ logger.error("groq client library not installed or failed to import.")
152
+ return "Assistance not available: Groq client library not available in this environment."
153
+
154
  api_key = os.getenv("API_KEY")
155
  if not api_key:
156
+ logger.error("API_KEY environment variable not found.")
157
+ return "No assistance available because the Deepseek API key is not configured. Please set the API_KEY environment variable."
158
+
159
  try:
160
  client = Groq(api_key=api_key)
161
  except Exception as e:
162
+ logger.exception("Failed to instantiate Groq client: %s", e)
163
+ return "Assistance temporarily unavailable (failed to initialize model client)."
 
164
 
 
 
165
  prompt = (
166
  f"You are a cautious, safety-first medical assistant. Given the model-diagnosis below, "
167
  "provide general, non-prescriptive information a patient could use to understand options. "
 
174
 
175
  messages = [
176
  {"role": "system", "content": "You are a careful medical assistant always advising a user to consult a clinician."},
177
+ {"role": "user", "content": prompt},
178
  ]
179
 
180
+ # Log non-sensitive metadata
181
+ try:
182
+ logger.info("Sending Groq request: model=deepseek-r1-distill-llama-70b, prompt_chars=%d", len(prompt))
183
+ except Exception:
184
+ pass
185
+
186
  try:
187
  completion = client.chat.completions.create(
188
  model="deepseek-r1-distill-llama-70b",
189
  messages=messages,
190
  temperature=0.6,
191
+ max_completion_tokens=max_completion_tokens,
192
  top_p=0.95,
193
  stream=False,
194
  stop=None,
195
  )
196
+ # Extract response robustly
197
  assistance_text = ""
198
  try:
199
  assistance_text = completion.choices[0].message.content
 
203
  except Exception:
204
  assistance_text = str(completion)
205
 
206
+ # Ensure safety sentence present
207
  if safety_sentence not in assistance_text:
208
  assistance_text = safety_sentence + "\n\n" + assistance_text
 
209
  return assistance_text
210
  except Exception as e:
211
+ # Detailed logging for debugging 400/other errors
212
+ logger.exception("Groq API call failed: %s", e)
213
+ # Attempt to surface HTTP-like details from common exception attributes
214
+ resp_obj = None
215
+ for attr in ("response", "http_response", "raw_response", "resp"):
216
+ resp_obj = getattr(e, attr, None)
217
+ if resp_obj:
218
+ break
219
+
220
+ if resp_obj:
221
+ try:
222
+ status = getattr(resp_obj, "status_code", getattr(resp_obj, "status", "unknown"))
223
+ body = getattr(resp_obj, "text", getattr(resp_obj, "body", None))
224
+ # Log a concise summary without printing large bodies
225
+ logger.error("Groq response object: status=%s body_preview=%s", status, (str(body)[:500] + "...") if body else "None")
226
+ except Exception:
227
+ logger.error("Could not extract status/body from response-like object: %s", repr(resp_obj))
228
+ else:
229
+ # Log exception repr and attributes for manual inspection
230
+ logger.error("Groq exception repr: %s", repr(e))
231
+ logger.error("Groq exception dir: %s", ", ".join(sorted(set(dir(e)))))
232
 
233
+ # Return safe fallback
234
+ return (
235
+ "Assistance is temporarily unavailable due to an error contacting the assistance model. "
236
+ "Please try again later or consult a licensed medical professional. If you have severe or life-threatening symptoms, seek emergency care immediately."
237
+ )
238
+
239
+ # ------------------ Small helper: test Groq connectivity (for developer button) ------------------
240
def groq_test_ping(max_tokens: int = 8):
    """
    Make a minimal test call to the Groq chat endpoint to reproduce/inspect errors.

    Parameters
    ----------
    max_tokens : int
        Completion-token cap for the throwaway "ping" request (kept tiny on purpose).

    Returns
    -------
    dict
        {"ok": bool, "result": str | dict}. On success, ``result`` is the model's
        reply text; on failure it is a diagnostics dict (exception repr plus any
        response-like attributes found on the exception). Never raises; never
        includes the API key.
    """
    if Groq is None:
        return {"ok": False, "result": "Groq client library not available."}
    api_key = os.getenv("API_KEY")
    if not api_key:
        return {"ok": False, "result": "API_KEY not configured in environment."}
    try:
        client = Groq(api_key=api_key)
        res = client.chat.completions.create(
            model="deepseek-r1-distill-llama-70b",
            messages=[{"role": "user", "content": "ping"}],
            max_completion_tokens=max_tokens,
        )
        # Extract the reply defensively: SDK versions differ in response shape.
        try:
            content = res.choices[0].message.content
        except Exception:
            try:
                content = res.choices[0].text
            except Exception:
                content = str(res)
        return {"ok": True, "result": content}
    except Exception as e:
        # Capture useful, non-secret attributes from the exception for debugging.
        info = {"exception_repr": repr(e)}
        for attr in ("response", "http_response", "raw_response", "resp"):
            if not hasattr(e, attr):
                continue
            rval = getattr(e, attr)
            try:
                # Prefer whichever of .text / .body is actually set. The previous
                # expression sliced None (TypeError) when .text existed but was
                # None while .body held the payload, losing the error body.
                body = getattr(rval, "text", None)
                if body is None:
                    body = getattr(rval, "body", None)
                if body is None:
                    preview = str(rval)
                else:
                    body = str(body)
                    # Only mark truncation when we actually truncated.
                    preview = body[:1000] + ("..." if len(body) > 1000 else "")
                info[attr] = {
                    "status": getattr(rval, "status_code", getattr(rval, "status", "unknown")),
                    "body_preview": preview,
                }
            except Exception:
                info[attr] = str(rval)
        logger.exception("Groq test ping failed: %s", e)
        return {"ok": False, "result": info}
281
+
282
# ------------------ Streamlit UI: file uploader and inference flow ------------------
# Top-level script flow: accept an image, show a thumbnail, run the ViT model,
# then (if a diagnosis was produced) fetch LLM guidance. All failures are shown
# in the UI and logged rather than crashing the Streamlit script run.
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    image = None
    try:
        image = Image.open(uploaded_file)
    except Exception as e:
        # Uploaded bytes were not a decodable image; leave `image` as None.
        st.error(f"Failed to open uploaded file as an image: {e}")
        logger.exception("Open image error: %s", e)

    if image is not None:
        # Display a thumbnail. Rendering via a base64 data-URI keeps the image
        # centered with custom HTML; failure here is cosmetic, so only log it.
        try:
            resized_image = image.resize((150, 150))
            buffered = BytesIO()
            resized_image.save(buffered, format="JPEG")
            img_str = base64.b64encode(buffered.getvalue()).decode()
            st.markdown(
                f"<div style='text-align: center;'><img src='data:image/jpeg;base64,{img_str}' alt='Uploaded Image' width='300'></div>",
                unsafe_allow_html=True,
            )
        except Exception as e:
            logger.exception("Failed to create/display thumbnail: %s", e)

        # Check model availability (model_load_error is set at startup on failure).
        if model_load_error:
            st.error("The model failed to load at startup. See developer info for details.")
            st.code(model_load_error)
        else:
            with st.spinner("Processing the image..."):
                try:
                    # predict() returns (label, confidence); raises if model missing.
                    diagnosis, confidence = predict(image)
                    st.markdown("### Diagnosis (model prediction):")
                    st.write(f"**{diagnosis}** (confidence: **{confidence:.2%}**)")
                    st.markdown("_Model output is probabilistic and not a clinical diagnosis._", unsafe_allow_html=True)
                except Exception as e:
                    st.error("Prediction failed: " + str(e))
                    logger.exception("Prediction error: %s", e)
                    # Sentinel values so the guidance step below is skipped.
                    diagnosis = None
                    confidence = None

            # If we have a diagnosis, call Groq for assistance.
            if diagnosis is not None:
                with st.spinner("Fetching additional guidance based on your diagnosis..."):
                    assistance = get_assistance_from_deepseek(f"Diagnosis: {diagnosis} (confidence {confidence:.2%})", max_completion_tokens=512)
                st.markdown("### Next Steps and Recommendations:")
                st.write(assistance)
329
 
330
+ # Footer / instructions when no file uploaded
331
  if uploaded_file is None:
332
+ st.markdown("<div class='small-muted'>Upload a brain MRI image (jpg/png) to get a model prediction and informational next steps.</div>", unsafe_allow_html=True)
333
 
334
# ------------------ Developer info / troubleshooting expander ------------------
# Collapsed-by-default diagnostics panel: environment summary, model-load error,
# a Groq connectivity test button, and debugging notes. Reports only booleans
# about secrets (e.g. whether API_KEY is set), never their values.
with st.expander("Developer info / Troubleshooting"):
    st.markdown(f"**Model repository**: `{repository_id}`")
    st.markdown(f"**Torch available**: {'Yes' if torch is not None else 'No'}")
    st.markdown(f"**Model loaded**: {'Yes' if model is not None else 'No'}")
    st.write({"CUDA available": torch.cuda.is_available() if torch is not None else False, "API_KEY set for Groq": bool(os.getenv("API_KEY"))})

    if model_load_error:
        st.markdown("**Model load error**:")
        st.code(model_load_error)

    st.markdown("**Notes:**\n- This app is for informational use only. Do not use it as a replacement for professional medical advice.")

    st.markdown("---")
    st.markdown("### Groq/Deepseek quick test")
    st.markdown("Use this to reproduce and capture raw error details from the Groq API (no secrets shown).")
    if st.button("Run Groq test ping"):
        # groq_test_ping() returns {"ok": bool, "result": str | dict} and never raises.
        result = groq_test_ping()
        if result.get("ok"):
            st.success("Groq ping successful.")
            st.text_area("Result (truncated)", str(result.get("result"))[:2000], height=200)
        else:
            st.error("Groq ping failed. See details below.")
            st.json(result.get("result"))

    st.markdown("---")
    st.markdown("### Debugging tips")
    st.markdown(
        "- If you get `400` from the Groq API: check model name, token limits, and `messages` shape.\n"
        "- Use the Groq test ping to view the API's error body which typically explains the cause.\n"
        "- Ensure `API_KEY` is set in the environment and has permissions for the model.\n"
        "- To avoid the Streamlit <-> PyTorch watcher issue you can also run Streamlit with: "
        "`streamlit run app.py --server.fileWatcherType none` or add `.streamlit/config.toml` with `fileWatcherType = \"none\"`.\n"
    )

# End of file