ibadhasnain committed on
Commit
f76c514
·
verified ·
1 Parent(s): 8c3ae93

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +154 -144
app.py CHANGED
@@ -1,53 +1,136 @@
1
- # app.py
2
  import os, re, json
3
- from typing import List, Dict
 
 
4
  import chainlit as cl
5
  from dotenv import load_dotenv
6
- from pydantic import BaseModel, Field
7
-
8
- # === Your agents framework (shim in ./agents) ===
9
- from agents import (
10
- Agent,
11
- Runner,
12
- AsyncOpenAI,
13
- OpenAIChatCompletionsModel,
14
- set_tracing_disabled,
15
- function_tool,
16
- )
17
- from agents.exceptions import InputGuardrailTripwireTriggered
18
 
19
- # -----------------------------
20
- # Setup: auto provider (Gemini/OpenAI)
21
- # -----------------------------
22
- load_dotenv()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
- GEMINI_API_KEY = os.environ.get("Gem")
25
- OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
  if GEMINI_API_KEY:
28
- PROVIDER = "gemini"
29
  API_KEY = GEMINI_API_KEY
30
  BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
31
  MODEL_ID = "gemini-2.5-flash"
32
  elif OPENAI_API_KEY:
33
- PROVIDER = "openai"
34
  API_KEY = OPENAI_API_KEY
35
  BASE_URL = None
36
  MODEL_ID = "gpt-4o-mini"
37
  else:
38
- raise RuntimeError("Missing GEMINI_API_KEY or OPENAI_API_KEY in your environment.")
39
 
40
- set_tracing_disabled(disabled=True)
 
 
41
 
42
- ext_client: AsyncOpenAI = AsyncOpenAI(api_key=API_KEY, base_url=BASE_URL)
43
- llm_model: OpenAIChatCompletionsModel = OpenAIChatCompletionsModel(
44
- model=MODEL_ID,
45
- openai_client=ext_client,
46
- )
47
-
48
- # -----------------------------
49
- # Tools (function calling)
50
- # -----------------------------
51
  @function_tool
52
  def infer_modality_from_filename(filename: str) -> dict:
53
  """
@@ -56,14 +139,14 @@ def infer_modality_from_filename(filename: str) -> dict:
56
  """
57
  f = (filename or "").lower()
58
  mapping = {
59
- "xray": "X-ray", "x_ray": "X-ray", "xr": "X-ray", "cXR": "X-ray", "chest": "X-ray",
60
  "mri": "MRI", "t1": "MRI", "t2": "MRI", "flair": "MRI", "dwi": "MRI", "adc": "MRI", "swi": "MRI",
61
  "ct": "CT", "cta": "CT",
62
  "ultrasound": "Ultrasound", "usg": "Ultrasound", "echo": "Ultrasound",
63
  }
64
- for key, mod in mapping.items():
65
- if key in f:
66
- return {"modality": mod}
67
  return {"modality": "unknown"}
68
 
69
  @function_tool
@@ -77,8 +160,8 @@ def imaging_reference_guide(modality: str) -> dict:
77
  return {
78
  "acquisition": [
79
  "Projection radiography with ionizing radiation.",
80
- "Common views: AP/PA/lateral; adjust kVp/mAs and positioning.",
81
- "Grids/collimation reduce scatter and improve contrast."
82
  ],
83
  "artifacts": [
84
  "Motion blur; under/overexposure.",
@@ -91,49 +174,49 @@ def imaging_reference_guide(modality: str) -> dict:
91
  "Edge enhancement (unsharp) sparingly to avoid halos."
92
  ],
93
  "study_tips": [
94
- "Use a systematic pattern (e.g., ABCDE for chest).",
95
- "Compare sides; verify markers/labels/devices.",
96
  "Practice with checklists for consistency."
97
  ],
98
  }
99
  if mod in ["mri", "mr"]:
100
  return {
101
  "acquisition": [
102
- "MR signal via RF pulses in a magnetic field; sequences set contrast.",
103
  "Common: T1, T2, FLAIR, DWI/ADC, GRE/SWI.",
104
  "TR/TE/flip angle trade off SNR, contrast, scan time."
105
  ],
106
  "artifacts": [
107
  "Motion/ghosting; susceptibility near metal/air.",
108
  "Chemical shift; Gibbs ringing.",
109
- "B0/B1 inhomogeneity causing intensity non-uniformity."
110
  ],
111
  "preprocessing": [
112
  "Bias-field correction (N4).",
113
- "Denoising (NLM); spatial registration/normalization.",
114
  "Skull stripping (brain); intensity standardization."
115
  ],
116
  "study_tips": [
117
- "Learn what each sequence emphasizes (T1 anatomy; T2 fluid; FLAIR edema).",
118
- "Always review diffusion for acute ischemia (with ADC).",
119
- "Keep window/level consistent for timepoint comparison."
120
  ],
121
  }
122
  if mod in ["ct"]:
123
  return {
124
  "acquisition": [
125
- "Helical CT; HU reflect X-ray attenuation.",
126
- "Reconstruction kernels affect sharpness vs noise.",
127
- "Contrast timing (arterial/venous) per clinical question."
128
  ],
129
  "artifacts": [
130
  "Beam hardening streaks; partial volume; motion.",
131
- "Metal artifacts; MAR/iterative recon can help."
132
  ],
133
  "preprocessing": [
134
  "Denoising (bilateral/NLM) with edge preservation.",
135
  "Window/level by organ system (lung, mediastinum, bone).",
136
- "Metal artifact reduction when available."
137
  ],
138
  "study_tips": [
139
  "Use standard planes; scroll systematically.",
@@ -141,23 +224,23 @@ def imaging_reference_guide(modality: str) -> dict:
141
  "Compare with priors when teaching cases."
142
  ],
143
  }
144
- # Generic fallback
145
  return {
146
  "acquisition": [
147
- "Acquisition parameters determine contrast, resolution, and noise.",
148
- "Positioning and motion control are essential to quality."
149
  ],
150
  "artifacts": [
151
- "Motion blur/ghosting; foreign objects and hardware can create streaks/voids.",
152
  "Under/overexposure or parameter misconfiguration."
153
  ],
154
  "preprocessing": [
155
- "Denoising and contrast normalization can aid teaching clarity.",
156
- "Registration and standard planes help consistent comparisons."
157
  ],
158
  "study_tips": [
159
  "Adopt a checklist; compare bilaterally or across time.",
160
- "Understand modality-specific controls (window/level, sequence types)."
161
  ],
162
  }
163
 
@@ -166,17 +249,13 @@ def file_facts(filename: str, size_bytes: str) -> dict:
166
  """Return simple file facts (name and size)."""
167
  return {"filename": filename, "size_bytes": size_bytes}
168
 
169
- # -----------------------------
170
- # Guardrails
171
- # -----------------------------
172
  ALLOWED_COMMANDS = ("/help", "/policy")
173
-
174
  TOPIC_KEYWORDS = [
175
  "imaging","image","radiology","biomedical","device","equipment","oem","modality",
176
- "acquisition","artifact","artifacts","preprocessing","window","level","sequence","kVp","mAs",
177
  "mri","t1","t2","flair","dwi","adc","swi","ct","xray","x-ray","ultrasound","usg","echo"
178
  ]
179
-
180
  RE_FORBIDDEN_CLINICAL = re.compile(r"\b(diagnos(e|is|tic)|prescrib|medicat|treat(ment|ing)?|dose|drug|therapy)\b", re.I)
181
  RE_INVASIVE_REPAIR = re.compile(r"\b(open(ing)?\s+(device|casing|cover)|solder|board[- ]level|reflow|replace\s+(capacitor|ic))\b", re.I)
182
  RE_ALARM_BYPASS = re.compile(r"\b(bypass|disable|silence)\s+(alarm|alert|safety|interlock)\b", re.I)
@@ -204,57 +283,10 @@ def local_guard(text: str) -> List[str]:
204
  issues.append("phi_share_or_collect")
205
  return issues
206
 
207
- class SafetyCheck(BaseModel):
208
- in_scope: bool = Field(description="True if about biomedical imaging education/troubleshooting.")
209
- clinical_advice: bool = Field(description="Diagnosis/treatment/dosing/medication/therapy.")
210
- invasive_repair: bool = Field(description="Open device, solder, board-level steps.")
211
- alarm_bypass: bool = Field(description="Bypass/disable alarms or interlocks.")
212
- firmware_tamper: bool = Field(description="Firmware hacks/unlock codes/passwords.")
213
- phi_share_or_collect: bool = Field(description="Sharing/collecting personal identifiers.")
214
- reasoning: str
215
-
216
- guardrail_agent = Agent(
217
- name="Safety Classifier",
218
- instructions=(
219
- "You are a strict safety classifier for a **biomedical imaging education tutor**.\n"
220
- "Return ONLY JSON with keys: {in_scope, clinical_advice, invasive_repair, alarm_bypass, "
221
- "firmware_tamper, phi_share_or_collect, reasoning}.\n"
222
- "- in_scope: true ONLY if the message is about biomedical IMAGING education/troubleshooting (no diagnosis).\n"
223
- "- clinical_advice: diagnosis/treatment/dose/medication/therapy.\n"
224
- "- invasive_repair: opening casing, soldering, board-level steps.\n"
225
- "- alarm_bypass: silencing/disabling alarms/interlocks.\n"
226
- "- firmware_tamper: rooting/jailbreaking/unlocking firmware/service modes/passwords.\n"
227
- "- phi_share_or_collect: asking to share or store personal identifiers.\n"
228
- "Respond with compact JSON only."
229
- ),
230
- model=llm_model,
231
- output_type=SafetyCheck,
232
- )
233
-
234
- def parse_guard_json(s: str) -> Dict[str, bool]:
235
- try:
236
- m = re.search(r"\{.*\}", s or "", re.S)
237
- data = json.loads(m.group(0) if m else (s or "{}"))
238
- defaults = {
239
- "in_scope": True,
240
- "clinical_advice": False,
241
- "invasive_repair": False,
242
- "alarm_bypass": False,
243
- "firmware_tamper": False,
244
- "phi_share_or_collect": False
245
- }
246
- defaults.update({k: bool(v) for k, v in data.items() if k in defaults})
247
- return defaults
248
- except Exception:
249
- return {"in_scope": True, "clinical_advice": False, "invasive_repair": False,
250
- "alarm_bypass": False, "firmware_tamper": False, "phi_share_or_collect": False}
251
-
252
- # -----------------------------
253
- # Tutor Agent
254
- # -----------------------------
255
  tutor_instructions = (
256
  "You are a Biomedical Imaging **Education** Tutor. Explain how images are acquired, common artifacts, "
257
- "and preprocessing for study/teaching. You do NOT diagnose, identify diseases, or give clinical advice.\n\n"
258
  "Output a concise, structured answer with sections in this order:\n"
259
  "1) Acquisition overview\n"
260
  "2) Common artifacts\n"
@@ -264,7 +296,6 @@ tutor_instructions = (
264
  "Use tools to infer modality (from filename) and fetch a modality-specific reference guide. "
265
  "If modality unclear, provide a generic overview and invite the user to specify."
266
  )
267
-
268
  tutor_agent = Agent(
269
  name="Biomedical Imaging Tutor",
270
  instructions=tutor_instructions,
@@ -272,9 +303,7 @@ tutor_agent = Agent(
272
  tools=[infer_modality_from_filename, imaging_reference_guide, file_facts],
273
  )
274
 
275
- # -----------------------------
276
- # UI strings
277
- # -----------------------------
278
  WELCOME = (
279
  "🎓 **Multimodal Biomedical Imaging Tutor**\n\n"
280
  "Upload an **MRI/X-ray/CT/Ultrasound** image (PNG/JPG), then ask what you’d like to learn.\n"
@@ -284,10 +313,10 @@ WELCOME = (
284
  POLICY = (
285
  "🛡️ **Safety & Scope Policy**\n"
286
  "- Scope: biomedical **imaging education/troubleshooting** only.\n"
287
- "- No clinical advice (no diagnosis/treatment/dosing/medications).\n"
288
  "- No invasive repair steps (opening casing, soldering, board-level).\n"
289
  "- No alarm bypass or firmware tampering.\n"
290
- "- No collecting or sharing personal identifiers.\n"
291
  "- OEM manuals & local policy take priority."
292
  )
293
  REFUSAL = (
@@ -295,9 +324,7 @@ REFUSAL = (
295
  "I can explain **imaging acquisition, artifacts, and preprocessing** for education."
296
  )
297
 
298
- # -----------------------------
299
- # Chainlit flows
300
- # -----------------------------
301
  @cl.on_chat_start
302
  async def on_chat_start():
303
  await cl.Message(content=WELCOME).send()
@@ -326,33 +353,16 @@ async def on_message(message: cl.Message):
326
  if text.lower().startswith("/policy"):
327
  await cl.Message(content=POLICY).send(); return
328
 
329
- # Topic gate first
330
  if not on_topic(text):
331
  await cl.Message(
332
  content="I only discuss **biomedical imaging education** (acquisition, artifacts, preprocessing). "
333
  "Please ask about MRI/X-ray/CT/Ultrasound imaging."
334
- ).send()
335
- return
336
 
337
- # Local guard (fast)
338
  issues = local_guard(text)
339
  if issues:
340
- await cl.Message(content=REFUSAL + "\n\n" + POLICY).send()
341
- return
342
-
343
- # LLM guard (nuanced)
344
- try:
345
- verdict = await Runner.run(guardrail_agent, text)
346
- flags = parse_guard_json(verdict.final_output)
347
- if (not flags.get("in_scope", True)) or any(
348
- flags.get(k, False) for k in
349
- ["clinical_advice", "invasive_repair", "alarm_bypass", "firmware_tamper", "phi_share_or_collect"]
350
- ):
351
- await cl.Message(content=REFUSAL + "\n\n" + POLICY).send()
352
- return
353
- except Exception:
354
- # If guard fails, continue (tutor prompt is already safety-constrained)
355
- pass
356
 
357
  # Context from uploaded file
358
  file_name = cl.user_session.get("last_file_name")
@@ -362,7 +372,7 @@ async def on_message(message: cl.Message):
362
  if file_size is not None: context_lines.append(f"Size: {file_size} bytes")
363
  context_block = "\n".join(context_lines)
364
 
365
- # Compose user query for tutor
366
  user_query = text if not context_block else f"{text}\n\n[Context]\n{context_block}"
367
 
368
  # Run tutor
 
1
+ # app.py — Self-contained (no external 'agents' package needed)
2
  import os, re, json
3
+ from typing import Any, Callable, Dict, List, Optional
4
+ from dataclasses import dataclass, field
5
+
6
  import chainlit as cl
7
  from dotenv import load_dotenv
8
+ from openai import AsyncOpenAI as _SDKAsyncOpenAI
 
 
 
 
 
 
 
 
 
 
 
9
 
10
# ========= Minimal "agents" shim (inline) =========
def set_tracing_disabled(disabled: bool = True):
    """No-op tracing toggle kept for agents-SDK API compatibility; echoes the flag back."""
    return disabled
13
+
14
def function_tool(func: Callable):
    """Decorator that tags *func* as a callable tool for the inline Agent shim."""
    setattr(func, "_is_tool", True)
    return func
17
+
18
class InputGuardrailTripwireTriggered(Exception):
    """Raised when an input guardrail trips (shim for the external agents package)."""
20
+
21
class AsyncOpenAI:
    """Thin wrapper over the OpenAI SDK async client.

    Accepts an optional ``base_url`` so the same wrapper can target
    OpenAI-compatible endpoints (e.g. Gemini's OpenAI-style facade).
    """

    def __init__(self, api_key: str, base_url: Optional[str] = None):
        # Only forward base_url when it is truthy, matching the SDK default otherwise.
        if base_url:
            self._client = _SDKAsyncOpenAI(api_key=api_key, base_url=base_url)
        else:
            self._client = _SDKAsyncOpenAI(api_key=api_key)

    @property
    def client(self):
        """The underlying SDK async client instance."""
        return self._client
30
+
31
class OpenAIChatCompletionsModel:
    """Pairs a chat-completions model identifier with the client used to invoke it."""

    def __init__(self, model: str, openai_client: AsyncOpenAI):
        # Unwrap the SDK client once so Runner can call it directly.
        self.client = openai_client.client
        self.model = model
35
+
36
@dataclass
class Agent:
    """A named LLM persona: system instructions, a model binding, and optional tools."""
    name: str
    instructions: str
    model: OpenAIChatCompletionsModel
    tools: Optional[List[Callable]] = field(default_factory=list)

    def tool_specs(self) -> List[Dict[str, Any]]:
        """Build OpenAI function-calling specs for every tool tagged by @function_tool.

        All parameters are advertised as required strings, mirroring how the
        tools are defined in this file.
        """
        def _spec(fn: Callable) -> Dict[str, Any]:
            # Positional parameter names, recovered from the code object.
            params = list(fn.__code__.co_varnames[:fn.__code__.co_argcount])
            return {
                "type": "function",
                "function": {
                    "name": fn.__name__,
                    "description": (fn.__doc__ or "")[:512],
                    "parameters": {
                        "type": "object",
                        "properties": {p: {"type": "string"} for p in params},
                        "required": params,
                    },
                },
            }

        return [_spec(t) for t in (self.tools or []) if getattr(t, "_is_tool", False)]
61
+
62
class Runner:
    """Drives a single agent turn, resolving tool calls in a bounded loop."""

    @staticmethod
    async def run(agent: Agent, user_input: str, context: Optional[Dict[str, Any]] = None):
        """Run *agent* on *user_input*.

        Returns a duck-typed result object exposing ``final_output``,
        ``context`` and ``final_output_as`` (mirroring the agents-SDK surface).
        Tool round-trips are capped at 4 iterations.
        """
        conversation = [
            {"role": "system", "content": agent.instructions},
            {"role": "user", "content": user_input},
        ]
        specs = agent.tool_specs()
        registry = {
            fn.__name__: fn
            for fn in (agent.tools or [])
            if getattr(fn, "_is_tool", False)
        }

        def _wrap(text: str):
            # Minimal anonymous result object compatible with agents-SDK callers.
            out = type("Result", (), {})()
            out.final_output = text
            out.context = context or {}
            out.final_output_as = lambda *_: out.final_output
            return out

        for _ in range(4):  # cap tool-call round trips
            resp = await agent.model.client.chat.completions.create(
                model=agent.model.model,
                messages=conversation,
                tools=specs or None,
                tool_choice="auto" if specs else None,
            )
            msg = resp.choices[0].message
            conversation.append(
                {"role": "assistant", "content": msg.content or "", "tool_calls": msg.tool_calls}
            )

            if not msg.tool_calls:
                # Final answer: no further tool invocations requested.
                return _wrap(msg.content or "")

            for call in msg.tool_calls:
                fn_name = call.function.name
                args = json.loads(call.function.arguments or "{}")
                if fn_name in registry:
                    try:
                        result = registry[fn_name](**args)
                    except Exception as e:
                        # Surface tool failures to the model rather than crashing the turn.
                        result = {"error": str(e)}
                else:
                    result = {"error": f"Unknown tool: {fn_name}"}
                conversation.append({
                    "role": "tool",
                    "tool_call_id": call.id,
                    "name": fn_name,
                    "content": json.dumps(result),
                })

        return _wrap("Sorry, I couldn't complete the request.")
112
+
113
# ========= Setup: provider auto-detect =========
load_dotenv()

GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Prefer Gemini (served through its OpenAI-compatible endpoint) when both keys exist.
if GEMINI_API_KEY:
    API_KEY = GEMINI_API_KEY
    BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
    MODEL_ID = "gemini-2.5-flash"
elif OPENAI_API_KEY:
    API_KEY = OPENAI_API_KEY
    BASE_URL = None  # SDK default endpoint
    MODEL_ID = "gpt-4o-mini"
else:
    raise RuntimeError("Missing GEMINI_API_KEY or OPENAI_API_KEY in env/secrets.")

set_tracing_disabled(True)
ext_client = AsyncOpenAI(api_key=API_KEY, base_url=BASE_URL)
llm_model = OpenAIChatCompletionsModel(model=MODEL_ID, openai_client=ext_client)
132
 
133
+ # ========= Tools =========
 
 
 
 
 
 
 
 
134
@function_tool
def infer_modality_from_filename(filename: str) -> dict:
    """Guess the imaging modality from hints embedded in a filename.

    Returns ``{"modality": <"X-ray"|"MRI"|"CT"|"Ultrasound"|"unknown">}``.
    First matching hint wins, in the order listed below.
    """
    lowered = (filename or "").lower()
    hints = {
        "xray": "X-ray", "x_ray": "X-ray", "xr": "X-ray", "chest": "X-ray",
        "mri": "MRI", "t1": "MRI", "t2": "MRI", "flair": "MRI", "dwi": "MRI", "adc": "MRI", "swi": "MRI",
        "ct": "CT", "cta": "CT",
        "ultrasound": "Ultrasound", "usg": "Ultrasound", "echo": "Ultrasound",
    }
    # Dicts preserve insertion order, so this keeps first-match semantics.
    return next(
        ({"modality": modality} for hint, modality in hints.items() if hint in lowered),
        {"modality": "unknown"},
    )
  return {"modality": "unknown"}
151
 
152
  @function_tool
 
160
  return {
161
  "acquisition": [
162
  "Projection radiography with ionizing radiation.",
163
+ "Views: AP/PA/lateral; tune kVp/mAs and positioning.",
164
+ "Grids & collimation reduce scatter to improve contrast."
165
  ],
166
  "artifacts": [
167
  "Motion blur; under/overexposure.",
 
174
  "Edge enhancement (unsharp) sparingly to avoid halos."
175
  ],
176
  "study_tips": [
177
+ "Use ABCDE (for CXR), check markers/labels/devices.",
178
+ "Compare sides and prior images.",
179
  "Practice with checklists for consistency."
180
  ],
181
  }
182
  if mod in ["mri", "mr"]:
183
  return {
184
  "acquisition": [
185
+ "MR signal via RF pulses; sequences define contrast.",
186
  "Common: T1, T2, FLAIR, DWI/ADC, GRE/SWI.",
187
  "TR/TE/flip angle trade off SNR, contrast, scan time."
188
  ],
189
  "artifacts": [
190
  "Motion/ghosting; susceptibility near metal/air.",
191
  "Chemical shift; Gibbs ringing.",
192
+ "B0/B1 inhomogeneity causing intensity bias."
193
  ],
194
  "preprocessing": [
195
  "Bias-field correction (N4).",
196
+ "Denoising (NLM); registration/normalization.",
197
  "Skull stripping (brain); intensity standardization."
198
  ],
199
  "study_tips": [
200
+ "Know sequence emphases (T1 anatomy; T2 fluid; FLAIR edema).",
201
+ "Review diffusion for acute ischemia (check ADC).",
202
+ "Keep window/level consistent across timepoints."
203
  ],
204
  }
205
  if mod in ["ct"]:
206
  return {
207
  "acquisition": [
208
+ "Helical CT; HU reflect attenuation.",
209
+ "Recon kernels affect sharpness vs noise.",
210
+ "Contrast timing (arterial/venous) per question."
211
  ],
212
  "artifacts": [
213
  "Beam hardening streaks; partial volume; motion.",
214
+ "Metal artifacts; MAR/iterative recon help."
215
  ],
216
  "preprocessing": [
217
  "Denoising (bilateral/NLM) with edge preservation.",
218
  "Window/level by organ system (lung, mediastinum, bone).",
219
+ "Metal artifact reduction if available."
220
  ],
221
  "study_tips": [
222
  "Use standard planes; scroll systematically.",
 
224
  "Compare with priors when teaching cases."
225
  ],
226
  }
227
+ # Fallback (generic)
228
  return {
229
  "acquisition": [
230
+ "Acquisition parameters set contrast, resolution, noise.",
231
+ "Positioning & motion control drive image quality."
232
  ],
233
  "artifacts": [
234
+ "Motion blur or ghosting; foreign objects/hardware can streak.",
235
  "Under/overexposure or parameter misconfiguration."
236
  ],
237
  "preprocessing": [
238
+ "Denoising & contrast normalization aid teaching clarity.",
239
+ "Registration & standard planes for consistent review."
240
  ],
241
  "study_tips": [
242
  "Adopt a checklist; compare bilaterally or across time.",
243
+ "Understand modality-specific controls (window/level, sequences)."
244
  ],
245
  }
246
 
 
249
  """Return simple file facts (name and size)."""
250
  return {"filename": filename, "size_bytes": size_bytes}
251
 
252
# ========= Guardrails =========
# Slash-commands the bot answers directly, before any guard checks.
ALLOWED_COMMANDS = ("/help", "/policy")

# Keyword allow-list used by the topic gate.
TOPIC_KEYWORDS = [
    "imaging", "image", "radiology", "biomedical", "device", "equipment", "oem", "modality",
    "acquisition", "artifact", "preprocessing", "window", "level", "sequence", "kVp", "mAs",
    "mri", "t1", "t2", "flair", "dwi", "adc", "swi", "ct", "xray", "x-ray", "ultrasound", "usg", "echo",
]

# Deny-list patterns: clinical advice, invasive hardware repair, and alarm tampering.
RE_FORBIDDEN_CLINICAL = re.compile(r"\b(diagnos(e|is|tic)|prescrib|medicat|treat(ment|ing)?|dose|drug|therapy)\b", re.I)
RE_INVASIVE_REPAIR = re.compile(r"\b(open(ing)?\s+(device|casing|cover)|solder|board[- ]level|reflow|replace\s+(capacitor|ic))\b", re.I)
RE_ALARM_BYPASS = re.compile(r"\b(bypass|disable|silence)\s+(alarm|alert|safety|interlock)\b", re.I)
 
283
  issues.append("phi_share_or_collect")
284
  return issues
285
 
286
+ # ========= Tutor Agent =========
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
287
  tutor_instructions = (
288
  "You are a Biomedical Imaging **Education** Tutor. Explain how images are acquired, common artifacts, "
289
+ "and preprocessing for study/teaching. Do NOT diagnose or give clinical advice.\n\n"
290
  "Output a concise, structured answer with sections in this order:\n"
291
  "1) Acquisition overview\n"
292
  "2) Common artifacts\n"
 
296
  "Use tools to infer modality (from filename) and fetch a modality-specific reference guide. "
297
  "If modality unclear, provide a generic overview and invite the user to specify."
298
  )
 
299
  tutor_agent = Agent(
300
  name="Biomedical Imaging Tutor",
301
  instructions=tutor_instructions,
 
303
  tools=[infer_modality_from_filename, imaging_reference_guide, file_facts],
304
  )
305
 
306
+ # ========= UI strings =========
 
 
307
  WELCOME = (
308
  "🎓 **Multimodal Biomedical Imaging Tutor**\n\n"
309
  "Upload an **MRI/X-ray/CT/Ultrasound** image (PNG/JPG), then ask what you’d like to learn.\n"
 
313
# Safety policy shown via /policy and appended to refusal messages.
POLICY = "\n".join([
    "🛡️ **Safety & Scope Policy**",
    "- Scope: biomedical **imaging education/troubleshooting** only.",
    "- No clinical advice (diagnosis/treatment/dosing/medications).",
    "- No invasive repair steps (opening casing, soldering, board-level).",
    "- No alarm bypass or firmware tampering.",
    "- No collecting/sharing personal identifiers.",
    "- OEM manuals & local policy take priority.",
])
322
  REFUSAL = (
 
324
  "I can explain **imaging acquisition, artifacts, and preprocessing** for education."
325
  )
326
 
327
# ========= Chainlit flow =========
@cl.on_chat_start
async def on_chat_start():
    """Greet the user with the welcome banner when a chat session opens."""
    await cl.Message(content=WELCOME).send()
 
353
  if text.lower().startswith("/policy"):
354
  await cl.Message(content=POLICY).send(); return
355
 
356
+ # Topic & guardrails
357
  if not on_topic(text):
358
  await cl.Message(
359
  content="I only discuss **biomedical imaging education** (acquisition, artifacts, preprocessing). "
360
  "Please ask about MRI/X-ray/CT/Ultrasound imaging."
361
+ ).send(); return
 
362
 
 
363
  issues = local_guard(text)
364
  if issues:
365
+ await cl.Message(content=REFUSAL + "\n\n" + POLICY).send(); return
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
366
 
367
  # Context from uploaded file
368
  file_name = cl.user_session.get("last_file_name")
 
372
  if file_size is not None: context_lines.append(f"Size: {file_size} bytes")
373
  context_block = "\n".join(context_lines)
374
 
375
+ # Compose query for tutor
376
  user_query = text if not context_block else f"{text}\n\n[Context]\n{context_block}"
377
 
378
  # Run tutor