zousko-stark committed on
Commit
ad759b6
·
verified ·
1 Parent(s): 5fbcfff

Upload folder using huggingface_hub

Browse files
Files changed (2) hide show
  1. dicom_processor.py +4 -1
  2. explainability.py +22 -3
dicom_processor.py CHANGED
@@ -64,7 +64,10 @@ def anonymize_dicom(ds: pydicom.dataset.FileDataset) -> pydicom.dataset.FileData
64
  # Wipe other fields
65
  for tag in PHI_TAGS:
66
  if tag in ds:
67
- ds.data_element(tag).value = "ANONYMIZED"
 
 
 
68
 
69
  return ds
70
 
 
64
  # Wipe other fields
65
  for tag in PHI_TAGS:
66
  if tag in ds:
67
+ if 'Date' in tag: # VR DA requires YYYYMMDD
68
+ ds.data_element(tag).value = "19010101"
69
+ else:
70
+ ds.data_element(tag).value = "ANONYMIZED"
71
 
72
  return ds
73
 
explainability.py CHANGED
@@ -146,7 +146,12 @@ class ExplainabilityEngine:
146
  # 3. Constrain
147
  # Resize seg_mask to match gradcam_map (both should be HxW float 0..1)
148
  if gradcam_map is None:
149
- return {"heatmap": None, "original": None, "confidence": "LOW"}
 
 
 
 
 
150
 
151
  # Ensure shapes match
152
  if seg_mask.shape != gradcam_map.shape:
@@ -184,8 +189,16 @@ class ExplainabilityEngine:
184
  inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
185
 
186
  # Wrapper
 
 
 
 
 
 
 
 
187
  model_wrapper_cam = HuggingFaceWeirdCLIPWrapper(
188
- self.model, inputs['input_ids'], inputs['attention_mask']
189
  )
190
 
191
  target_layers = [self.model.vision_model.post_layernorm]
@@ -196,7 +209,13 @@ class ExplainabilityEngine:
196
  reshape_transform=reshape_transform
197
  )
198
 
199
- grayscale_cam = cam(input_tensor=inputs['pixel_values'], targets=None)
 
 
 
 
 
 
200
  grayscale_cam = grayscale_cam[0, :]
201
 
202
  # Smoothing
 
146
  # 3. Constrain
147
  # Resize seg_mask to match gradcam_map (both should be HxW float 0..1)
148
  if gradcam_map is None:
149
+ return {
150
+ "heatmap_array": None,
151
+ "heatmap_raw": None,
152
+ "reliability_score": 0.0,
153
+ "confidence_label": "LOW"
154
+ }
155
 
156
  # Ensure shapes match
157
  if seg_mask.shape != gradcam_map.shape:
 
189
  inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
190
 
191
  # Wrapper
192
+ # Robust get for attention_mask (some processors might not return it for image-only flows, though text is here)
193
+ input_ids = inputs.get('input_ids')
194
+ attention_mask = inputs.get('attention_mask')
195
+
196
+ if input_ids is None:
197
+ logger.error("Explainability: Missing input_ids in processor output")
198
+ return None
199
+
200
  model_wrapper_cam = HuggingFaceWeirdCLIPWrapper(
201
+ self.model, input_ids, attention_mask
202
  )
203
 
204
  target_layers = [self.model.vision_model.post_layernorm]
 
209
  reshape_transform=reshape_transform
210
  )
211
 
212
+ # GradCAM needs pixel_values
213
+ pixel_values = inputs.get('pixel_values')
214
+ if pixel_values is None:
215
+ logger.error("Explainability: Missing pixel_values")
216
+ return None
217
+
218
+ grayscale_cam = cam(input_tensor=pixel_values, targets=None)
219
  grayscale_cam = grayscale_cam[0, :]
220
 
221
  # Smoothing