kerzel committed on
Commit
9e3d421
·
1 Parent(s): dd24afe

remove some debug output

Browse files
Files changed (2) hide show
  1. app.py +9 -7
  2. utils.py +23 -23
app.py CHANGED
@@ -57,7 +57,8 @@ def damage_classification(SEM_image,image_threshold, model1_threshold, model2_th
57
  ##
58
  logging.debug('---------------: clustering :=====================')
59
  all_centroids = clustering.get_centroids(SEM_image, image_threshold=image_threshold,
60
- fill_holes=True, filter_close_centroids=True)
 
61
 
62
  for i in range(len(all_centroids)) :
63
  key = (all_centroids[i][0],all_centroids[i][1])
@@ -68,8 +69,10 @@ def damage_classification(SEM_image,image_threshold, model1_threshold, model2_th
68
  ##
69
  logging.debug('---------------: prepare model 1 :=====================')
70
  images_model1 = utils.prepare_classifier_input(SEM_image, all_centroids, window_size=model1_windowsize)
71
- from utils import debug_classification_input
72
- debug_classification_input(images_model1)
 
 
73
 
74
  logging.debug('---------------: run model 1 :=====================')
75
  #y1_pred = model1.predict(np.asarray(images_model1, float))
@@ -89,7 +92,6 @@ def damage_classification(SEM_image,image_threshold, model1_threshold, model2_th
89
  logging.debug(f"Model 1 predictions shape: {y1_pred.shape}")
90
  logging.debug(f"Model 1 predictions sample: {y1_pred[:3] if len(y1_pred) > 0 else 'Empty'}")
91
 
92
- logging.info('---------------: model1 threshold :=====================')
93
  # Handle predictions based on their shape
94
  if len(y1_pred.shape) == 2:
95
  # Predictions are 2D: (batch_size, num_classes)
@@ -101,10 +103,10 @@ def damage_classification(SEM_image,image_threshold, model1_threshold, model2_th
101
  raise ValueError(f"Unexpected prediction shape: {y1_pred.shape}")
102
 
103
 
104
- logging.info('---------------: model1 threshold :=====================')
105
  inclusions = np.where(inclusions > model1_threshold)
106
- logging.info('Inclusions found at indices:')
107
- logging.info(inclusions)
108
 
109
 
110
  logging.debug('---------------: model 1 update dict :=====================')
 
57
  ##
58
  logging.debug('---------------: clustering :=====================')
59
  all_centroids = clustering.get_centroids(SEM_image, image_threshold=image_threshold,
60
+ fill_holes=True, filter_close_centroids=True,
61
+ filter_radius=90)
62
 
63
  for i in range(len(all_centroids)) :
64
  key = (all_centroids[i][0],all_centroids[i][1])
 
69
  ##
70
  logging.debug('---------------: prepare model 1 :=====================')
71
  images_model1 = utils.prepare_classifier_input(SEM_image, all_centroids, window_size=model1_windowsize)
72
+
73
+ # debugging function to check the input to the classifier
74
+ #from utils import debug_classification_input
75
+ #debug_classification_input(images_model1)
76
 
77
  logging.debug('---------------: run model 1 :=====================')
78
  #y1_pred = model1.predict(np.asarray(images_model1, float))
 
92
  logging.debug(f"Model 1 predictions shape: {y1_pred.shape}")
93
  logging.debug(f"Model 1 predictions sample: {y1_pred[:3] if len(y1_pred) > 0 else 'Empty'}")
94
 
 
95
  # Handle predictions based on their shape
96
  if len(y1_pred.shape) == 2:
97
  # Predictions are 2D: (batch_size, num_classes)
 
103
  raise ValueError(f"Unexpected prediction shape: {y1_pred.shape}")
104
 
105
 
106
+ logging.debug('---------------: model1 threshold :=====================')
107
  inclusions = np.where(inclusions > model1_threshold)
108
+ logging.debug('Inclusions found at indices:')
109
+ logging.debug(inclusions)
110
 
111
 
112
  logging.debug('---------------: model 1 update dict :=====================')
utils.py CHANGED
@@ -48,19 +48,19 @@ def show_boxes(image : np.ndarray, damage_sites : dict, box_size = [250,250],
48
  save_image (bool, optional): Save the image with the boxes or not. Defaults to False.
49
  image_path (str, optional) : Full path and name of the output file to be saved.
50
  """
51
- logging.info(f"show_boxes: Input image type: {type(image)}") # Added logging
52
 
53
  # Ensure image is a NumPy array of appropriate type for matplotlib
54
  if isinstance(image, Image.Image):
55
  image_to_plot = np.array(image.convert('L')) # Convert to grayscale NumPy array
56
- logging.info("show_boxes: Converted PIL Image to grayscale NumPy array for plotting.")
57
  elif isinstance(image, np.ndarray):
58
  if image.ndim == 3 and image.shape[2] in [3,4]: # RGB or RGBA NumPy array
59
  image_to_plot = np.mean(image, axis=2).astype(image.dtype) # Convert to grayscale
60
- logging.info("show_boxes: Converted multi-channel NumPy array to grayscale for plotting.")
61
  else: # Assume grayscale already
62
  image_to_plot = image
63
- logging.info("show_boxes: Image is already a grayscale NumPy array.")
64
  else:
65
  logging.error("show_boxes: Unsupported image format received.")
66
  image_to_plot = np.zeros((100,100), dtype=np.uint8) # Fallback to black image
@@ -253,7 +253,7 @@ def prepare_classifier_input(
253
  List of extracted and normalized 3-channel image patches, each with shape (height, width, 3). Only
254
  centroids that allow full window extraction within image bounds are used.
255
  """
256
- logging.info(f"prepare_classifier_input: Input panorama type: {type(panorama)}")
257
 
258
  # Convert input to standardized NumPy array format
259
  panorama_array = _convert_to_grayscale_array(panorama)
@@ -261,14 +261,14 @@ def prepare_classifier_input(
261
  # Ensure we have the correct dimensions
262
  if panorama_array.ndim == 2:
263
  H, W = panorama_array.shape
264
- logging.info("prepare_classifier_input: Working with 2D grayscale array.")
265
  elif panorama_array.ndim == 3:
266
  H, W, C = panorama_array.shape
267
  if C == 1:
268
  # Squeeze the single channel dimension for easier processing
269
  panorama_array = panorama_array.squeeze(axis=2)
270
  H, W = panorama_array.shape
271
- logging.info("prepare_classifier_input: Squeezed single channel dimension.")
272
  else:
273
  logging.error(f"prepare_classifier_input: Unexpected number of channels: {C}")
274
  raise ValueError(f"Expected 1 channel, got {C}")
@@ -369,20 +369,20 @@ def _convert_to_grayscale_array(panorama: Union[Image.Image, np.ndarray]) -> np.
369
  if panorama.mode in ['RGB', 'RGBA']:
370
  # Convert to grayscale
371
  panorama_array = np.array(panorama.convert('L'))
372
- logging.info("_convert_to_grayscale_array: Converted RGB/RGBA PIL Image to grayscale.")
373
  elif panorama.mode == 'L':
374
  panorama_array = np.array(panorama)
375
- logging.info("_convert_to_grayscale_array: Converted grayscale PIL Image to NumPy array.")
376
  else:
377
  # Handle other modes by converting to grayscale
378
  panorama_array = np.array(panorama.convert('L'))
379
- logging.info(f"_convert_to_grayscale_array: Converted PIL Image mode '{panorama.mode}' to grayscale.")
380
 
381
  elif isinstance(panorama, np.ndarray):
382
  if panorama.ndim == 2:
383
  # Already grayscale
384
  panorama_array = panorama.copy()
385
- logging.info("_convert_to_grayscale_array: Using existing 2D grayscale array.")
386
  elif panorama.ndim == 3:
387
  if panorama.shape[2] in [3, 4]: # RGB or RGBA
388
  # Convert to grayscale using luminance weights
@@ -390,11 +390,11 @@ def _convert_to_grayscale_array(panorama: Union[Image.Image, np.ndarray]) -> np.
390
  panorama_array = np.dot(panorama, [0.299, 0.587, 0.114]).astype(panorama.dtype)
391
  else: # RGBA
392
  panorama_array = np.dot(panorama[:, :, :3], [0.299, 0.587, 0.114]).astype(panorama.dtype)
393
- logging.info("_convert_to_grayscale_array: Converted multi-channel NumPy array to grayscale using luminance weights.")
394
  elif panorama.shape[2] == 1:
395
  # Already single channel
396
  panorama_array = panorama.copy()
397
- logging.info("_convert_to_grayscale_array: Using existing single-channel array.")
398
  else:
399
  raise ValueError(f"Unsupported number of channels: {panorama.shape[2]}")
400
  else:
@@ -498,7 +498,7 @@ def safe_classify_patches(patches: List[np.ndarray], classify_func, **kwargs) ->
498
  Classification results or None if error occurred
499
  """
500
  try:
501
- logging.info("Starting safe classification...")
502
 
503
  # Debug the input
504
  debug_classification_input(patches)
@@ -513,15 +513,15 @@ def safe_classify_patches(patches: List[np.ndarray], classify_func, **kwargs) ->
513
  for i, patch in enumerate(patches):
514
  if not patch.flags.c_contiguous:
515
  patch_clean = np.ascontiguousarray(patch)
516
- logging.info(f"Made patch {i} contiguous")
517
  else:
518
  patch_clean = patch
519
  patches_clean.append(patch_clean)
520
 
521
  # Call the actual classification function
522
- logging.info("Calling classification function...")
523
  result = classify_func(patches_clean, **kwargs)
524
- logging.info("Classification completed successfully")
525
 
526
  return result
527
 
@@ -559,8 +559,8 @@ def extract_predictions_from_tfsm(model_output):
559
  Helper function to extract predictions from TFSMLayer output.
560
  TFSMLayer often returns a dictionary with multiple outputs.
561
  """
562
- logging.info(f"Model output type: {type(model_output)}")
563
- logging.info(f"Model output keys: {model_output.keys() if isinstance(model_output, dict) else 'Not a dict'}")
564
 
565
  if isinstance(model_output, dict):
566
  # Try common output key names
@@ -568,22 +568,22 @@ def extract_predictions_from_tfsm(model_output):
568
 
569
  # First, log all available keys
570
  available_keys = list(model_output.keys())
571
- logging.info(f"Available output keys: {available_keys}")
572
 
573
  # Try to find the right output
574
  for key in possible_keys:
575
  if key in model_output:
576
- logging.info(f"Using output key: {key}")
577
  return model_output[key].numpy()
578
 
579
  # If no standard key found, use the first available key
580
  if available_keys:
581
  first_key = available_keys[0]
582
- logging.info(f"Using first available key: {first_key}")
583
  return model_output[first_key].numpy()
584
  else:
585
  raise ValueError("No output keys found in model response")
586
  else:
587
  # If it's not a dictionary, assume it's already the tensor we need
588
- logging.info("Model output is not a dictionary, using directly")
589
  return model_output.numpy() if hasattr(model_output, 'numpy') else np.array(model_output)
 
48
  save_image (bool, optional): Save the image with the boxes or not. Defaults to False.
49
  image_path (str, optional) : Full path and name of the output file to be saved.
50
  """
51
+ logging.debug(f"show_boxes: Input image type: {type(image)}")
52
 
53
  # Ensure image is a NumPy array of appropriate type for matplotlib
54
  if isinstance(image, Image.Image):
55
  image_to_plot = np.array(image.convert('L')) # Convert to grayscale NumPy array
56
+ logging.debug("show_boxes: Converted PIL Image to grayscale NumPy array for plotting.")
57
  elif isinstance(image, np.ndarray):
58
  if image.ndim == 3 and image.shape[2] in [3,4]: # RGB or RGBA NumPy array
59
  image_to_plot = np.mean(image, axis=2).astype(image.dtype) # Convert to grayscale
60
+ logging.debug("show_boxes: Converted multi-channel NumPy array to grayscale for plotting.")
61
  else: # Assume grayscale already
62
  image_to_plot = image
63
+ logging.debug("show_boxes: Image is already a grayscale NumPy array.")
64
  else:
65
  logging.error("show_boxes: Unsupported image format received.")
66
  image_to_plot = np.zeros((100,100), dtype=np.uint8) # Fallback to black image
 
253
  List of extracted and normalized 3-channel image patches, each with shape (height, width, 3). Only
254
  centroids that allow full window extraction within image bounds are used.
255
  """
256
+ logging.debug(f"prepare_classifier_input: Input panorama type: {type(panorama)}")
257
 
258
  # Convert input to standardized NumPy array format
259
  panorama_array = _convert_to_grayscale_array(panorama)
 
261
  # Ensure we have the correct dimensions
262
  if panorama_array.ndim == 2:
263
  H, W = panorama_array.shape
264
+ logging.debug("prepare_classifier_input: Working with 2D grayscale array.")
265
  elif panorama_array.ndim == 3:
266
  H, W, C = panorama_array.shape
267
  if C == 1:
268
  # Squeeze the single channel dimension for easier processing
269
  panorama_array = panorama_array.squeeze(axis=2)
270
  H, W = panorama_array.shape
271
+ logging.debug("prepare_classifier_input: Squeezed single channel dimension.")
272
  else:
273
  logging.error(f"prepare_classifier_input: Unexpected number of channels: {C}")
274
  raise ValueError(f"Expected 1 channel, got {C}")
 
369
  if panorama.mode in ['RGB', 'RGBA']:
370
  # Convert to grayscale
371
  panorama_array = np.array(panorama.convert('L'))
372
+ logging.debug("_convert_to_grayscale_array: Converted RGB/RGBA PIL Image to grayscale.")
373
  elif panorama.mode == 'L':
374
  panorama_array = np.array(panorama)
375
+ logging.debug("_convert_to_grayscale_array: Converted grayscale PIL Image to NumPy array.")
376
  else:
377
  # Handle other modes by converting to grayscale
378
  panorama_array = np.array(panorama.convert('L'))
379
+ logging.debug(f"_convert_to_grayscale_array: Converted PIL Image mode '{panorama.mode}' to grayscale.")
380
 
381
  elif isinstance(panorama, np.ndarray):
382
  if panorama.ndim == 2:
383
  # Already grayscale
384
  panorama_array = panorama.copy()
385
+ logging.debug("_convert_to_grayscale_array: Using existing 2D grayscale array.")
386
  elif panorama.ndim == 3:
387
  if panorama.shape[2] in [3, 4]: # RGB or RGBA
388
  # Convert to grayscale using luminance weights
 
390
  panorama_array = np.dot(panorama, [0.299, 0.587, 0.114]).astype(panorama.dtype)
391
  else: # RGBA
392
  panorama_array = np.dot(panorama[:, :, :3], [0.299, 0.587, 0.114]).astype(panorama.dtype)
393
+ logging.debug("_convert_to_grayscale_array: Converted multi-channel NumPy array to grayscale using luminance weights.")
394
  elif panorama.shape[2] == 1:
395
  # Already single channel
396
  panorama_array = panorama.copy()
397
+ logging.debug("_convert_to_grayscale_array: Using existing single-channel array.")
398
  else:
399
  raise ValueError(f"Unsupported number of channels: {panorama.shape[2]}")
400
  else:
 
498
  Classification results or None if error occurred
499
  """
500
  try:
501
+ logging.debug("Starting safe classification...")
502
 
503
  # Debug the input
504
  debug_classification_input(patches)
 
513
  for i, patch in enumerate(patches):
514
  if not patch.flags.c_contiguous:
515
  patch_clean = np.ascontiguousarray(patch)
516
+ logging.debug(f"Made patch {i} contiguous")
517
  else:
518
  patch_clean = patch
519
  patches_clean.append(patch_clean)
520
 
521
  # Call the actual classification function
522
+ logging.debug("Calling classification function...")
523
  result = classify_func(patches_clean, **kwargs)
524
+ logging.debug("Classification completed successfully")
525
 
526
  return result
527
 
 
559
  Helper function to extract predictions from TFSMLayer output.
560
  TFSMLayer often returns a dictionary with multiple outputs.
561
  """
562
+ logging.debug(f"Model output type: {type(model_output)}")
563
+ logging.debug(f"Model output keys: {model_output.keys() if isinstance(model_output, dict) else 'Not a dict'}")
564
 
565
  if isinstance(model_output, dict):
566
  # Try common output key names
 
568
 
569
  # First, log all available keys
570
  available_keys = list(model_output.keys())
571
+ logging.debug(f"Available output keys: {available_keys}")
572
 
573
  # Try to find the right output
574
  for key in possible_keys:
575
  if key in model_output:
576
+ logging.debug(f"Using output key: {key}")
577
  return model_output[key].numpy()
578
 
579
  # If no standard key found, use the first available key
580
  if available_keys:
581
  first_key = available_keys[0]
582
+ logging.debug(f"Using first available key: {first_key}")
583
  return model_output[first_key].numpy()
584
  else:
585
  raise ValueError("No output keys found in model response")
586
  else:
587
  # If it's not a dictionary, assume it's already the tensor we need
588
+ logging.debug("Model output is not a dictionary, using directly")
589
  return model_output.numpy() if hasattr(model_output, 'numpy') else np.array(model_output)