tappyness1 committed
Commit 264e65b · 1 Parent(s): a1027a2
default to side-by-side

Files changed:
- src/pred_analysis_STEE.py +0 -595
- src/st_image_tools.py +2 -2
src/pred_analysis_STEE.py
DELETED
@@ -1,595 +0,0 @@
-import os
-import re
-import cv2
-import time
-
-import numpy as np
-import pandas as pd
-import xml.etree.ElementTree as ET
-
-from pathlib import Path
-from torchvision import transforms
-from configparser import ConfigParser, ExtendedInterpolation
-from ast import literal_eval
-
-from src.models.model import Model
-from src.models.eval.confusion_matrix import ConfusionMatrix
-
-
-def generate_inference_from_img_folder(csv_file, model_cfg, img_folder, ckpt_file,
-                                       nms_thresh, conf_thresh, device="cuda" ,csv_path=None):
-    """[Retrieve the inference information of the test images given a model checkpoint trained]
-
-    Parameters
-    ----------
-    csv_file : [str]
-        [path of the csv file containing the information of the test images]
-    model_cfg : [str]
-        [path of the model config file to use, specific to the checkpoint file]
-    img_folder : [str]
-        [folder containing the images]
-    ckpt_file : [str]
-        [path of the model checkpoint file to use for model inference]
-    nms_thresh : [float]
-        [Non-maximum suppression threshold to use for the model inference, values between 0 to 1]
-    conf_thresh : [float]
-        [Confidence threshold to use for the model inference, values between 0 to 1]
-    device : str, optional
-        [device to use for inference, option: "cuda" or "cpu"], by default "cuda"
-    csv_path : [str], optional
-        [path to save the pandas.DataFrame output as a csv], by default None i.e. csv not generated
-
-    Returns
-    -------
-    df : [pandas.DataFrame]
-        [dataframe containing the inference information of the test images]
-    """
-
-    pl_config = ConfigParser(interpolation=ExtendedInterpolation())
-    pl_config.read(model_cfg)
-
-    model_selected = Model(pl_config)
-
-    df_original = pd.read_csv(csv_file)
-    # Only perform inference on test images with at least 1 ground truth.
-    df_test = df_original[df_original['remarks_xml'] == 'Available xml file'].reset_index()
-    df_test = df_test[df_test['set_type'] == 'Test'].reset_index()
-
-    img_number = 0
-    prediction_info_list = []
-    for _,rows in df_test.iterrows():
-        img_file = rows["image_file_name"]
-        img_number += 1
-        inference_start_time = time.time()
-        img_file_path = os.path.join(img_folder,img_file)
-
-        # Perform inference on image with ckpt file with device either "cuda" or "cpu"
-        # img_inference = model_selected.inference(device='cpu', img_path=img_file_path, ckpt_path=ckpt_file)
-        img_inference = model_selected.inference(
-            device=device, img_path=img_file_path, ckpt_path=ckpt_file, nms_thresh=nms_thresh, conf_thresh=conf_thresh)
-
-        # Sieve out inference
-        predicted_boxes_unsorted = img_inference[0].tolist()
-        predicted_labels_unsorted = img_inference[1].tolist()
-        predicted_confidence_unsorted = img_inference[2].tolist()
-
-        # print(f"Pre Boxes: {predicted_boxes}")
-        # print(f"Pre Labels: {predicted_labels}")
-        # print(f"Pre Labels: {predicted_confidence}")
-
-        # Sorting input
-        predicted_boxes = [x for _,x in sorted(zip(predicted_confidence_unsorted,predicted_boxes_unsorted), reverse=True)]
-        predicted_labels = [x for _,x in sorted(zip(predicted_confidence_unsorted,predicted_labels_unsorted), reverse=True)]
-        predicted_confidence = sorted(predicted_confidence_unsorted, reverse=True)
-
-        # print(f"Post Boxes: {predicted_boxes}")
-        # print(f"Post Labels: {predicted_labels}")
-        # print(f"Post Labels: {predicted_confidence}")
-
-        predicted_boxes_int = []
-        for box in predicted_boxes:
-            box_int = [round(x) for x in box]
-            predicted_boxes_int.append(box_int)
-
-        # Prepare inputs for confusion matrix
-        cm_detections_list = []
-        for prediction in range(len(predicted_boxes)):
-            detection_list = predicted_boxes[prediction]
-            detection_list.append(predicted_confidence[prediction])
-            detection_list.append(predicted_labels[prediction])
-            cm_detections_list.append(detection_list)
-
-        # Re generate predicted boxes
-        predicted_boxes = [x for _,x in sorted(zip(predicted_confidence_unsorted,predicted_boxes_unsorted), reverse=True)]
-
-        inference_time_per_image = round(time.time() - inference_start_time, 2)
-        if img_number%100 == 0:
-            print(f'Performing inference on Image {img_number}: {img_file_path}')
-            print(f'Time taken for image: {inference_time_per_image}')
-
-        prediction_info = {
-            "image_file_path": img_file_path,
-            "image_file_name": img_file,
-            "number_of_predictions": len(predicted_boxes),
-            "predicted_boxes": predicted_boxes,
-            "predicted_boxes_int": predicted_boxes_int,
-            "predicted_labels": predicted_labels,
-            "predicted_confidence": predicted_confidence,
-            "cm_detections_list": cm_detections_list,
-            "inference_time": inference_time_per_image
-        }
-        prediction_info_list.append(prediction_info)
-
-    df = pd.DataFrame(prediction_info_list)
-
-    if csv_path is not None:
-        df.to_csv(csv_path, index=False)
-        print ("Dataframe saved as csv to " + csv_path)
-
-    return df
-
-def get_gt_from_img_folder(csv_file, img_folder, xml_folder, names_file, map_start_index=1, csv_path=None):
-    """[Retrieve the ground truth information of the test images]
-
-    Parameters
-    ----------
-    csv_file : [str]
-        [path of the csv file containing the information of the test images]
-    img_folder : [str]
-        [folder containing the images]
-    xml_folder : [str]
-        [folder containing the xml files associated with the images]
-    names_file : [str]
-        [names file containing the class labels of interest]
-    map_start_index : int, optional
-        [attach a number to each class label listed in names file, starting from number given by map_start_index], by default 1
-    csv_path : [str], optional
-        [path to save the pandas.DataFrame output as a csv], by default None i.e. csv not generated
-
-    Returns
-    -------
-    df : [pandas.DataFrame]
-        [dataframe containing the ground truth information of the test images]
-    """
-
-    df_original = pd.read_csv(csv_file)
-
-    # Only perform inference on test images with at least 1 ground truth.
-    df_test = df_original[df_original['remarks_xml'] == 'Available xml file'].reset_index()
-    df_test = df_test[df_test['set_type'] == 'Test'].reset_index()
-
-    # Create a dictionary to map numeric class as class labels
-    class_labels_dict = {}
-    with open(names_file) as f:
-        for index,line in enumerate(f):
-            idx = index + map_start_index
-            class_labels = line.splitlines()[0]
-            class_labels_dict[class_labels] = idx
-
-    gt_info_list = []
-    # for img_file in os.listdir(img_folder):
-    #     if re.search(".jpg", img_file):
-    for _,rows in df_test.iterrows():
-        img_file = rows["image_file_name"]
-        # file_stem = Path(img_file_path).stem
-
-        # Get img tensor
-        img_file_path = os.path.join(img_folder,img_file)
-        img = cv2.imread(filename = img_file_path)
-        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-
-        # Get associated xml file
-        file_stem = Path(img_file_path).stem
-        xml_file_path = xml_folder + file_stem + ".xml"
-
-        tree = ET.parse(xml_file_path)
-        root = tree.getroot()
-
-        for image_detail in root.findall('size'):
-            image_width = float(image_detail.find('width').text)
-            image_height = float(image_detail.find('height').text)
-
-        class_index_list = []
-        bb_list = []
-        truncated_list = []
-        occluded_list = []
-        for item in root.findall('object'):
-            if item.find('truncated') is not None:
-                truncated = int(item.find('truncated').text)
-            else:
-                truncated = 0
-
-            if item.find('occluded').text is not None:
-                occluded = int(item.find('occluded').text)
-            else:
-                occluded = 0
-
-            for bb_details in item.findall('bndbox'):
-                class_label = item.find('name').text
-                class_index = class_labels_dict[class_label]
-                xmin = float(bb_details.find('xmin').text)
-                ymin = float(bb_details.find('ymin').text)
-                xmax = float(bb_details.find('xmax').text)
-                ymax = float(bb_details.find('ymax').text)
-
-                class_index_list.append(class_index)
-                bb_list.append([xmin,ymin,xmax,ymax])
-                truncated_list.append(truncated)
-                occluded_list.append(occluded)
-
-        transform = A.Compose([
-            A.Resize(608,608),
-            ToTensor()
-            ],
-            bbox_params=A.BboxParams(format='pascal_voc',
-                                     label_fields=['class_labels']),
-        )
-
-        augmented = transform(image=img, bboxes = bb_list, class_labels = class_index_list)
-        # img comes out as int, need to change to float.
-        img = augmented['image'].float()
-        gt_boxes = augmented['bboxes']
-        gt_boxes_list = [list(box) for box in gt_boxes]
-        gt_labels = augmented['class_labels']
-
-        gt_boxes_int = []
-        for box in gt_boxes:
-            box_int = [round(x) for x in box]
-            gt_boxes_int.append(box_int)
-
-        cm_gt_list = []
-        for gt in range(len(gt_boxes)):
-            gt_list = [gt_labels[gt]]
-            gt_list.extend(gt_boxes[gt])
-            cm_gt_list.append(gt_list)
-
-        # Calculate and Group by Size of Ground Truth
-        gt_area_list = []
-        gt_area_type = []
-        for gt_box in gt_boxes:
-            gt_area = (gt_box[3] - gt_box[1]) * (gt_box[2] - gt_box[0])
-            gt_area_list.append(gt_area)
-
-            if gt_area < 32*32:
-                area_type = "S"
-                gt_area_type.append(area_type)
-            elif gt_area < 96*96:
-                area_type = "M"
-                gt_area_type.append(area_type)
-            else:
-                area_type = "L"
-                gt_area_type.append(area_type)
-
-        gt_info = {
-            "image_file_path": img_file_path,
-            "image_file_name": img_file,
-            "image_width": image_width,
-            "image_height": image_height,
-            "number_of_gt": len(gt_boxes_list),
-            "gt_labels": gt_labels,
-            "gt_boxes": gt_boxes_list,
-            "gt_boxes_int": gt_boxes_int,
-            "cm_gt_list": cm_gt_list,
-            "gt_area_list": gt_area_list,
-            "gt_area_type": gt_area_type,
-            "truncated_list": truncated_list,
-            "occluded_list": occluded_list
-        }
-        gt_info_list.append(gt_info)
-
-    df = pd.DataFrame(gt_info_list)
-
-    if csv_path is not None:
-        df.to_csv(csv_path, index=False)
-        print ("Dataframe saved as csv to " + csv_path)
-
-    return df
-
-def combine_gt_predictions(csv_file, img_folder, xml_folder, names_file, model_cfg, ckpt_file, csv_save_folder,
-                           device="cuda", nms_threshold=0.1, confidence_threshold=0.7, iou_threshold=0.4, gt_statistics=True):
-    """[Retrieve the combined inference and ground truth information of the test images]
-
-    Parameters
-    ----------
-    csv_file : [str]
-        [path of the csv file containing the information of the test images]
-    img_folder : [str]
-        [folder containing the images]
-    xml_folder : [str]
-        [folder containing the xml files associated with the images]
-    names_file : [str]
-        [names file containing the class labels of interest]
-    model_cfg : [str]
-        [path of the model config file to use, specific to the checkpoint file]
-    ckpt_file : [str]
-        [path of the model checkpoint file to use for model inference]
-    csv_save_folder : [str]
-        [folder to save the generated csv files]
-    device : str, optional
-        [device to use for inference, option: "cuda" or "cpu"], by default "cuda"
-    nms_threshold : float, optional
-        [Non-maximum suppression threshold to use for the model inference, values between 0 to 1], by default 0.1
-    confidence_threshold : float, optional
-        [Confidence threshold to use for the model inference, values between 0 to 1], by default 0.7
-    iou_threshold : float, optional
-        [IOU threshold to use for identifying true positives from the predictions and ground truth], by default 0.4
-    gt_statistics : bool, optional
-        [option to generate the df_gt_analysis], by default True
-
-    Returns
-    -------
-    df_full : [pandas.DataFrame]
-        [dataframe containing the combined inference and ground truth information of the test images by image]
-    df_gt_analysis : pandas.DataFrame, optional
-        [dataframe containing the combined inference and ground truth information of the test images by ground truth]
-    """
-
-    print(f"NMS Threshold: {nms_threshold}")
-    print(f"Confidence Threshold: {confidence_threshold}")
-    print(f"IOU Threshold: {iou_threshold}")
-
-    df_gt = get_gt_from_img_folder(
-        csv_file, img_folder, xml_folder, names_file)
-    print("Successful Generation of Ground Truth Information")
-    df_predictions = generate_inference_from_img_folder(
-        csv_file, model_cfg, img_folder, ckpt_file,
-        nms_thresh=nms_threshold, conf_thresh=confidence_threshold, device=device)
-    print("Successful Generation of Inference")
-
-    df_all = pd.merge(df_gt, df_predictions, how='left', on=["image_file_path", "image_file_name"])
-    print("Successful Merging")
-
-    class_labels_list = []
-    with open(names_file) as f:
-        for index,line in enumerate(f):
-            class_labels = line.splitlines()[0]
-            class_labels_list.append(class_labels)
-
-    combined_info_list = []
-    for _,rows in df_all.iterrows():
-        img_file = rows["image_file_name"]
-        predicted_boxes = rows["predicted_boxes"]
-        predicted_labels = rows["predicted_labels"]
-        predicted_confidence = rows["predicted_confidence"]
-        gt_boxes = rows["gt_boxes"]
-        gt_labels = rows["gt_labels"]
-        cm_gt_list = rows["cm_gt_list"]
-        cm_detections_list = rows["cm_detections_list"]
-
-        if rows["number_of_predictions"] == 0:
-            # Ground Truth Analysis
-            gt_summary_list = []
-            gt_match_list = []
-            gt_match_idx_list = []
-            gt_match_idx_conf_list = []
-            gt_match_idx_bb_list = []
-            for idx in range(len(gt_labels)):
-                gt_summary = "NO"
-                match = ["GT", idx, "-"]
-                match_idx = "-"
-                match_bb = "-"
-                gt_summary_list.append(gt_summary)
-                gt_match_list.append(tuple(match))
-                gt_match_idx_list.append(match_idx)
-                gt_match_idx_conf_list.append(match_idx)
-                gt_match_idx_bb_list.append(match_bb)
-
-            combined_info = {
-                "image_file_name": img_file,
-                "number_of_predictions_conf": [],
-                "predicted_labels_conf": [],
-                "predicted_confidence_conf": [],
-                "num_matches": [],
-                "num_mismatch": [],
-                "labels_hit": [],
-                "pairs_mislabel_gt_prediction": [],
-                "gt_match_idx_list": gt_match_idx_list,
-                "gt_match_idx_conf_list": gt_match_idx_conf_list,
-                "gt_match_idx_bb_list": gt_match_idx_bb_list,
-                "prediction_match": [],
-                "gt_analysis": gt_summary_list,
-                "prediction_analysis": [],
-                "gt_match": gt_match_list
-            }
-
-        else:
-
-            # Generate Confusion Matrix with their corresponding matches
-            CM = ConfusionMatrix(
-                num_classes=len(class_labels_list)+1,
-                CONF_THRESHOLD = confidence_threshold,
-                IOU_THRESHOLD = iou_threshold)
-
-            matching_boxes = CM.process_batch(
-                detections=np.asarray(cm_detections_list),
-                labels=np.asarray(cm_gt_list),
-                return_matches=True)
-
-            predicted_confidence_count = len([confidence for confidence in predicted_confidence if confidence > confidence_threshold])
-            predicted_confidence_round = [round(confidence, 4) for confidence in predicted_confidence]
-
-            predicted_confidence_conf = predicted_confidence_round[:predicted_confidence_count]
-            predicted_labels_conf = predicted_labels[:predicted_confidence_count]
-            predicted_boxes_conf = predicted_boxes[:predicted_confidence_count]
-
-            number_of_predictions_conf = len(predicted_labels_conf)
-
-            match_correct_list = []
-            match_wrong_list = []
-            gt_matched_idx_dict = {}
-            predicted_matched_idx_dict = {}
-            gt_mismatch_idx_dict = {}
-            predicted_mismatch_idx_dict = {}
-            labels_hit = []
-            pairs_mislabel_gt_prediction = []
-
-            for match in matching_boxes:
-                gt_idx = int(match[0])
-                predicted_idx = int(match[1])
-                iou = round(match[2], 4)
-                match = [gt_idx, predicted_idx, iou]
-
-                if gt_labels[gt_idx] == predicted_labels_conf[predicted_idx]:
-                    match_correct_list.append(match)
-                    gt_matched_idx_dict[gt_idx] = match
-                    predicted_matched_idx_dict[predicted_idx] = match
-                    labels_hit.append(gt_labels[gt_idx])
-                else:
-                    match_wrong_list.append(match)
-                    gt_mismatch_idx_dict[gt_idx] = match
-                    predicted_mismatch_idx_dict[predicted_idx] = match
-                    pairs_mislabel_gt_prediction.append(
-                        [gt_labels[gt_idx],predicted_labels_conf[predicted_idx]])
-
-            # Ground Truth Analysis
-            gt_summary_list = []
-            gt_match_list = []
-            gt_match_idx_list = []
-            gt_match_idx_conf_list = []
-            gt_match_idx_bb_list = []
-            for idx in range(len(gt_labels)):
-                if idx in gt_matched_idx_dict.keys():
-                    gt_summary = "MATCH"
-                    match = gt_matched_idx_dict[idx]
-                    match_idx = predicted_labels_conf[match[1]]
-                    match_conf = predicted_confidence_conf[match[1]]
-                    match_bb = predicted_boxes_conf[match[1]]
-                elif idx in gt_mismatch_idx_dict.keys():
-                    gt_summary = "MISMATCH"
-                    match = gt_mismatch_idx_dict[idx]
-                    match_idx = predicted_labels_conf[match[1]]
-                    match_conf = predicted_confidence_conf[match[1]]
-                    match_bb = predicted_boxes_conf[match[1]]
-                else:
-                    gt_summary = "NO"
-                    match = ["GT", idx, "-"]
-                    match_idx = "-"
-                    match_conf = "-"
-                    match_bb = "-"
-                gt_summary_list.append(gt_summary)
-                gt_match_list.append(tuple(match))
-                gt_match_idx_list.append(match_idx)
-                gt_match_idx_conf_list.append(match_conf)
-                gt_match_idx_bb_list.append(match_bb)
-
-            # Prediction Analysis
-            prediction_summary_list = []
-            prediction_match_list = []
-            for idx in range(len(predicted_labels_conf)):
-                if idx in predicted_matched_idx_dict.keys():
-                    prediction_summary = "MATCH"
-                    match = predicted_matched_idx_dict[idx]
-                elif idx in predicted_mismatch_idx_dict.keys():
-                    prediction_summary = "MISMATCH"
-                    match = predicted_mismatch_idx_dict[idx]
-                else:
-                    prediction_summary = "NO"
-                    match = [idx, "P", "-"]
-                prediction_summary_list.append(prediction_summary)
-                prediction_match_list.append(tuple(match))
-
-            combined_info = {
-                "image_file_name": img_file,
-                "number_of_predictions_conf": number_of_predictions_conf,
-                "predicted_labels_conf": predicted_labels_conf,
-                "predicted_confidence_conf": predicted_confidence_conf,
-                "num_matches": len(match_correct_list),
-                "num_mismatch": len(match_wrong_list),
-                "labels_hit": labels_hit,
-                "pairs_mislabel_gt_prediction": pairs_mislabel_gt_prediction,
-                "gt_match_idx_list": gt_match_idx_list,
-                "gt_match_idx_conf_list": gt_match_idx_conf_list,
-                "gt_match_idx_bb_list": gt_match_idx_bb_list,
-                "gt_match": gt_match_list,
-                "prediction_match": prediction_match_list,
-                "gt_analysis": gt_summary_list,
-                "prediction_analysis": prediction_summary_list
-            }
-
-        combined_info_list.append(combined_info)
-
-    df_combined = pd.DataFrame(combined_info_list)
-
-    df_full = pd.merge(df_all, df_combined , how='left', on=["image_file_name"])
-
-    csv_path_combined = f"{csv_save_folder}df_inference_details_nms_{nms_threshold}_conf_{confidence_threshold}_iou_{iou_threshold}.csv"
-
-    df_full.to_csv(csv_path_combined, index=False)
-    print ("Dataframe saved as csv to " + csv_path_combined)
-
-    if gt_statistics:
-        print("Generating Statistics for Single Ground Truth")
-        csv_path_gt = f"{csv_save_folder}df_gt_details_nms_{nms_threshold}_conf_{confidence_threshold}_iou_{iou_threshold}.csv"
-        df_gt_analysis = __get_single_gt_analysis(csv_output=csv_path_gt, df_input=df_full)
-
-        return df_full, df_gt_analysis
-
-    else:
-        return df_full
-
-def __get_single_gt_analysis(csv_output, df_input=None,csv_input=None):
-
-    if df_input is None:
-        df_gt = pd.read_csv(csv_input)
-
-        # Apply literal eval of columns containing information on Ground Truth
-        df_gt.gt_labels = df_gt.gt_labels.apply(literal_eval)
-        df_gt.gt_boxes = df_gt.gt_boxes.apply(literal_eval)
-        df_gt.gt_boxes_int = df_gt.gt_boxes_int.apply(literal_eval)
-        df_gt.gt_area_list = df_gt.gt_area_list.apply(literal_eval)
-        df_gt.gt_area_type = df_gt.gt_area_type.apply(literal_eval)
-        df_gt.truncated_list = df_gt.truncated_list.apply(literal_eval)
-        df_gt.occluded_list = df_gt.occluded_list.apply(literal_eval)
-        df_gt.gt_match_idx_list = df_gt.gt_match_idx_list.apply(literal_eval)
-        df_gt.gt_match_idx_conf_list = df_gt.gt_match_idx_conf_list.apply(literal_eval)
-        df_gt.gt_match_idx_bb_list = df_gt.gt_match_idx_bb_list.apply(literal_eval)
-        df_gt.gt_match = df_gt.gt_match.apply(literal_eval)
-        df_gt.gt_analysis = df_gt.gt_analysis.apply(literal_eval)
-
-    else:
-        df_gt = df_input
-
-    gt_info_list = []
-    for _,rows in df_gt.iterrows():
-        # print(rows["image_file_name"])
-        for idx in range(rows["number_of_gt"]):
-            df_gt_image_dict = {
-                "GT_Image": rows["image_file_name"],
-                "GT_Label": rows["gt_labels"][idx],
-                "GT_Boxes": rows["gt_boxes"][idx],
-                "GT_Boxes_Int": rows["gt_boxes_int"][idx],
-                "GT_Area": rows["gt_area_list"][idx],
-                "GT_Area_Type": rows["gt_area_type"][idx],
-                "Truncated": rows["truncated_list"][idx],
-                "Occluded": rows["occluded_list"][idx],
-                "GT_Match": rows["gt_match"][idx],
-                "IOU": rows["gt_match"][idx][2],
-                "GT_Match_IDX": rows["gt_match_idx_list"][idx],
-                "GT_Confidence_IDX": rows["gt_match_idx_conf_list"][idx],
-                "GT_Predicted_Boxes_IDX": rows["gt_match_idx_bb_list"][idx],
-                "GT_Analysis": rows["gt_analysis"][idx]
-            }
-            gt_info_list.append(df_gt_image_dict)
-
-    df_final = pd.DataFrame(gt_info_list)
-    df_final = df_final.reset_index(drop=True)
-
-    df_final.to_csv(csv_output, index=False)
-    print ("Dataframe saved as csv to " + csv_output)
-
-    return df_final
-
-if __name__ == '__main__':
-
-    combine_gt_predictions(
-        csv_file="/polyaxon-data/workspace/stee/voc_image_annotations_batch123.csv",
-        img_folder="/polyaxon-data/workspace/stee/data_batch123",
-        xml_folder="/polyaxon-data/workspace/stee/data_batch123/Annotations/",
-        names_file="/polyaxon-data/workspace/stee/data_batch123/obj.names",
-        model_cfg="cfg/cfg_frcn.ini",
-        ckpt_file="/polyaxon-data/workspace/stee/andy/epoch=99-step=61899.ckpt",
-        csv_save_folder="/polyaxon-data/workspace/stee/andy/generation/",
-        nms_threshold=0.9,
-        confidence_threshold=0.3,
-        iou_threshold=0.4,
-        gt_statistics=False)
src/st_image_tools.py
CHANGED
@@ -279,7 +279,7 @@ class ImageTool:
         masked_img = np.where(pred_mask[...,None], colour, img_pred)
         masked_img = masked_img.astype(np.uint8)
 
-        img_pred = cv2.addWeighted(img_pred, 0.
+        img_pred = cv2.addWeighted(img_pred, 0.7, masked_img, 0.3, 0)
 
         def put_text_ina_mask(output, img):
 
@@ -319,7 +319,7 @@ class ImageTool:
 
             return img
 
-        img_gt = cv2.addWeighted(img_gt, 0.
+        img_gt = cv2.addWeighted(img_gt, 0.7, masked_img, 0.3,0)
 
         for output in gt_outputs:
             img_gt = put_text_ina_mask(output, img_gt)
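
For context on the two changed lines: cv2.addWeighted(src1, alpha, src2, beta, gamma) computes src1*alpha + src2*beta + gamma, so the 0.7/0.3 weights keep the base image dominant with the coloured mask blended in at 30% opacity. A minimal sketch of the same overlay pattern, with made-up image and mask data standing in for the class's actual inputs:

import cv2
import numpy as np

img_pred = np.full((100, 100, 3), 200, dtype=np.uint8)  # stand-in base image
pred_mask = np.zeros((100, 100), dtype=bool)
pred_mask[20:60, 30:70] = True                          # stand-in predicted region
colour = np.array([0, 0, 255], dtype=np.uint8)          # overlay colour

# Paint the mask region with the colour, then blend: 70% original, 30% overlay
masked_img = np.where(pred_mask[..., None], colour, img_pred).astype(np.uint8)
img_pred = cv2.addWeighted(img_pred, 0.7, masked_img, 0.3, 0)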