## NOTE(review): removed non-Python residue copied from the HuggingFace Spaces
## web page (status text, git blob hashes, line-number gutter) — it was not
## valid Python and would have broken the module at import time.
import urllib.request
import tempfile
## Urls and model variables that might change.
## If changing any of these, think about other places in repos where they might need changing (e.g. weights url inside config file).
OPTIMAL_NMS_THRESHOLD = 0.7
model_page = "https://huggingface.co/TZTestAnalysis/final_tz_segmentor"
_model_config_url = model_page + "/resolve/main/final_model_config.yaml"
MODEL_VERSION = "v1.0"
discussion_url = 'https://huggingface.co/spaces/TZTestAnalysis/OrchAId/discussions'
github_repo_url = 'https://github.com/JATamura/TZSegmenting'
def get_set_up():
    """Print torch/CUDA environment information for debugging.

    Reports the installed torch version (major.minor), the CUDA build tag
    embedded in ``torch.__version__``, whether a GPU is available, and — only
    when one is — its compute capability.

    :return: None; output goes to stdout.
    """
    import torch
    TORCH_VERSION = ".".join(torch.__version__.split(".")[:2])
    CUDA_VERSION = torch.__version__.split("+")[-1]
    print("torch: ", TORCH_VERSION, "; cuda: ", CUDA_VERSION)
    print(f'GPU available: {torch.cuda.is_available()}')
    # Bug fix: get_device_capability() raises a RuntimeError on CPU-only
    # machines, so only query it when a CUDA device actually exists.
    if torch.cuda.is_available():
        print(torch.cuda.get_device_capability())
    # print("detectron2:", detectron2.__version__)
def load_model(using_final_model: bool = True):
    """
    Load and configure a Detectron2 model predictor.

    Downloads the model configuration YAML from the model page URL, merges it
    into a fresh Detectron2 config, selects GPU or CPU depending on what is
    available, and builds a ``DefaultPredictor``.

    :param using_final_model: When True, disable federated loss
        (``MODEL.ROI_BOX_HEAD.USE_FED_LOSS``). The final model
        (final_tz_segmentor) was trained with federated loss, which needs the
        training data to compute class imbalance; that data is unavailable to
        the app and leaving the flag enabled causes a runtime error. Messages
        about unused weights are expected — those weights are only consumed
        during training and do not affect inference.
    :return: A Detectron2 predictor object configured and ready for inference.
    """
    import os
    import torch
    from detectron2.engine import DefaultPredictor
    from detectron2.config import get_cfg

    cfg = get_cfg()
    # Bug fix: the previous NamedTemporaryFile approach re-opened the file by
    # name while it was still open, which fails on Windows. Writing into a
    # TemporaryDirectory sidesteps that and still cleans up automatically.
    with tempfile.TemporaryDirectory() as tmp_dir:
        cfg_path = os.path.join(tmp_dir, "model_config.yaml")
        print(cfg_path)
        urllib.request.urlretrieve(_model_config_url, filename=cfg_path)
        cfg.merge_from_file(cfg_path)

    if not torch.cuda.is_available():
        cfg.MODEL.DEVICE = "cpu"
        print('No GPU available, using CPU')
    else:
        cfg.MODEL.DEVICE = 'cuda'
        print('Using GPU')

    if using_final_model:
        ## when rerouting to use the final model (final_tz_segmentor) USE_FED_LOSS has to be set to False
        ## this setting requires the training data to calculate class imbalance that the app will not have access to and cause a runtime error
        cfg.MODEL.ROI_BOX_HEAD.USE_FED_LOSS = False

    predictor = DefaultPredictor(cfg)
    return predictor
def mask_nms(masks, scores, nms_threshold=OPTIMAL_NMS_THRESHOLD):
    """
    Run class-agnostic NMS on masks/segmentations instead of bounding boxes.

    :param masks: (list) Binary masks output by the model, one per detection.
    :param scores: (list float) Confidence score for each corresponding mask.
    :param nms_threshold: (float) IoU threshold above which the lower-scoring
        mask is suppressed.
    :return: (list int) Indices of the masks kept after NMS, in descending
        score order.
    """
    import supervision as sv
    from shapely.geometry.polygon import Polygon

    # Convert each mask to its first polygon outline; masks that yield no
    # contour become empty polygons with zero area.
    polygons = []
    for mask in masks:
        contours = sv.mask_to_polygons(mask)
        polygons.append(Polygon(contours[0]) if len(contours) > 0 else Polygon([]))

    order = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)
    masks_kept = []
    while order:
        i = order.pop(0)
        masks_kept.append(i)
        survivors = []
        for j in order:
            # Calculate the IoU between the two polygons.
            intersection = polygons[i].intersection(polygons[j]).area
            union = polygons[i].union(polygons[j]).area
            # Bug fix: guard against 0/0 when both polygons are empty — treat
            # two area-less polygons as non-overlapping (IoU 0).
            iou = intersection / union if union > 0 else 0.0
            # Bug fix: the original removed items from `order` while iterating
            # it, which skips elements and lets some overlapping masks survive.
            # Keep only the candidates below the threshold instead.
            if iou <= nms_threshold:
                survivors.append(j)
        order = survivors
    return masks_kept
def apply_nms(prediction, mask=False, cls_agnostic_nms=OPTIMAL_NMS_THRESHOLD):
    """
    Apply Non-Maximum Suppression (NMS) to filter redundant detections from
    the output of an object detection model.

    When ``mask`` is False, standard box-based NMS (``torchvision.ops.nms``)
    is run on the predicted boxes; when True, mask-based class-agnostic NMS
    (``mask_nms``) is run on the predicted masks instead. In both cases the
    surviving indices are used to build a new ``Instances`` object.

    :param prediction: Dictionary whose "instances" key holds a Detectron2
        ``Instances`` object with ``pred_boxes``, ``scores``, ``pred_classes``
        and ``pred_masks`` attributes.
    :type prediction: Dict[str, Instances]
    :param mask: If True, apply NMS to instance masks rather than boxes.
        Defaults to ``False``.
    :type mask: bool
    :param cls_agnostic_nms: IoU threshold above which overlapping detections
        are suppressed. Defaults to ``OPTIMAL_NMS_THRESHOLD``.
    :type cls_agnostic_nms: float
    :return: Dictionary ``{"instances": Instances}`` containing only the
        detections kept by NMS.
    """
    from torchvision.ops import nms
    from detectron2.structures import Instances

    print(f'applying nms with threshold {cls_agnostic_nms} and mask {mask}... \n')
    if mask:
        nms_indices = mask_nms(prediction["instances"].pred_masks.cpu().numpy(),
                               prediction["instances"]._fields["scores"], cls_agnostic_nms)
    else:
        nms_indices = nms(prediction["instances"].pred_boxes.tensor,
                          prediction["instances"].scores, cls_agnostic_nms)
    pred = {"instances": Instances(image_size=prediction["instances"].image_size,
                                   pred_boxes=prediction["instances"].pred_boxes[nms_indices],
                                   scores=prediction["instances"].scores[nms_indices],
                                   pred_classes=prediction["instances"].pred_classes[nms_indices],
                                   pred_masks=prediction["instances"].pred_masks[nms_indices])}
    return pred
if __name__ == "__main__":
    # Quick manual smoke test: fetch the remote config and build the predictor.
    load_model()