jovian committed on
Commit
20f63c0
·
1 Parent(s): 6256b10

two models

Browse files
Files changed (2) hide show
  1. app.py +119 -16
  2. model/best.pt +0 -3
app.py CHANGED
@@ -6,7 +6,7 @@ from sahi import AutoDetectionModel
6
  from PIL import Image
7
  import plotly.graph_objects as go
8
  import torch
9
- import spaces
10
 
11
  import os
12
  import shutil
@@ -18,40 +18,143 @@ import subprocess
18
 
19
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
20
 
 
 
 
21
 
22
  class Detection:
23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  def __init__(self):
25
- # Set the model path and confidence threshold
26
- yolov8_model_path = "./model/best_100epochs_latest.pt" # Update to your model path
 
27
 
28
- # Initialize the AutoDetectionModel
29
- self.model = AutoDetectionModel.from_pretrained(
30
  model_type='yolov8',
31
- model_path=yolov8_model_path,
32
  confidence_threshold=0.3,
33
- device='cpu' # Change to 'cuda:0' if you are using a GPU
 
 
 
 
 
 
 
34
  )
35
 
36
  def detect_from_image(self, image):
37
- # Perform sliced prediction with SAHI
38
- results = get_sliced_prediction(
39
  image=image,
40
- detection_model=self.model,
41
  slice_height=256,
42
  slice_width=256,
43
- overlap_height_ratio=0.2,
44
- overlap_width_ratio=0.2,
45
  postprocess_type='NMS',
46
  postprocess_match_metric='IOU',
47
  postprocess_match_threshold=0.1,
48
  postprocess_class_agnostic=True,
49
  )
50
 
51
- # Retrieve COCO annotations
52
- coco_annotations = results.to_coco_annotations()
53
- return coco_annotations
 
 
 
 
 
 
 
 
 
54
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  def draw_annotations(self, image, annotations):
56
  """Draw bounding boxes on the image based on COCO annotations using OpenCV."""
57
  # Define colors for each category in BGR (OpenCV uses BGR format)
@@ -217,7 +320,7 @@ def upload_image(image):
217
  """Process the uploaded image (if needed) and display it."""
218
  return image
219
 
220
- @spaces.GPU
221
  def apply_detection(image):
222
  """Run object detection on the uploaded image and return the annotated image."""
223
  # Convert image from PIL to NumPy array
 
6
  from PIL import Image
7
  import plotly.graph_objects as go
8
  import torch
9
+ #import spaces
10
 
11
  import os
12
  import shutil
 
18
 
19
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
20
 
21
+ from torchvision.ops import box_iou
22
+
23
+
24
 
25
  class Detection:
26
 
27
+ # def __init__(self):
28
+ # # Set the model path and confidence threshold
29
+ # yolov8_model_path = "./model/train_model.pt" # Update to your model path
30
+ # #yolov8_model_path = "./model/best_100epochs_latest.pt" # Update to your model path
31
+
32
+
33
+ # # Initialize the AutoDetectionModel
34
+ # self.model = AutoDetectionModel.from_pretrained(
35
+ # model_type='yolov8',
36
+ # model_path=yolov8_model_path,
37
+ # confidence_threshold=0.3,
38
+ # device='cpu' # Change to 'cuda:0' if you are using a GPU
39
+ # )
40
+
41
+ # def detect_from_image(self, image):
42
+ # # Perform sliced prediction with SAHI
43
+ # results = get_sliced_prediction(
44
+ # image=image,
45
+ # detection_model=self.model,
46
+ # slice_height=256,
47
+ # slice_width=256,
48
+ # overlap_height_ratio=0.5,
49
+ # overlap_width_ratio=0.5,
50
+ # postprocess_type='NMS',
51
+ # postprocess_match_metric='IOU',
52
+ # postprocess_match_threshold=0.1,
53
+ # postprocess_class_agnostic=True,
54
+ # )
55
+
56
+ # # Retrieve COCO annotations
57
+ # coco_annotations = results.to_coco_annotations()
58
+ # return coco_annotations
59
+
60
+
61
+
62
def __init__(self):
    """Load the two YOLOv8 detection models that are ensembled at inference time.

    Builds ``self.model1`` and ``self.model2`` as SAHI ``AutoDetectionModel``
    wrappers around two separately trained checkpoints; their predictions are
    merged later by ``combine_results``.
    """
    # Paths to the two YOLOv8 checkpoints (update if the model files move).
    yolov8_model_path1 = "./model/train_model.pt"
    yolov8_model_path2 = "./model/best_100epochs_latest.pt"

    # Use the module-level `device` ("cuda:0" when available, else "cpu")
    # instead of hard-coding 'cuda:0', which raises on CPU-only hosts.
    self.model1 = AutoDetectionModel.from_pretrained(
        model_type='yolov8',
        model_path=yolov8_model_path1,
        confidence_threshold=0.3,
        device=device,
    )

    self.model2 = AutoDetectionModel.from_pretrained(
        model_type='yolov8',
        model_path=yolov8_model_path2,
        confidence_threshold=0.3,
        device=device,
    )
81
 
82
def detect_from_image(self, image):
    """Run sliced SAHI inference with both models and merge their detections.

    :param image: input image in any format accepted by
        ``sahi.predict.get_sliced_prediction`` (path, PIL image or ndarray)
    :return: combined list of COCO-style annotation dicts from both models
    """
    def _sliced_predict(model):
        # One sliced-inference pass; slicing and postprocess settings are
        # intentionally identical for both models so their outputs are
        # directly comparable before merging.
        return get_sliced_prediction(
            image=image,
            detection_model=model,
            slice_height=256,
            slice_width=256,
            overlap_height_ratio=0.5,
            overlap_width_ratio=0.5,
            postprocess_type='NMS',
            postprocess_match_metric='IOU',
            postprocess_match_threshold=0.1,
            postprocess_class_agnostic=True,
        )

    # Predict with each model, convert to COCO annotations, then de-duplicate
    # overlapping boxes across the two result sets.
    annotations1 = _sliced_predict(self.model1).to_coco_annotations()
    annotations2 = _sliced_predict(self.model2).to_coco_annotations()
    return self.combine_results(annotations1, annotations2)
118
+
119
def combine_results(self, annotations1, annotations2, iou_threshold=0.5):
    """
    Combine two sets of COCO annotations, keeping only the higher-confidence
    prediction whenever two bounding boxes overlap above ``iou_threshold``.

    Fix over the previous version: when a model-2 box overlapped a LOWER
    confidence model-1 box, both were kept (duplicate detections). Now the
    lower-confidence annotation is replaced in place, matching the documented
    contract. The per-pair numpy/torch IoU is also replaced by plain
    arithmetic — no tensor round-trip for a scalar.

    :param annotations1: COCO annotations from model 1 (dicts with 'bbox'
        as [x, y, w, h] and 'score')
    :param annotations2: COCO annotations from model 2 (same format)
    :param iou_threshold: IoU above which two boxes are considered the same
        detection
    :return: combined annotations list
    """
    def _iou(b1, b2):
        # IoU of two COCO [x, y, w, h] boxes, computed in plain Python.
        x_left = max(b1[0], b2[0])
        y_top = max(b1[1], b2[1])
        x_right = min(b1[0] + b1[2], b2[0] + b2[2])
        y_bottom = min(b1[1] + b1[3], b2[1] + b2[3])
        inter = max(0.0, x_right - x_left) * max(0.0, y_bottom - y_top)
        union = b1[2] * b1[3] + b2[2] * b2[3] - inter
        return inter / union if union > 0 else 0.0

    combined = list(annotations1)

    for ann2 in annotations2:
        keep = True
        for idx, ann1 in enumerate(combined):
            if _iou(ann1['bbox'], ann2['bbox']) > iou_threshold:
                # Same detection seen by both models: keep the one with the
                # higher confidence, replacing in place when model 2 wins.
                if ann2['score'] > ann1['score']:
                    combined[idx] = ann2
                keep = False
                break
        if keep:
            combined.append(ann2)

    return combined
155
+
156
+ #-----------------------------------------------------------------------------------------------------------------------
157
+
158
  def draw_annotations(self, image, annotations):
159
  """Draw bounding boxes on the image based on COCO annotations using OpenCV."""
160
  # Define colors for each category in BGR (OpenCV uses BGR format)
 
320
  """Process the uploaded image (if needed) and display it."""
321
  return image
322
 
323
+ #@spaces.GPU
324
  def apply_detection(image):
325
  """Run object detection on the uploaded image and return the annotated image."""
326
  # Convert image from PIL to NumPy array
model/best.pt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:67424dbaf2d9c3f07f356a59c37187ef1a7b9f59ebabf77c5cb7f9cb9507f107
3
- size 38138560