kiurtis committed on
Commit
1e05c4e
·
1 Parent(s): 54cd55e

back to dummy model to check cpu

Browse files
Files changed (1) hide show
  1. tasks/image.py +27 -153
tasks/image.py CHANGED
@@ -1,46 +1,22 @@
1
  from fastapi import APIRouter
2
- print(1)
3
  from datetime import datetime
4
- print(2)
5
  from datasets import load_dataset
6
- print(3)
7
  import numpy as np
8
- print(4)
9
  from sklearn.metrics import accuracy_score, precision_score, recall_score
10
- print(5)
11
  import random
12
- print(6)
13
  import os
14
- print(7)
15
- from ultralytics import YOLO
16
- print(8)
17
 
18
  from .utils.evaluation import ImageEvaluationRequest
19
- print(9)
20
  from .utils.emissions import tracker, clean_emissions_data, get_space_info
21
- print(10)
22
 
23
  from dotenv import load_dotenv
24
- print(11)
25
  load_dotenv()
26
- print(12)
27
 
28
  router = APIRouter()
29
- print(13)
30
 
31
- import torch
32
-
33
- # Get CUDA version (the one PyTorch was compiled with)
34
- print("CUDA version:", torch.version.cuda)
35
-
36
- # Get cuDNN version
37
- print("cuDNN version:", torch.backends.cudnn.version())
38
-
39
- #MODEL_TYPE = "YOLOv11n"
40
- DESCRIPTION = f"YOLOv11n 1280 not quantisized model with batch 1 inference on TensorRT"
41
- print(14)
42
  ROUTE = "/image"
43
- print(15)
44
  def parse_boxes(annotation_string):
45
  """Parse multiple boxes from a single annotation string.
46
  Each box has 5 values: class_id, x_center, y_center, width, height"""
@@ -53,7 +29,7 @@ def parse_boxes(annotation_string):
53
  box = values[i+1:i+5]
54
  boxes.append(box)
55
  return boxes
56
- print(16)
57
  def compute_iou(box1, box2):
58
  """Compute Intersection over Union (IoU) between two YOLO format boxes."""
59
  # Convert YOLO format (x_center, y_center, width, height) to corners
@@ -82,7 +58,7 @@ def compute_iou(box1, box2):
82
  union = box1_area + box2_area - intersection
83
 
84
  return intersection / (union + 1e-6)
85
- print(17)
86
  def compute_max_iou(true_boxes, pred_box):
87
  """Compute maximum IoU between a predicted box and all true boxes"""
88
  max_iou = 0
@@ -90,17 +66,7 @@ def compute_max_iou(true_boxes, pred_box):
90
  iou = compute_iou(true_box, pred_box)
91
  max_iou = max(max_iou, iou)
92
  return max_iou
93
- print(18)
94
- def load_model(path_to_model, model_type="YOLO"):
95
- if model_type == "YOLO":
96
- model = YOLO(path_to_model)
97
- else:
98
- raise NotImplementedError
99
- return model
100
- print(19)
101
- def get_boxes_list(predictions):
102
- return [box.tolist() for box in predictions.boxes.xywhn]
103
- print(20)
104
  @router.post(ROUTE, tags=["Image Task"],
105
  description=DESCRIPTION)
106
  async def evaluate_image(request: ImageEvaluationRequest):
@@ -134,135 +100,44 @@ async def evaluate_image(request: ImageEvaluationRequest):
134
  # Update the code below to replace the random baseline with your model inference
135
  #--------------------------------------------------------------------------------------------
136
 
137
- import cv2
138
- import onnxruntime
139
- import matplotlib.pyplot as plt
140
-
141
- #PATH_TO_MODEL = 'models/best_YOLOv11n_1280.onnx'
142
- #PATH_TO_MODEL = 'models/best_yolov6n_1280.pt'
143
- #PATH_TO_MODEL = 'models/best_YOLOv11n_1280_real_half.onnx'
144
- PATH_TO_MODEL = 'models/best_YOLOv11n_1280.pt'
145
- INFERENCE_ENGINE_TYPE = 'pt'
146
- INPUT_SIZE = 1280
147
- N_TEST_BATCHES = 2
148
- BATCH_SIZE = 32 # Can be adjusted as needed
149
- print("PATH_TO_MODEL", PATH_TO_MODEL)
150
-
151
- print("Starting inference")
152
  predictions = []
153
  true_labels = []
154
  pred_boxes = []
155
  true_boxes_list = [] # List of lists, each inner list contains boxes for one image
156
 
157
- n_examples = len(test_dataset)
158
- n_boxes = []
159
- model = YOLO(PATH_TO_MODEL)
160
- print("PATH_TO_MODEL", PATH_TO_MODEL)
161
-
162
- # First pass - process annotations
163
- has_smoke_list = []
164
- annotations_list = []
165
- for i, example in enumerate(test_dataset):
166
- if i % 200 == 0:
167
- print(f"Processing annotations {i+1} of {n_examples}")
168
  annotation = example.get("annotations", "").strip()
169
- n_annotations = len(annotation.split("\n"))
170
- n_boxes.append(n_annotations)
171
  has_smoke = len(annotation) > 0
172
- has_smoke_list.append(has_smoke)
173
  true_labels.append(int(has_smoke))
174
- annotations_list.append(annotation)
175
- if i == (N_TEST_BATCHES+1)*BATCH_SIZE-1:
176
- #break
177
- pass
178
-
179
- all_preds = []
180
- all_scores = []
181
- all_binary_classifications = []
182
- # Second pass - batch predictions
183
- for i, batch_start in enumerate(range(0, n_examples, BATCH_SIZE)):
184
- batch_end = min(batch_start + BATCH_SIZE, n_examples)
185
- if i % 100 == 0:
186
- print(f"Processing batch {batch_start//BATCH_SIZE + 1} of {(n_examples + BATCH_SIZE - 1)//BATCH_SIZE}")
187
- print(f"Batch start: {batch_start}, Batch end: {batch_end}")
188
-
189
- # Get batch of images and pad if needed
190
- batch_images = []
191
- for j in range(batch_start, batch_end):
192
- batch_images.append(test_dataset[j]['image'])
193
-
194
- # Pad the last batch if needed
195
- if len(batch_images) < BATCH_SIZE:
196
- print(f"Padding last batch from {len(batch_images)} to {BATCH_SIZE} images")
197
- padding_needed = BATCH_SIZE - len(batch_images)
198
- # Duplicate the last image to fill the batch
199
- batch_images.extend([batch_images[-1]] * padding_needed)
200
 
201
- print(f"Running predictions on {model.device}")
202
- # Get predictions for batch
203
- results = model.predict(batch_images, imgsz=INPUT_SIZE#, device="cuda"
204
- )
205
 
206
- # Only process the actual examples (not padding)
207
- actual_results = results[:batch_end-batch_start]
208
- # Process each result in batch to extract predictions, scores and classifications
209
- batch_preds = []
210
- batch_scores = []
211
- batch_binary_classifications = []
212
-
213
- for result in actual_results:
214
- boxes = result.boxes
215
- xywhn = boxes.xywhn.tolist()
216
-
217
- if len(xywhn) > 0:
218
- # Get first box coordinates and confidence score
219
- batch_preds.append(xywhn[0])
220
- batch_scores.append(boxes.conf.tolist()[0])
221
- batch_binary_classifications.append(1)
222
- else:
223
- # No boxes detected
224
- batch_preds.append([])
225
- batch_scores.append([])
226
- batch_binary_classifications.append(0)
227
-
228
- all_preds += batch_preds
229
- all_scores += batch_scores
230
- all_binary_classifications += batch_binary_classifications
231
-
232
- print("Processing predictions")
233
-
234
- if i == N_TEST_BATCHES:
235
- from collections import Counter
236
- n_box_distr = Counter(n_boxes)
237
- print(n_box_distr)
238
- #break
239
- pass
240
- pred_boxes = []
241
-
242
- for idx in range(len(all_preds)):
243
- if has_smoke_list[idx]:
244
- # Parse true boxes
245
- image_true_boxes = parse_boxes(annotations_list[idx])
246
  true_boxes_list.append(image_true_boxes)
247
 
248
- # Process predicted boxes
249
- try:
250
- if len(all_preds[idx]) < 1:
251
- model_preds = [0, 0, 0, 0]
252
- else:
253
- model_preds = all_preds[idx]
254
- except:
255
- model_preds = [0, 0, 0, 0]
256
- pred_boxes.append(model_preds)
257
-
258
- print("Processing completed with last index", idx)
259
  #--------------------------------------------------------------------------------------------
260
  # YOUR MODEL INFERENCE STOPS HERE
261
  #--------------------------------------------------------------------------------------------
262
 
263
  # Stop tracking emissions
264
  emissions_data = tracker.stop_task()
265
- predictions = all_binary_classifications
266
  # Calculate classification metrics
267
  classification_accuracy = accuracy_score(true_labels, predictions)
268
  classification_precision = precision_score(true_labels, predictions)
@@ -270,14 +145,13 @@ async def evaluate_image(request: ImageEvaluationRequest):
270
 
271
  # Calculate mean IoU for object detection (only for images with smoke)
272
  # For each image, we compute the max IoU between the predicted box and all true boxes
273
- print("Calculating mean IoU")
274
  ious = []
275
  for true_boxes, pred_box in zip(true_boxes_list, pred_boxes):
276
  max_iou = compute_max_iou(true_boxes, pred_box)
277
  ious.append(max_iou)
278
 
279
  mean_iou = float(np.mean(ious)) if ious else 0.0
280
- print("Mean IoU calculated")
281
  # Prepare results dictionary
282
  results = {
283
  "username": username,
@@ -298,5 +172,5 @@ async def evaluate_image(request: ImageEvaluationRequest):
298
  "test_seed": request.test_seed
299
  }
300
  }
301
- print("Result returned")
302
  return results
 
1
  from fastapi import APIRouter
 
2
  from datetime import datetime
 
3
  from datasets import load_dataset
 
4
  import numpy as np
 
5
  from sklearn.metrics import accuracy_score, precision_score, recall_score
 
6
  import random
 
7
  import os
 
 
 
8
 
9
  from .utils.evaluation import ImageEvaluationRequest
 
10
  from .utils.emissions import tracker, clean_emissions_data, get_space_info
 
11
 
12
  from dotenv import load_dotenv
 
13
  load_dotenv()
 
14
 
15
  router = APIRouter()
 
16
 
17
+ DESCRIPTION = "Random Baseline"
 
 
 
 
 
 
 
 
 
 
18
  ROUTE = "/image"
19
+
20
  def parse_boxes(annotation_string):
21
  """Parse multiple boxes from a single annotation string.
22
  Each box has 5 values: class_id, x_center, y_center, width, height"""
 
29
  box = values[i+1:i+5]
30
  boxes.append(box)
31
  return boxes
32
+
33
  def compute_iou(box1, box2):
34
  """Compute Intersection over Union (IoU) between two YOLO format boxes."""
35
  # Convert YOLO format (x_center, y_center, width, height) to corners
 
58
  union = box1_area + box2_area - intersection
59
 
60
  return intersection / (union + 1e-6)
61
+
62
  def compute_max_iou(true_boxes, pred_box):
63
  """Compute maximum IoU between a predicted box and all true boxes"""
64
  max_iou = 0
 
66
  iou = compute_iou(true_box, pred_box)
67
  max_iou = max(max_iou, iou)
68
  return max_iou
69
+
 
 
 
 
 
 
 
 
 
 
70
  @router.post(ROUTE, tags=["Image Task"],
71
  description=DESCRIPTION)
72
  async def evaluate_image(request: ImageEvaluationRequest):
 
100
  # Update the code below to replace the random baseline with your model inference
101
  #--------------------------------------------------------------------------------------------
102
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103
  predictions = []
104
  true_labels = []
105
  pred_boxes = []
106
  true_boxes_list = [] # List of lists, each inner list contains boxes for one image
107
 
108
+ for example in test_dataset:
109
+ # Parse true annotation (YOLO format: class_id x_center y_center width height)
 
 
 
 
 
 
 
 
 
110
  annotation = example.get("annotations", "").strip()
 
 
111
  has_smoke = len(annotation) > 0
 
112
  true_labels.append(int(has_smoke))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
113
 
114
+ # Make random classification prediction
115
+ pred_has_smoke = random.random() > 0.5
116
+ predictions.append(int(pred_has_smoke))
 
117
 
118
+ # If there's a true box, parse it and make random box prediction
119
+ if has_smoke:
120
+ # Parse all true boxes from the annotation
121
+ image_true_boxes = parse_boxes(annotation)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  true_boxes_list.append(image_true_boxes)
123
 
124
+ # For baseline, make one random box prediction per image
125
+ # In a real model, you might want to predict multiple boxes
126
+ random_box = [
127
+ random.random(), # x_center
128
+ random.random(), # y_center
129
+ random.random() * 0.5, # width (max 0.5)
130
+ random.random() * 0.5 # height (max 0.5)
131
+ ]
132
+ pred_boxes.append(random_box)
133
+
 
134
  #--------------------------------------------------------------------------------------------
135
  # YOUR MODEL INFERENCE STOPS HERE
136
  #--------------------------------------------------------------------------------------------
137
 
138
  # Stop tracking emissions
139
  emissions_data = tracker.stop_task()
140
+
141
  # Calculate classification metrics
142
  classification_accuracy = accuracy_score(true_labels, predictions)
143
  classification_precision = precision_score(true_labels, predictions)
 
145
 
146
  # Calculate mean IoU for object detection (only for images with smoke)
147
  # For each image, we compute the max IoU between the predicted box and all true boxes
 
148
  ious = []
149
  for true_boxes, pred_box in zip(true_boxes_list, pred_boxes):
150
  max_iou = compute_max_iou(true_boxes, pred_box)
151
  ious.append(max_iou)
152
 
153
  mean_iou = float(np.mean(ious)) if ious else 0.0
154
+
155
  # Prepare results dictionary
156
  results = {
157
  "username": username,
 
172
  "test_seed": request.test_seed
173
  }
174
  }
175
+
176
  return results