nn committed on
Commit
893ac60
·
verified ·
1 Parent(s): b7703f2

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -30
app.py CHANGED
@@ -39,11 +39,9 @@ def read_yolo_boxes(file_path):
39
  with open(file_path, 'r') as f:
40
  for line in f:
41
  parts = line.strip().split()
42
- class_id = int(parts[0])
43
- if COCO_CLASSES[class_id] != 'traffic light':
44
- class_name = COCO_CLASSES[class_id]
45
- x, y, w, h = map(float, parts[1:5])
46
- boxes.append((class_name, x, y, w, h))
47
  return boxes
48
 
49
  def yolo_to_pixel_coord(x, y, img_width, img_height):
@@ -88,20 +86,18 @@ def plot_boxes_and_segment(image, yolo_boxes, segment, img_width, img_height, th
88
  labels = {'intersecting': 'Intersecting Box', 'obstructed': 'Obstructed Box', 'not touching': 'Non-interacting Box'}
89
 
90
  for yolo_box in yolo_boxes:
91
- class_name, x_center, y_center, width, height = yolo_box
92
- if class_name != 'traffic light':
93
- x1, y1, x2, y2 = yolo_to_pixel_coords(x_center, y_center, width, height, img_width, img_height)
94
- relationship = box_segment_relationship(yolo_box, segment, img_width, img_height, threshold)
95
- color = colors[relationship]
96
- label = labels[relationship]
97
- ax.add_patch(plt.Rectangle((x1, y1), x2-x1, y2-y1, fill=False, edgecolor=color, linewidth=2, label=label))
98
 
99
  ax.legend()
100
  ax.axis('off')
101
  plt.tight_layout()
102
  return fig
103
 
104
-
105
  # COCO classes
106
  COCO_CLASSES = [
107
  'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
@@ -181,16 +177,14 @@ def detect_objects(image, rail_segment):
181
 
182
  results = []
183
  for class_name, x, y, w, h in yolo_boxes:
184
- if class_name != 'traffic light':
185
- result = box_segment_relationship((0, x, y, w, h), rail_segment, img_width, img_height, threshold)
186
- results.append(f"{class_name} at ({x:.2f}, {y:.2f}) is {result} the segment.")
187
 
188
  os.remove(temp_image_path)
189
  os.remove(label_path)
190
 
191
  return fig, "\n".join(results), yolo_boxes
192
 
193
-
194
  def process_video(video_path, rail_segment, frame_skip=15):
195
  cap = cv2.VideoCapture(video_path)
196
  if not cap.isOpened():
@@ -220,30 +214,33 @@ def process_video(video_path, rail_segment, frame_skip=15):
220
 
221
  processed_count += 1
222
 
 
223
  pil_frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
224
 
 
225
  _, _, yolo_boxes = detect_objects(pil_frame, rail_segment)
226
 
 
227
  pixel_segment = convert_segment_to_pixel(rail_segment, width, height)
228
  pts = np.array(list(zip(pixel_segment[::2], pixel_segment[1::2])), np.int32)
229
  pts = pts.reshape((-1, 1, 2))
230
  cv2.polylines(frame, [pts], True, (0, 0, 255), 2)
231
 
 
232
  for box in yolo_boxes:
233
  class_name, x, y, w, h = box
234
- if class_name != 'traffic light':
235
- relationship = box_segment_relationship((0, x, y, w, h), rail_segment, width, height, threshold)
236
- x1, y1, x2, y2 = yolo_to_pixel_coords(x, y, w, h, width, height)
237
-
238
- if relationship == "intersecting":
239
- color = (0, 0, 255) # Red for intersecting
240
- elif relationship == "obstructed":
241
- color = (0, 255, 255) # Yellow for obstructed
242
- else:
243
- color = (0, 255, 0) # Green for not touching
244
-
245
- cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
246
- cv2.putText(frame, f"{class_name} ({relationship})", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, color, 2)
247
 
248
  out.write(frame)
249
 
 
39
  with open(file_path, 'r') as f:
40
  for line in f:
41
  parts = line.strip().split()
42
+ class_name = COCO_CLASSES[int(parts[0])]
43
+ x, y, w, h = map(float, parts[1:5])
44
+ boxes.append((class_name, x, y, w, h))
 
 
45
  return boxes
46
 
47
  def yolo_to_pixel_coord(x, y, img_width, img_height):
 
86
  labels = {'intersecting': 'Intersecting Box', 'obstructed': 'Obstructed Box', 'not touching': 'Non-interacting Box'}
87
 
88
  for yolo_box in yolo_boxes:
89
+ class_id, x_center, y_center, width, height = yolo_box
90
+ x1, y1, x2, y2 = yolo_to_pixel_coords(x_center, y_center, width, height, img_width, img_height)
91
+ relationship = box_segment_relationship(yolo_box, segment, img_width, img_height, threshold)
92
+ color = colors[relationship]
93
+ label = labels[relationship]
94
+ ax.add_patch(plt.Rectangle((x1, y1), x2-x1, y2-y1, fill=False, edgecolor=color, linewidth=2, label=label))
 
95
 
96
  ax.legend()
97
  ax.axis('off')
98
  plt.tight_layout()
99
  return fig
100
 
 
101
  # COCO classes
102
  COCO_CLASSES = [
103
  'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
 
177
 
178
  results = []
179
  for class_name, x, y, w, h in yolo_boxes:
180
+ result = box_segment_relationship((0, x, y, w, h), rail_segment, img_width, img_height, threshold)
181
+ results.append(f"{class_name} at ({x:.2f}, {y:.2f}) is {result} the segment.")
 
182
 
183
  os.remove(temp_image_path)
184
  os.remove(label_path)
185
 
186
  return fig, "\n".join(results), yolo_boxes
187
 
 
188
  def process_video(video_path, rail_segment, frame_skip=15):
189
  cap = cv2.VideoCapture(video_path)
190
  if not cap.isOpened():
 
214
 
215
  processed_count += 1
216
 
217
+ # Convert frame to PIL Image for compatibility with detect_objects
218
  pil_frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
219
 
220
+ # Detect objects in the frame
221
  _, _, yolo_boxes = detect_objects(pil_frame, rail_segment)
222
 
223
+ # Draw rail segment
224
  pixel_segment = convert_segment_to_pixel(rail_segment, width, height)
225
  pts = np.array(list(zip(pixel_segment[::2], pixel_segment[1::2])), np.int32)
226
  pts = pts.reshape((-1, 1, 2))
227
  cv2.polylines(frame, [pts], True, (0, 0, 255), 2)
228
 
229
+ # Check for obstructions and draw bounding boxes
230
  for box in yolo_boxes:
231
  class_name, x, y, w, h = box
232
+ relationship = box_segment_relationship((0, x, y, w, h), rail_segment, width, height, threshold)
233
+ x1, y1, x2, y2 = yolo_to_pixel_coords(x, y, w, h, width, height)
234
+
235
+ if relationship == "intersecting":
236
+ color = (0, 0, 255) # Red for intersecting
237
+ elif relationship == "obstructed":
238
+ color = (0, 255, 255) # Yellow for obstructed
239
+ else:
240
+ color = (0, 255, 0) # Green for not touching
241
+
242
+ cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
243
+ cv2.putText(frame, f"{class_name} ({relationship})", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, color, 2)
 
244
 
245
  out.write(frame)
246