Aanimated committed on
Commit
aa5e251
·
1 Parent(s): cb72d22

updated the object detection node to what was on the old github

Browse files
object_detection/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (153 Bytes). View file
 
object_detection/__pycache__/buoy_detection_node.cpython-310.pyc ADDED
Binary file (3.01 kB). View file
 
object_detection/buoy_detection_node.py CHANGED
@@ -6,116 +6,241 @@ from rclpy.qos import QoSProfile, QoSReliabilityPolicy, QoSHistoryPolicy
6
 
7
  # from realsense2_camera_msgs.msg import RGBD
8
  from sensor_msgs.msg import Image
9
- from sailbot_msgs.msg import ObjectDetectionResults
 
 
10
 
11
  from cv_bridge import CvBridge
12
  import torch
13
  from ultralytics import YOLO
14
  import cv2
15
  import numpy as np
 
16
 
17
-
18
- TRAINED_IMAGE_SIZE = (640, 640) # pixel width and height of the images that the model was trained on
19
  IMAGE_CONFIDENCE = 0.3
20
 
21
  SHOULD_SAVE_IMAGES = True
22
 
 
23
  class BuoyDetectionNode(Node):
24
-
25
  def __init__(self):
26
  super().__init__("buoy_detection")
27
- self.model = YOLO("/home/sailbot/sailbot_vt/src/object_detection/object_detection/weights/last3.pt")
28
- self.cv_bridge = CvBridge()
29
 
30
  self.current_image_rgb = None
31
- self.current_image_depth = None
32
-
33
- self.image_to_save_index = 0 # images are saved in the format name[index].jpg so this just keeps track of the current index of the image so that we don't overwrite other images
34
-
35
- sensor_qos_profile = QoSProfile(
36
- reliability=QoSReliabilityPolicy.BEST_EFFORT,
37
- history=QoSHistoryPolicy.KEEP_LAST,
38
- depth=1
 
 
 
 
 
 
 
 
 
39
  )
40
-
41
- self.depth_image_listener = self.create_subscription(msg_type=Image, topic="/camera/camera/aligned_depth_to_color/image_raw", callback=self.depth_image_callback, qos_profile=sensor_qos_profile)
42
- self.rgb_image_listener = self.create_subscription(msg_type=Image, topic="/camera/camera/color/image_raw", callback=self.rgb_image_callback, qos_profile=sensor_qos_profile)
43
  # self.object_detection_results_publisher = self.create_publisher(msg_type=ObjectDetectionResults, topic="/object_detection_results", qos_profile=sensor_qos_profile)
44
-
45
-
46
  self.create_timer(timer_period_sec=0.001, callback=self.perform_inference)
47
-
48
-
49
  def depth_image_callback(self, depth_image: Image):
50
- self.get_logger().info("got here depth")
51
-
52
- self.current_image_depth = self.cv_bridge.imgmsg_to_cv2(depth_image, "16UC1")
53
-
54
- # assert depth_image_cv.shape == rgb_image_cv.shape
55
-
56
- # self.get_logger().info(f"depth image shape: {rgbd_image.shape}")
57
-
58
- # smaller_size = min(depth_image_cv.shape)
59
- # larger_size = max(depth_image_cv.shape)
60
-
61
- # left = (larger_size-smaller_size)/2
62
- # right = left + smaller_size
63
  # top = 0
64
  # bottom = smaller_size
65
-
66
- # # TODO downscale the image so that the smallest dimension is 640p
67
- # # TODO: crop the image properly
68
  # cropped_depth_image_cv = depth_image_cv[left:right, top:bottom]
69
  # cropped_rgb_image_cv = rgb_image_cv
70
-
71
- # depth_image_cv.resize(TRAINED_IMAGE_SIZE)
72
  # rgb_image_cv.resize(TRAINED_IMAGE_SIZE)
73
-
74
- # print(f"cropped image shape: {depth_image_cv.shape}")
75
- # cv2.imwrite('rgb_image.jpg', rgb_image_cv)
76
- # cv2.imwrite('depth_image.jpg', depth_image_cv)
77
-
78
- # depth_image_cv.show()
79
-
80
-
81
-
 
 
82
  def rgb_image_callback(self, rgb_image: Image):
83
  self.get_logger().info("got here rgb")
84
 
85
  self.current_image_rgb = self.cv_bridge.imgmsg_to_cv2(rgb_image, "bgr8")
86
-
87
- self.current_image_rgb = self.current_image_rgb[80:1200,40:680] # crop the image to 640,640
88
-
89
 
 
90
 
91
  def perform_inference(self):
92
  # https://docs.ultralytics.com/modes/predict/#inference-sources
93
- if self.current_image_rgb is None: return
94
- results = self.model.predict([self.current_image_rgb,], conf=IMAGE_CONFIDENCE) # return a list of Results objects
 
 
 
 
 
 
 
 
 
95
 
96
  # Process results list
97
- for result in results:
98
- boxes = result.boxes # Boxes object for bounding box outputs
99
- print(f"boxes: {boxes}")
100
- masks = result.masks # Masks object for segmentation masks outputs
101
- keypoints = result.keypoints # Keypoints object for pose outputs
102
- probs = result.probs # Probs object for classification outputs
103
- obb = result.obb # Oriented boxes object for OBB outputs
104
-
105
- if SHOULD_SAVE_IMAGES:
106
- result.save(f"cv_results/result_{self.image_to_save_index}.png") # display to screen
107
- self.image_to_save_index += 1
108
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
  # TODO process these results properly
110
  # self.object_detection_results_publisher.publish()
111
 
112
-
113
-
114
  def main():
115
  rclpy.init()
116
  buoy_detection_node = BuoyDetectionNode()
117
  rclpy.spin(buoy_detection_node)
118
-
119
 
120
 
121
- if __name__ == "__main__": main()
 
 
6
 
7
  # from realsense2_camera_msgs.msg import RGBD
8
  from sensor_msgs.msg import Image
9
+ from std_msgs.msg import Float32
10
+ from std_msgs.msg import Int32
11
+ from autoboat_msgs.msg import ObjectDetectionResultsList
12
 
13
  from cv_bridge import CvBridge
14
  import torch
15
  from ultralytics import YOLO
16
  import cv2
17
  import numpy as np
18
+ from math import sqrt
19
 
20
+ TRAINED_IMAGE_SIZE = (640, 640) # pixel width and height of the images that the model was trained on
 
21
  IMAGE_CONFIDENCE = 0.3
22
 
23
  SHOULD_SAVE_IMAGES = True
24
 
25
+
26
  class BuoyDetectionNode(Node):
 
27
  def __init__(self):
28
  super().__init__("buoy_detection")
29
+ self.model = YOLO("/home/sailbot/sailbot_vt/src/object_detection/object_detection/weights/chris_pretty_good_11l_train32.pt")
30
+ self.cv_bridge = CvBridge()
31
 
32
  self.current_image_rgb = None
33
+ self.depth_image = None
34
+
35
+ self.image_to_save_index = 0 # images are saved in the format name[index].jpg so this just keeps track of the current index of the image so that we don't overwrite other images
36
+
37
+ sensor_qos_profile = QoSProfile(reliability=QoSReliabilityPolicy.BEST_EFFORT, history=QoSHistoryPolicy.KEEP_LAST, depth=1)
38
+ self.buoy_angle_pub = self.create_publisher(Float32, "/buoy_angle", 10)
39
+ self.buoy_depth_pixel_pub = self.create_publisher(Float32, "/buoy_depth_pixel", 10)
40
+
41
+ self.depth_image_listener = self.create_subscription(
42
+ msg_type=Image,
43
+ topic="/camera/camera/aligned_depth_to_color/image_raw",
44
+ callback=self.depth_image_callback,
45
+ qos_profile=sensor_qos_profile,
46
+ )
47
+ # self.depth_image_listener = self.create_subscription(msg_type=Image, topic="/camera/depth/image_rect_raw", callback=self.depth_image_callback, qos_profile=sensor_qos_profile)
48
+ self.rgb_image_listener = self.create_subscription(
49
+ msg_type=Image, topic="/camera/camera/color/image_raw", callback=self.rgb_image_callback, qos_profile=sensor_qos_profile
50
  )
 
 
 
51
  # self.object_detection_results_publisher = self.create_publisher(msg_type=ObjectDetectionResults, topic="/object_detection_results", qos_profile=sensor_qos_profile)
52
+
 
53
  self.create_timer(timer_period_sec=0.001, callback=self.perform_inference)
54
+
 
55
  def depth_image_callback(self, depth_image: Image):
56
+ self.get_logger().info("got here depth") # print(f"hihihihi")
57
+
 
 
 
 
 
 
 
 
 
 
 
58
  # top = 0
59
  # bottom = smaller_size
60
+
61
+ # TODO downscale the image so that the smallest dimension is 640p
62
+ # TODO: crop the image properly
63
  # cropped_depth_image_cv = depth_image_cv[left:right, top:bottom]
64
  # cropped_rgb_image_cv = rgb_image_cv
65
+
66
+ # depth_image_cv.resize(TRAINED_IMAGE_SIZE)
67
  # rgb_image_cv.resize(TRAINED_IMAGE_SIZE)
68
+
69
+ depth_image_cv = self.cv_bridge.imgmsg_to_cv2(depth_image, desired_encoding=depth_image.encoding)
70
+
71
+ print(f"cropped image shape: {depth_image_cv.shape}")
72
+ # cv2.imwrite('depth_image.jpg', depth_image_cv)
73
+ print(f"The type of the cv image is {type(depth_image_cv)}")
74
+ self.depth_image = depth_image_cv
75
+ # np.where(depth_image_cv > 0)
76
+
77
+ # print(depth_image_cv)
78
+
79
  def rgb_image_callback(self, rgb_image: Image):
80
  self.get_logger().info("got here rgb")
81
 
82
  self.current_image_rgb = self.cv_bridge.imgmsg_to_cv2(rgb_image, "bgr8")
 
 
 
83
 
84
+ self.current_image_rgb = self.current_image_rgb[80:1200, 40:680] # crop the image to 640,640
85
 
86
  def perform_inference(self):
87
  # https://docs.ultralytics.com/modes/predict/#inference-sources
88
+ if self.current_image_rgb is None:
89
+ return
90
+ results = self.model.predict(
91
+ [
92
+ self.current_image_rgb,
93
+ ],
94
+ conf=IMAGE_CONFIDENCE,
95
+ ) # return a list of Results objects
96
+
97
+ # Added variable for real-time inference
98
+ DIAGONAL_FIELD_OF_VIEW = 89
99
 
100
  # Process results list
101
+ print(f"The length of the results object is {len(results)}")
102
+ result = results[0]
103
+ boxes = result.boxes # Boxes object for bounding box outputs
104
+ # print(f"boxes: {boxes}")
105
+
106
+ # masks = result.masks # Masks object for segmentation masks outputs
107
+ # keypoints = result.keypoints # Keypoints object for pose outputs
108
+ # probs = result.probs # Probs object for classification outputs
109
+ # obb = result.obb # Oriented boxes object for OBB outputs
110
+
111
+ height = result.orig_shape[0]
112
+ width = result.orig_shape[1]
113
+ diagonal = sqrt(height**2 + width**2)
114
+ deg_per_pixel = DIAGONAL_FIELD_OF_VIEW / diagonal
115
+ boxes = result.boxes
116
+
117
+ # conf_angle = {}
118
+ angle_list = []
119
+ conf_list = []
120
+ x_list = []
121
+ y_list = []
122
+ box_y_center = 0
123
+ box_x_center = 0
124
+
125
+ if boxes.shape[0] == 0:
126
+ return
127
+
128
+ self.get_logger().info("We are finally getting something")
129
+ for box in boxes:
130
+ print(box.conf.item())
131
+ box_location = box.xywh
132
+ # The Y stuff is only for trying to get depth image values
133
+
134
+ # box_centerx_location = box_location[0][0].item() + box_location[0][2].item()/2
135
+ # box_centery_location = box_location[0][1].item() + box_location[0][3].item()/2
136
+
137
+ box_centerx_location = box_location[0][0].item()
138
+ box_centery_location = box_location[0][1].item()
139
+
140
+ print(f"X-coordinate: {box_centerx_location}")
141
+ print(f"Y-coordinate: {box_centery_location}")
142
+
143
+ # print(f"non-absolute-value-x-location: {(width/2)-box_centerx_location}")
144
+ # print(f"non-absolute-value-y-location: {(height/2)-box_centery_location}")
145
+
146
+ x_distance_from_center = box_centerx_location - (width / 2)
147
+ img_angle_from_center = x_distance_from_center * deg_per_pixel
148
+
149
+ # conf_angle[box.conf.item()]=img_angle_from_center
150
+ angle_list.append(img_angle_from_center)
151
+
152
+ conf_list.append(box.conf.item())
153
+ x_list.append(box_centerx_location)
154
+ y_list.append(box_centery_location)
155
+ # Trying to get the maximum confidence keys for the x and y locations for the conf box dictionaries -- FAILED, kept just in case
156
+ # Skip to after the returning of the angle to see this used
157
+
158
+ # max_conf_x_key = min(conf_x_box, key=conf_x_box.get)
159
+ # max_conf_y_key = min(conf_y_box, key=conf_y_box.get)
160
+ # print(f"The keys of the x dictionary are {conf_x_box.keys()}")
161
+ # print(f"The keys of the y dictionary are {conf_y_box.keys()}")
162
+ # print(f"max_conf_x_key: {max_conf_x_key}")
163
+ # print(f"max_conf_y_key: {max_conf_y_key}")
164
+
165
+ max_conf_index = np.argmax(conf_list)
166
+
167
+ # y_avg_distance_from_center = sum_y/count
168
+
169
+ # print(f"The confidence angle pairs sorted are {dict(sorted(conf_angle.items(), reverse=True))}")
170
+ # max_angle_key = max(conf_angle, key=conf_angle.get)
171
+
172
+ # print(f"The angle of the maximum confidence box is {conf_angle[max_angle_key]}")
173
+ max_conf_angle = angle_list[max_conf_index]
174
+ print(f"The most confident buoy angle is: {max_conf_angle}")
175
+ msg = Float32()
176
+ msg.data = max_conf_angle
177
+ self.buoy_angle_pub.publish(msg)
178
+
179
+ # Need to get the x,y location of the buoy/center of the bounding box
180
+ box_x_center = x_list[max_conf_index]
181
+ box_y_center = y_list[max_conf_index]
182
+ print(f"The type of the current image is {type(self.current_image_rgb)}")
183
+ print(f"The value at the index is {self.current_image_rgb[int(box_y_center), int(box_x_center)]}")
184
+ # self.current_image_rgb[int(box_x_center), int(box_y_center)]= (160, 32, 240)
185
+ # self.current_image_rgb[int(box_x_center+1), int(box_y_center)]= (160, 32, 240)
186
+ # self.current_image_rgb[int(box_x_center+2), int(box_y_center)]= (160, 32, 240)
187
+ # self.current_image_rgb[int(box_x_center-1), int(box_y_center)]= (160, 32, 240)
188
+ # self.current_image_rgb[int(box_x_center-2), int(box_y_center)]= (160, 32, 240)
189
+ # self.current_image_rgb[int(box_x_center), int(box_y_center-1)]= (160, 32, 240)
190
+ # self.current_image_rgb[int(box_x_center), int(box_y_center-2)]= (160, 32, 240)
191
+ # self.current_image_rgb[int(box_x_center), int(box_y_center+1)]= (160, 32, 240)
192
+ # self.current_image_rgb[int(box_x_center), int(box_y_center+2)]= (160, 32, 240)
193
+
194
+ # Then need to index into the depth image class variable saved as self.depth_image with the x,y coordinates
195
+ # And then print out the pixel value
196
+
197
+ ##TODO: make sure that y-center and x-center are not out of bounds (return nothing if out of bounds)
198
+ if not self.depth_image is None:
199
+ print(type(self.depth_image))
200
+ print(f"The boxcenterx variable is {int(box_x_center)}")
201
+ print(f"The boxcentery variable is {int(box_y_center)}")
202
+
203
+ print(f"The pixel value at the box location is hopefully {self.depth_image[int(box_y_center), int(box_x_center)]}")
204
+ # print(type(self.depth_image[int(box_x_center)][int(box_y_center)]))
205
+ msg = Float32()
206
+ msg.data = (self.depth_image[int(box_y_center + 50), int(box_x_center + 30)].item()) / 1000
207
+ # msg.data = int(box_x_center)
208
+ self.buoy_depth_pixel_pub.publish(msg)
209
+
210
+ # generate purple boxes around the center of the detected buoy
211
+ for x_value in range(self.current_image_rgb.shape[1]):
212
+ for y_value in range(self.current_image_rgb.shape[0]):
213
+ difference_vector = [abs(y_value - int(box_y_center)), abs(x_value - int(box_x_center))]
214
+ max_value = max(difference_vector[0], difference_vector[1])
215
+ if max_value <= 20:
216
+ self.current_image_rgb[y_value, x_value] = [160, 32, 240]
217
+
218
+ cv2.imwrite("rgb_image.jpg", self.current_image_rgb)
219
+
220
+ # For Depth Image
221
+ for x_value in range(self.depth_image.shape[1]):
222
+ for y_value in range(self.depth_image.shape[0]):
223
+ difference_vector = [abs(y_value - int(box_y_center + 50)), abs(x_value - int(box_x_center + 30))]
224
+ max_value = max(difference_vector[0], difference_vector[1])
225
+ if max_value <= 20:
226
+ self.depth_image[y_value, x_value] = 0
227
+
228
+ cv2.imwrite("depth_image.jpg", self.depth_image)
229
+
230
+ if SHOULD_SAVE_IMAGES:
231
+ print("GOT HERE")
232
+ result.save(f"cv_results2/result_{self.image_to_save_index}.png") # display to screen
233
+ self.image_to_save_index += 1
234
+
235
  # TODO process these results properly
236
  # self.object_detection_results_publisher.publish()
237
 
238
+
 
239
  def main():
240
  rclpy.init()
241
  buoy_detection_node = BuoyDetectionNode()
242
  rclpy.spin(buoy_detection_node)
 
243
 
244
 
245
+ if __name__ == "__main__":
246
+ main()