ABAO77 commited on
Commit
f113e60
·
verified ·
1 Parent(s): 82988b1

Upload 37 files

Browse files
.gitattributes CHANGED
@@ -1,35 +1,6 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
  *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  *.onnx filter=lfs diff=lfs merge=lfs -text
2
+ images/1.png filter=lfs diff=lfs merge=lfs -text
3
+ images/2.png filter=lfs diff=lfs merge=lfs -text
4
+ images/3.png filter=lfs diff=lfs merge=lfs -text
5
+ images/4.png filter=lfs diff=lfs merge=lfs -text
6
+ videos/test_video.mp4 filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
AI_brain_TRT_go_str.py ADDED
@@ -0,0 +1,353 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+ import pycuda.driver as cuda
4
+ import pycuda.autoinit
5
+ from __init__ import TensorrtBase
6
+ import cv2
7
+ import os
8
+ import cv2
9
+ from PIL import Image
10
+ import torchvision.transforms as transforms
11
+ import scipy.special
12
+ from setting_AI import *
13
+ from utils_func_go_str import CLEAN_DATA_CSV_DIRECTION, ADD_DATA_CSV_MASK_DIRECTION, ADD_DATA_CSV_DIRECTION_STRAIGHT, CLEAN_DATA_CSV_DIRECTION_STRAIGHT,CHECK_PUSH
14
+ import pandas as pd
15
+ import math
16
+ import matplotlib.pyplot as plt
17
+
18
+ tusimple_row_anchor = [ 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112,
19
+ 116, 120, 124, 128, 132, 136, 140, 144, 148, 152, 156, 160, 164,
20
+ 168, 172, 176, 180, 184, 188, 192, 196, 200, 204, 208, 212, 216,
21
+ 220, 224, 228, 232, 236, 240, 244, 248, 252, 256, 260, 264, 268,
22
+ 272, 276, 280, 284]
23
+ lane_colors = [(0,0,255),(0,255,0),(255,0,0),(0,255,255)]
24
+
25
+ net = TensorrtBase(plan,
26
+ input_names=input_names,
27
+ output_names=output_names,
28
+ max_batch_size=batch,
29
+ )
30
+
31
+ images = np.random.rand(1, 288, 800, 3).astype(np.float32)
32
+
33
+ binding_shape_map = {
34
+ "tensor": images.shape,
35
+ }
36
+
37
def INFER_TRT(images):
    """Run one inference pass on the TensorRT engine and return lane logits.

    The input is copied host->device, the engine is executed asynchronously on
    the engine's stream, and the first output binding is copied back and
    reshaped to the UFLD head layout (batch, griding+1, row_anchors, lanes)
    = (1, 101, 56, 4).
    """
    batch_input = np.ascontiguousarray(images).astype(np.float32)
    net.cuda_ctx.push()
    inputs, outputs, bindings, stream = net.buffers
    # Dynamic-shape engines require the optimization profile and the concrete
    # input shape to be set before every execution.
    net.context.set_optimization_profile_async(0, stream.handle)
    net.context.set_input_shape(input_names[0], batch_input.shape)

    # Host -> device copy, execute, device -> host copy, then wait.
    cuda.memcpy_htod_async(inputs[0].device, batch_input, stream)
    net.context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
    cuda.memcpy_dtoh_async(outputs[0].host, outputs[0].device, stream)
    stream.synchronize()

    # Snapshot the host buffers before releasing the context.
    results = [out.host.copy() for out in outputs]
    net.cuda_ctx.pop()
    return results[0].reshape(1, 101, 56, 4)
58
+
59
+ img_transforms = transforms.Compose([
60
+ transforms.Resize((288, 800)),
61
+ transforms.ToTensor(),
62
+ transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
63
+ ])
64
+
65
def prepare_input(img):
    """Convert a BGR OpenCV frame into a normalized (1, 3, 288, 800) tensor."""
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    pil_img = Image.fromarray(rgb)
    tensor = img_transforms(pil_img)
    # Prepend the batch dimension expected by the network.
    return tensor[None, ...]
73
+
74
def process_output(output):
    """Decode the UFLD head output (1, 101, 56, 4) into per-lane pixel points.

    Returns (lanes_points, lanes_detected): a list of [x, y] points per lane,
    scaled to a 1280x720 frame, and a per-lane flag that is True when more
    than two row anchors fired for that lane.
    """
    raw = np.array(output[0].data)
    raw = raw[:, ::-1, :]  # flip row anchors so index 0 is the image bottom

    # Expected column = soft-argmax over the 100 griding cells; the extra
    # 101st channel encodes "no lane at this row anchor".
    prob = scipy.special.softmax(raw[:-1, :, :], axis=0)
    cell_idx = (np.arange(100) + 1).reshape(-1, 1, 1)
    loc = np.sum(prob * cell_idx, axis=0)
    loc[np.argmax(raw, axis=0) == 100] = 0  # zero out "no lane" anchors
    decoded = loc

    col_sample = np.linspace(0, 800 - 1, 100)
    col_sample_w = col_sample[1] - col_sample[0]

    lanes_points = []
    lanes_detected = []
    for lane_num in range(decoded.shape[1]):
        lane_points = []
        # A lane counts as detected when more than two anchors have a hit.
        if np.sum(decoded[:, lane_num] != 0) > 2:
            lanes_detected.append(True)
            for point_num in range(decoded.shape[0]):
                if decoded[point_num, lane_num] > 0:
                    # Scale grid coordinates to the 1280x720 visualization.
                    x = int(decoded[point_num, lane_num] * col_sample_w * 1280 / 800) - 1
                    y = int(720 * (tusimple_row_anchor[56 - 1 - point_num] / 288)) - 1
                    lane_points.append([x, y])
        else:
            lanes_detected.append(False)
        lanes_points.append(lane_points)
    return np.array(lanes_points, dtype=object), np.array(lanes_detected, dtype=object)
111
+
112
def draw_lanes(input_img, lanes_points, lanes_detected, draw_points=True):
    """Overlay the detected ego-lane area and lane points on the frame.

    Returns (visualization_img, left_top, right_top, left_bottom,
    right_bottom, Have_lane); the four corner points are None when the
    corresponding lane segment was not found.
    """
    left_top = None
    right_top = None
    left_bottom = None
    right_bottom = None
    Have_lane = True

    # Work on a fixed 1280x720 canvas.
    visualization_img = cv2.resize(input_img, (1280, 720), interpolation=cv2.INTER_AREA)

    # Both ego lanes (index 1 = left, 2 = right) must be detected.
    if lanes_detected[1] and lanes_detected[2]:
        lane_segment_img = visualization_img.copy()

        left_lane = np.array(lanes_points[1])
        right_lane = np.array(lanes_points[2])

        # Vertical extent of each lane.
        y_top_left = np.min(left_lane[:, 1])
        y_bottom_left = np.max(left_lane[:, 1])
        y_top_right = np.min(right_lane[:, 1])
        y_bottom_right = np.max(right_lane[:, 1])

        # Vertical overlap of the two lanes.
        y_lane_top = max(y_top_left, y_top_right)
        y_lane_bottom = min(y_bottom_left, y_bottom_right)
        lane_length = y_lane_bottom - y_lane_top

        # y threshold keeping only the fraction of the lane nearest the camera
        # (per_len_lane comes from setting_AI).
        y_threshold = y_lane_bottom - per_len_lane * lane_length

        left_points_90 = [p for p in lanes_points[1] if p[1] >= y_threshold]
        right_points_90 = [p for p in lanes_points[2] if p[1] >= y_threshold]

        # Top/bottom corner of the kept left-lane segment.
        if left_points_90:
            left_top = min(left_points_90, key=lambda p: p[1])
            left_bottom = max(left_points_90, key=lambda p: p[1])
        # Same for the right lane.
        if right_points_90:
            right_top = min(right_points_90, key=lambda p: p[1])
            right_bottom = max(right_points_90, key=lambda p: p[1])

        # With points on both sides, fill the polygon between the lanes.
        if len(left_points_90) > 0 and len(right_points_90) > 0:
            pts = np.vstack((np.array(left_points_90), np.flipud(np.array(right_points_90))))
            cv2.fillPoly(lane_segment_img, pts=[pts], color=(255, 191, 0))
            visualization_img = cv2.addWeighted(visualization_img, 0.7, lane_segment_img, 0.3, 0)
    else:
        Have_lane = False

    if draw_points:
        for lane_num, lane_points in enumerate(lanes_points):
            for lane_point in lane_points:
                cv2.circle(visualization_img, (lane_point[0], lane_point[1]), 3, lane_colors[lane_num], -1)

    return visualization_img, left_top, right_top, left_bottom, right_bottom, Have_lane
172
+
173
def draw_direction_arrow(img, center, angle_deg, size=50, color=(0, 255, 255)):
    """Draw a steering-direction arrow icon rotated by `angle_deg` at `center`.

    The arrow points straight up at 0 degrees; rotating it mirrors the
    steering direction shown to the driver.
    """
    # Arrow outline in local coordinates, pointing up.
    arrow = np.array([
        [0, -size],               # tip
        [-size // 4, size // 2],  # lower-left corner
        [0, size // 4],           # lower-middle notch
        [size // 4, size // 2],   # lower-right corner
    ], dtype=np.float32)

    # Rotate around the local origin, then translate to `center`.
    # NOTE(review): pts @ M[:, :2] applies the TRANSPOSE of the cv2 rotation
    # (i.e. the opposite rotation direction); kept as-is to preserve the
    # original on-screen behavior.
    M = cv2.getRotationMatrix2D((0, 0), angle_deg, 1)
    moved = np.dot(arrow, M[:, :2])
    moved[:, 0] += center[0]
    moved[:, 1] += center[1]

    cv2.fillPoly(img, [moved.astype(np.int32)], color)
195
+
196
# Visualization frame size used throughout this module.
height = 720
width = 1280

# Reference points of the ego vehicle on the 1280x720 frame
# (car_length_padding comes from setting_AI).
car_point_left = (car_length_padding, height)
car_point_right = (width - car_length_padding, height)
car_center_bottom = ((car_point_left[0] + car_point_right[0]) // 2, height)
car_center_top = (car_center_bottom[0], 0)

# -------------------------------------------------------------------------------

# Start every run from empty direction logs.
CLEAN_DATA_CSV_DIRECTION()
CLEAN_DATA_CSV_DIRECTION_STRAIGHT()

# State shared across AI_TRT() calls (declared global there).
dr_back_control = None
an_back_control = None
len_csv_control_back = None
212
+
213
def AI_TRT(frame, paint = False, resize_img = True):
    """Detect lanes in `frame`, derive a steering direction, and log it.

    Returns (visualization_img, PUSH_RETURN, Have_lane): the annotated frame,
    the command returned by CHECK_PUSH() (or None), and whether both ego
    lanes were found.
    """
    global dr_back_control, an_back_control, len_csv_control_back
    PUSH_RETURN = None

    net_input = prepare_input(frame)
    net_output = INFER_TRT(net_input)
    lanes_points, lanes_detected = process_output(net_output)

    (visualization_img, lane_left_top, lane_right_top,
     lane_left_bottom, lane_right_bottom, Have_lane) = draw_lanes(
        frame, lanes_points, lanes_detected, draw_points=True)

    if not Have_lane:
        print("Không bắt có đường")
    if paint:
        # Mark the fixed car reference points.
        for ref_pt in (car_point_left, car_center_bottom, car_point_right, car_center_top):
            cv2.circle(visualization_img, ref_pt, 10, (50, 100, 255), -1)

    if lane_left_top is not None and lane_right_top is not None:
        # Target point: midpoint between the two lane tops.
        top_center = ((lane_left_top[0] + lane_right_top[0]) // 2,
                      (lane_left_top[1] + lane_right_top[1]) // 2)
        if paint:
            cv2.circle(visualization_img, lane_left_top, 5, (0, 255, 255), -1)
            cv2.circle(visualization_img, lane_right_top, 5, (0, 255, 255), -1)
            cv2.circle(visualization_img, top_center, 7, (0, 0, 255), -1)

        point_control_left = (lane_left_top[0], height)
        point_control_right = (lane_right_top[0], height)
        if paint:
            cv2.circle(visualization_img, point_control_left, 10, (100, 255, 100), -1)
            cv2.circle(visualization_img, point_control_right, 10, (100, 255, 100), -1)

        # Angle between the car's vertical axis and the target point
        # (positive = target to the right).
        dx = top_center[0] - car_center_bottom[0]
        dy = car_center_bottom[1] - top_center[1]
        angle_rad = math.atan2(dx, dy)
        angle_deg = angle_rad * 180 / math.pi

        # Dead-band of +/-5 degrees counts as driving straight.
        threshold = 5
        if angle_deg < -threshold:
            direction = DIRECTION_LEFT
        elif angle_deg > threshold:
            direction = DIRECTION_RIGHT
        else:
            direction = DIRECTION_STRAIGHT

        if paint:
            text = f"{direction} ({angle_deg:.2f} deg)"
            # Dark backdrop so the text stays readable.
            cv2.rectangle(visualization_img, (10, 10), (460, 70), (0, 0, 0), -1)
            cv2.putText(visualization_img, text, (15, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            icon_center = (width - 80, 80)
            draw_direction_arrow(visualization_img, icon_center, angle_deg,
                                 size=40, color=(0, 200, 200))
            cv2.circle(visualization_img, icon_center, 45, (0, 200, 200), 2)

        # Log the decision to the appropriate CSV stream.
        if direction != DIRECTION_STRAIGHT:
            ADD_DATA_CSV_MASK_DIRECTION(direction, abs(int(angle_deg)))
        else:
            ADD_DATA_CSV_DIRECTION_STRAIGHT(direction, abs(int(angle_deg)))

        push, dr_back, an_back = CHECK_PUSH()
        if push is not None:
            PUSH_RETURN = push

    if resize_img:
        # Half-size preview for display.
        visualization_img = cv2.resize(
            visualization_img,
            (visualization_img.shape[1] // 2, visualization_img.shape[0] // 2))

    return visualization_img, PUSH_RETURN, Have_lane
285
+
286
+ # lower_yellow = np.array([20, 100, 100], dtype=np.uint8)
287
+ # upper_yellow = np.array([30, 255, 255], dtype=np.uint8)
288
+
289
+ # def Process_No_lane(frame):
290
+ # global lower_yellow, upper_yellow
291
+ # mask = cv2.inRange(frame, lower_yellow, upper_yellow)
292
+ # blurred = cv2.GaussianBlur(mask, (5, 5), 0)
293
+ # edges = cv2.Canny(blurred, 50, 150)
294
+ # lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=50, minLineLength=50, maxLineGap=200)
295
+ # if lines is not None:
296
+ # for line in lines:
297
+ # x1, y1, x2, y2 = line[0]
298
+ # cv2.line(frame, (x1, y1), (x2, y2), (255, 0, 0), 3)
299
+
300
+
301
+
302
+ # # return visualization_img, Direction_mask
303
+
304
+
305
+ # def detect_yellow_lane_video(video_path):
306
+ # global lower_yellow, upper_yellow
307
+ # # Mở video
308
+ # cap = cv2.VideoCapture(video_path)
309
+
310
+ # while cap.isOpened():
311
+ # ret, frame = cap.read()
312
+ # if not ret:
313
+ # break
314
+
315
+
316
+
317
+ # height, width = frame.shape[:2] # Lấy kích thước ảnh
318
+ # roi = frame[height//2:, :] # Chỉ lấy phần dưới của ảnh
319
+
320
+ # # Chuyển sang không gian màu HSV và lọc màu vàng
321
+ # hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
322
+ # mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
323
+
324
+ # # Giảm nhiễu
325
+ # blurred = cv2.GaussianBlur(mask, (5, 5), 0)
326
+
327
+ # # Phát hiện cạnh
328
+ # edges = cv2.Canny(blurred, 50, 150)
329
+
330
+ # # Phát hiện đường bằng Hough Transform
331
+ # lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=50, minLineLength=50, maxLineGap=200)
332
+
333
+ # # Vẽ các đường phát hiện được lên ảnh gốc (cần dịch tọa độ Y lên để khớp với ảnh gốc)
334
+ # if lines is not None:
335
+ # for line in lines:
336
+ # x1, y1, x2, y2 = line[0]
337
+ # cv2.line(frame, (x1, y1 + height // 2), (x2, y2 + height // 2), (255, 0, 0), 3) # Điều chỉnh Y
338
+
339
+ # # Hiển thị video với đường line được phát hiện
340
+ # cv2.imshow('Yellow Lane Detection', frame)
341
+
342
+ # # Nhấn 'q' để thoát
343
+ # if cv2.waitKey(25) & 0xFF == ord('q'):
344
+ # break
345
+
346
+ # cap.release()
347
+ # cv2.destroyAllWindows()
348
+
349
+
350
+
351
+ # # Đường dẫn tới video
352
+ # video_path = "videos/a.mp4"
353
+ # detect_yellow_lane_video(video_path)
TestCamera.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import cv2
import time

# DirectShow backend (Windows) avoids the slow MSMF camera startup.
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)

if not cap.isOpened():
    print("Không thể mở camera")
    exit()

while True:
    start_time = time.time()
    ret, frame = cap.read()
    # FIX: check the read result BEFORE touching the frame. The original
    # printed frame.shape (twice) first, which raises AttributeError when a
    # read fails and `frame` is None.
    if not ret:
        print("Không thể nhận dữ liệu từ camera")
        break
    print(frame.shape)

    cv2.imshow('Camera', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
a_AI_brain_2_model_onnx.py ADDED
@@ -0,0 +1,303 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
import numpy as np
# FIX: TensorrtBase and `cuda` are used below (net = TensorrtBase(...),
# cuda.memcpy_htod_async in INFER_TRT) but these imports were commented out /
# missing, so the module raised NameError at import time.
import pycuda.driver as cuda
import pycuda.autoinit  # noqa: F401  creates the CUDA context as a side effect
from __init__ import TensorrtBase
import cv2
import os
from PIL import Image
import torchvision.transforms as transforms
import scipy.special
from setting_AI import *
from a_utils_func_2_model import (
    CLEAN_DATA_CSV_DIRECTION,
    ADD_DATA_CSV_MASK_DIRECTION,
    ADD_DATA_CSV_DIRECTION_STRAIGHT,
    CLEAN_DATA_CSV_DIRECTION_STRAIGHT,
    CHECK_PUSH,
    ADD_DATA_CSV_CLASSIFICATION,
    CHECK_CSV_CLASSIFICATION,
    CLEAN_DATA_CSV_CLASSIFICATION,
)
import pandas as pd
import math
import sys

# Make the local classification package importable.
sys.path.append("classification")
16
+
17
+ tusimple_row_anchor = [ 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112,
18
+ 116, 120, 124, 128, 132, 136, 140, 144, 148, 152, 156, 160, 164,
19
+ 168, 172, 176, 180, 184, 188, 192, 196, 200, 204, 208, 212, 216,
20
+ 220, 224, 228, 232, 236, 240, 244, 248, 252, 256, 260, 264, 268,
21
+ 272, 276, 280, 284]
22
+ lane_colors = [(0,0,255),(0,255,0),(255,0,0),(0,255,255)]
23
+
24
+ net = TensorrtBase(plan,
25
+ input_names=input_names,
26
+ output_names=output_names,
27
+ max_batch_size=batch,
28
+ )
29
+
30
+ images = np.random.rand(1, 288, 800, 3).astype(np.float32)
31
+
32
+ binding_shape_map = {
33
+ "tensor": images.shape,
34
+ }
35
+
36
+ def INFER_TRT(images):
37
+ # images = np.expand_dims(images, axis=0)
38
+ images = np.ascontiguousarray(images).astype(np.float32)
39
+ net.cuda_ctx.push()
40
+ inputs, outputs, bindings, stream = net.buffers
41
+ # Set optimization profile and input shape
42
+ net.context.set_optimization_profile_async(0, stream.handle)
43
+ net.context.set_input_shape(input_names[0], images.shape)
44
+
45
+ # Transfer input data to the GPU
46
+ cuda.memcpy_htod_async(inputs[0].device, images, stream)
47
+ # Execute inference
48
+ net.context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
49
+ # Transfer predictions back to the host
50
+ cuda.memcpy_dtoh_async(outputs[0].host, outputs[0].device, stream)
51
+ stream.synchronize()
52
+
53
+ # Copy outputs
54
+ trt_outputs = [out.host.copy() for out in outputs]
55
+ net.cuda_ctx.pop()
56
+ return trt_outputs[0].reshape(1, 101, 56, 4)
57
+
58
+ img_transforms = transforms.Compose([
59
+ transforms.Resize((288, 800)),
60
+ transforms.ToTensor(),
61
+ transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
62
+ ])
63
+
64
+ def prepare_input(img):
65
+ # Transform the image for inference
66
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
67
+ img_pil = Image.fromarray(img)
68
+ input_img = img_transforms(img_pil)
69
+ input_tensor = input_img[None, ...]
70
+
71
+ return input_tensor
72
+
73
+ def process_output(output):
74
+ # Parse the output of the model
75
+ processed_output = np.array(output[0].data)
76
+ processed_output = processed_output[:, ::-1, :]
77
+ prob = scipy.special.softmax(processed_output[:-1, :, :], axis=0)
78
+ idx = np.arange(100) + 1
79
+ idx = idx.reshape(-1, 1, 1)
80
+ loc = np.sum(prob * idx, axis=0)
81
+ processed_output = np.argmax(processed_output, axis=0)
82
+ loc[processed_output == 100] = 0
83
+ processed_output = loc
84
+
85
+
86
+ col_sample = np.linspace(0, 800 - 1, 100)
87
+ col_sample_w = col_sample[1] - col_sample[0]
88
+
89
+ lanes_points = []
90
+ lanes_detected = []
91
+
92
+ max_lanes = processed_output.shape[1]
93
+ for lane_num in range(max_lanes):
94
+ lane_points = []
95
+ # Check if there are any points detected in the lane
96
+ if np.sum(processed_output[:, lane_num] != 0) > 2:
97
+
98
+ lanes_detected.append(True)
99
+
100
+ # Process each of the points for each lane
101
+ for point_num in range(processed_output.shape[0]):
102
+ if processed_output[point_num, lane_num] > 0:
103
+ lane_point = [int(processed_output[point_num, lane_num] * col_sample_w * 1280 / 800) - 1, int(720 * (tusimple_row_anchor[56-1-point_num]/288)) - 1 ]
104
+ lane_points.append(lane_point)
105
+ else:
106
+ lanes_detected.append(False)
107
+
108
+ lanes_points.append(lane_points)
109
+ return np.array(lanes_points, dtype=object), np.array(lanes_detected, dtype=object)
110
+
111
+ def draw_lanes(input_img, lanes_points, lanes_detected, draw_points=True):
112
+ left_top = None
113
+ right_top = None
114
+ left_bottom = None
115
+ right_bottom = None
116
+ Have_lane = True
117
+
118
+ # Resize ảnh đầu vào
119
+ visualization_img = cv2.resize(input_img, (1280, 720), interpolation=cv2.INTER_AREA)
120
+
121
+ # Kiểm tra nếu cả 2 lane (trái và phải) được phát hiện
122
+ if lanes_detected[1] and lanes_detected[2]:
123
+ lane_segment_img = visualization_img.copy()
124
+
125
+ # Chuyển các điểm của lane trái và phải sang numpy array
126
+ left_lane = np.array(lanes_points[1])
127
+ right_lane = np.array(lanes_points[2])
128
+
129
+ # Tính y_top và y_bottom của từng lane
130
+ y_top_left = np.min(left_lane[:, 1])
131
+ y_bottom_left = np.max(left_lane[:, 1])
132
+ y_top_right = np.min(right_lane[:, 1])
133
+ y_bottom_right = np.max(right_lane[:, 1])
134
+
135
+ # Xác định vùng giao nhau của 2 lane theo trục y
136
+ y_lane_top = max(y_top_left, y_top_right)
137
+ y_lane_bottom = min(y_bottom_left, y_bottom_right)
138
+ lane_length = y_lane_bottom - y_lane_top
139
+
140
+ # Xác định ngưỡng y cho 90% chiều dài (phần gần camera)
141
+ y_threshold = y_lane_bottom - per_len_lane * lane_length
142
+
143
+ # Lọc các điểm của lane theo ngưỡng y (chỉ lấy phần gần camera)
144
+ left_points_90 = [point for point in lanes_points[1] if point[1] >= y_threshold]
145
+ right_points_90 = [point for point in lanes_points[2] if point[1] >= y_threshold]
146
+ # Tính tọa độ của cạnh trên và cạnh dưới cho lane trái
147
+ if left_points_90:
148
+ left_top = min(left_points_90, key=lambda p: p[1]) # Điểm có y nhỏ nhất
149
+ left_bottom = max(left_points_90, key=lambda p: p[1]) # Điểm có y lớn nhất
150
+
151
+ # Tính tọa độ của cạnh trên và cạnh dưới cho lane phải
152
+ if right_points_90:
153
+ right_top = min(right_points_90, key=lambda p: p[1])
154
+ right_bottom = max(right_points_90, key=lambda p: p[1])
155
+
156
+
157
+ # Nếu có đủ điểm từ cả hai lane, tiến hành vẽ
158
+ if len(left_points_90) > 0 and len(right_points_90) > 0:
159
+ pts = np.vstack((np.array(left_points_90), np.flipud(np.array(right_points_90))))
160
+ cv2.fillPoly(lane_segment_img, pts=[pts], color=(255,191,0))
161
+ visualization_img = cv2.addWeighted(visualization_img, 0.7, lane_segment_img, 0.3, 0)
162
+ else:
163
+ Have_lane = False
164
+
165
+ if draw_points:
166
+ for lane_num, lane_points in enumerate(lanes_points):
167
+ for lane_point in lane_points:
168
+ cv2.circle(visualization_img, (lane_point[0], lane_point[1]), 3, lane_colors[lane_num], -1)
169
+
170
+ return visualization_img, left_top, right_top, left_bottom, right_bottom, Have_lane
171
+
172
+ def draw_direction_arrow(img, center, angle_deg, size=50, color=(0, 255, 255)):
173
+ """
174
+ Vẽ biểu tượng mũi tên chỉ hướng xoay theo góc angle_deg tại vị trí center.
175
+ Mũi tên mặc định chỉ lên trên, khi quay theo góc, biểu tượng sẽ phản ánh hướng lái.
176
+ """
177
+ # Định nghĩa các điểm của mũi tên (mặc định hướng lên trên)
178
+ pts = np.array([
179
+ [0, -size], # điểm mũi tên (đỉnh)
180
+ [-size // 4, size // 2], # góc trái dưới
181
+ [0, size // 4], # điểm giữa dưới
182
+ [size // 4, size // 2] # góc phải dưới
183
+ ], dtype=np.float32)
184
+
185
+ # Tạo ma trận xoay
186
+ M = cv2.getRotationMatrix2D((0, 0), angle_deg, 1)
187
+ rotated_pts = np.dot(pts, M[:, :2])
188
+ # Dịch các điểm về vị trí center
189
+ rotated_pts[:, 0] += center[0]
190
+ rotated_pts[:, 1] += center[1]
191
+ rotated_pts = rotated_pts.astype(np.int32)
192
+
193
+ cv2.fillPoly(img, [rotated_pts], color)
194
+
195
+ height = 720
196
+ width = 1280
197
+
198
+ car_point_left = (car_length_padding, height)
199
+ car_point_right = (width - car_length_padding, height)
200
+ car_center_bottom = ((car_point_left[0] + car_point_right[0]) // 2, height)
201
+ car_center_top = (car_center_bottom[0], 0)
202
+
203
+ # -------------------------------------------------------------------------------
204
+
205
+ CLEAN_DATA_CSV_DIRECTION()
206
+ CLEAN_DATA_CSV_DIRECTION_STRAIGHT()
207
+ CLEAN_DATA_CSV_CLASSIFICATION()
208
+
209
+ dr_back_control = None
210
+ an_back_control = None
211
+ len_csv_control_back = None
212
+
213
+ def AI_TRT(frame, paint = False, resize_img = True):
214
+ global dr_back_control, an_back_control, len_csv_control_back
215
+ PUSH_RETURN = None
216
+
217
+ frame_ = prepare_input(frame)
218
+ frame_ = INFER_TRT(frame_)
219
+ lanes_points, lanes_detected = process_output(frame_)
220
+
221
+ visualization_img, lane_left_top, lane_right_top, lane_left_bottom, lane_right_bottom, Have_lane = draw_lanes(frame, lanes_points, lanes_detected, draw_points=True)
222
+
223
+ if Have_lane == False:
224
+ print("Không bắt có đường")
225
+ if paint:
226
+ cv2.circle(visualization_img, car_point_left, 10, (50, 100, 255), -1)
227
+ cv2.circle(visualization_img, car_center_bottom, 10, (50, 100, 255), -1)
228
+ cv2.circle(visualization_img, car_point_right, 10, (50, 100, 255), -1)
229
+ cv2.circle(visualization_img, car_center_top, 10, (50, 100, 255), -1)
230
+
231
+ if lane_left_top is not None and lane_right_top is not None:
232
+ top_center = ((lane_left_top[0] + lane_right_top[0]) // 2,
233
+ (lane_left_top[1] + lane_right_top[1]) // 2)
234
+ if paint:
235
+ cv2.circle(visualization_img, lane_left_top, 5, (0, 255, 255), -1)
236
+ cv2.circle(visualization_img, lane_right_top, 5, (0, 255, 255), -1)
237
+ cv2.circle(visualization_img, top_center, 7, (0, 0, 255), -1)
238
+
239
+ point_control_left = (lane_left_top[0], height)
240
+ point_control_right = (lane_right_top[0], height)
241
+
242
+ if paint:
243
+ cv2.circle(visualization_img, point_control_left, 10, (100, 255, 100), -1)
244
+ cv2.circle(visualization_img, point_control_right, 10, (100, 255, 100), -1)
245
+
246
+ dx = top_center[0] - car_center_bottom[0]
247
+ dy = car_center_bottom[1] - top_center[1]
248
+ angle_rad = math.atan2(dx, dy)
249
+ angle_deg = angle_rad * 180 / math.pi
250
+
251
+ threshold = 5
252
+ if angle_deg < -threshold:
253
+ direction = DIRECTION_LEFT
254
+
255
+ elif angle_deg > threshold:
256
+ direction = DIRECTION_RIGHT
257
+
258
+ else:
259
+ direction = DIRECTION_STRAIGHT
260
+
261
+ if paint:
262
+ text = f"{direction} ({angle_deg:.2f} deg)"
263
+ cv2.rectangle(visualization_img, (10, 10), (460, 70), (0, 0, 0), -1) # Nền cho text (để dễ đọc)
264
+ cv2.putText(visualization_img, text, (15, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
265
+ icon_center = (width - 80, 80)
266
+ draw_direction_arrow(visualization_img, icon_center, angle_deg, size=40, color=(0, 200, 200))
267
+ cv2.circle(visualization_img, icon_center, 45, (0, 200, 200), 2)
268
+
269
+ if direction != DIRECTION_STRAIGHT:
270
+ ADD_DATA_CSV_MASK_DIRECTION(direction, abs(int(angle_deg)))
271
+ else:
272
+ ADD_DATA_CSV_DIRECTION_STRAIGHT(direction, abs(int(angle_deg)))
273
+
274
+ push, dr_back, an_back = CHECK_PUSH()
275
+
276
+ if push is not None:
277
+
278
+ PUSH_RETURN = push
279
+
280
+ if resize_img:
281
+ visualization_img = cv2.resize(visualization_img, (visualization_img.shape[1] // 2, visualization_img.shape[0] // 2))
282
+
283
+
284
+ return visualization_img, PUSH_RETURN, Have_lane
285
+
286
from classification.inference_onnx import inference


def inference_classification(image):
    """Classify a frame, log the predicted class, and run the CSV vote check."""
    predicted_class, probabilities = inference(image)
    print(f"Predicted Class: {predicted_class}, Probabilities: {probabilities}")
    ADD_DATA_CSV_CLASSIFICATION(predicted_class)
    CHECK_CSV_CLASSIFICATION()
295
+
296
+
297
+
298
+
299
+
300
+
301
+
302
+
303
+
a_control_classification.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
class DIRECTION_CLASSIFICATION:
    """Holds the current and previous classified driving direction.

    FIX: the original defined `change` twice; the first one-argument version
    (which did not track the previous direction) was dead code, silently
    shadowed by the second. Only the live behavior is kept here.
    """

    def __init__(self):
        self.DIRECTION = "STRAIGHT"     # current direction label
        self.DIRECTION_PREVIOUS = None  # direction before the last change()

    def change(self, new_direction):
        """Set a new direction, remembering the previous one."""
        self.DIRECTION_PREVIOUS = self.DIRECTION
        self.DIRECTION = new_direction

    def check(self):
        """Return the current direction."""
        return self.DIRECTION

    def check_previous(self):
        """Return the direction that was current before the last change()."""
        return self.DIRECTION_PREVIOUS


# Shared singleton used by the rest of the project.
USE_CLASSIFICATION = DIRECTION_CLASSIFICATION()
20
+
a_record.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import os
3
+ import pygame
4
+ import cv2
5
+
6
+ pygame.init()
7
+
8
folder_path = "collect_data"

# Make sure the output folder exists before any recording starts.
if not os.path.exists(folder_path):
    os.makedirs(folder_path)

# Display / capture dimensions.
screen_width = 1000
screen_height = 600
camera_width = 640   # default 640
camera_height = 480  # default 480


def get_next_filename():
    """Return the first unused "<folder_path>/<n>.mp4" path (n = 1, 2, ...)."""
    index = 1
    while os.path.exists(f"{folder_path}/{index}.mp4"):
        index += 1
    return f"{folder_path}/{index}.mp4"
26
+
27
+ # Khởi tạo camera
28
+ # camera = cv2.VideoCapture(1) # thầy thay đổi
29
+ camera = cv2.VideoCapture(0, cv2.CAP_DSHOW)
30
+
31
+
32
+ camera.set(3, camera_width)
33
+ camera.set(4, camera_height)
34
+
35
+ # Font chữ
36
+ font = pygame.font.Font(None, 36)
37
+
38
+ # Khởi tạo màn hình pygame
39
+ screen = pygame.display.set_mode((screen_width, screen_height))
40
+ pygame.display.set_caption("Camera App")
41
+
42
+ # Trạng thái quay video
43
+ recording = False
44
+ out = None
45
+ blink = False # Hiệu ứng nhấp nháy khi quay
46
+ frame_count = 0 # Đếm số frame để tạo hiệu ứng nhấp nháy
47
+
48
+ # Hàm vẽ nút với hiệu ứng bấm
49
def draw_button(text, pos, color, active=False):
    """Draw a 150x50 rounded button at `pos` and return its rect.

    When `active` is True a bright border is added (used to signal that
    recording is in progress).
    """
    rect = pygame.Rect(pos[0], pos[1], 150, 50)
    pygame.draw.rect(screen, color, rect, border_radius=10)
    if active:
        pygame.draw.rect(screen, (255, 255, 255), rect, 3, border_radius=10)
    label = font.render(text, True, (255, 255, 255))
    screen.blit(label, (pos[0] + 30, pos[1] + 10))
    return rect
60
+
61
running = True
# FIX: the buttons are drawn at the bottom of the loop but clicked in the
# event handler at the top — on the very first iteration they were undefined
# and a click would raise NameError. Initialize them before the loop.
start_button = None
stop_button = None
while running:
    screen.fill((192, 192, 192))

    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.MOUSEBUTTONDOWN:
            x, y = event.pos
            if start_button is not None and start_button.collidepoint(x, y) and not recording:
                filename = get_next_filename()
                fourcc = cv2.VideoWriter_fourcc(*'mp4v')
                out = cv2.VideoWriter(filename, fourcc, 25, (camera_width, camera_height))
                recording = True
            elif stop_button is not None and stop_button.collidepoint(x, y) and recording:
                recording = False
                out.release()
                out = None

    # Grab a frame; mirror it for a natural self-view.
    ret, frame = camera.read()
    if ret:
        frame = cv2.flip(frame, 1)
        if recording:
            out.write(frame)

        frame = cv2.resize(frame, (350, 250))
        # pygame surfaces are column-major, hence the 90-degree rotation.
        # NOTE(review): the frame is still BGR here — colors in the preview
        # are channel-swapped; confirm whether that is intended.
        frame = pygame.surfarray.make_surface(cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE))
        screen.blit(frame, (10, 10))

    # Draw the buttons (RECORD is highlighted while recording).
    start_button = draw_button("RECORD", (800, 100), (0, 200, 0), recording)
    stop_button = draw_button("STOP", (800, 200), (200, 0, 0))

    # Blinking "Recording..." indicator.
    if recording:
        frame_count += 1
        if frame_count % 30 < 15:
            blink = not blink

        status_color = (255, 0, 0) if blink else (200, 0, 0)
        pygame.draw.circle(screen, status_color, (screen_width - 50, 50), 15)
        status_text = font.render("Recording...", True, (255, 0, 0))
        screen.blit(status_text, (screen_width - 200, 40))

    pygame.display.flip()

# Release resources.
if recording:
    out.release()
camera.release()
pygame.quit()
a_utils_func_2_model.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import pandas as pd
3
+ from statistics import mode
4
+ from setting_AI import *
5
+ from a_control_classification import USE_CLASSIFICATION
6
+
7
+ csv_path = "dataCSV/direction_control.csv"
8
+ csv_mask_path = "dataCSV/direction_control_mask.csv"
9
+ csv_straight_path = "dataCSV/direction_straight.csv"
10
+ csv_back_control_path = "dataCSV/back_control.csv"
11
+ csv_classification_path = "dataCSV/classification.csv"
12
+
13
+ def ADD_DATA_CSV_CLASSIFICATION(direction):
14
+
15
+ with open(csv_classification_path, mode='a', newline='', encoding='utf-8') as csvfile:
16
+ writer = csv.writer(csvfile)
17
+ writer.writerow([direction])
18
+
19
+ data_csv = pd.read_csv(csv_classification_path)
20
+
21
+ if len(data_csv) == 10000:
22
+ file_start = pd.read_csv(csv_classification_path, nrows=0)
23
+ file_start_new = pd.DataFrame(columns=file_start.columns)
24
+ file_start_new.to_csv(csv_classification_path, index=False)
25
+
26
+ def CLEAN_DATA_CSV_CLASSIFICATION():
27
+ file_start = pd.read_csv(csv_classification_path, nrows=0)
28
+ file_start_new = pd.DataFrame(columns=file_start.columns)
29
+ file_start_new.to_csv(csv_classification_path, index=False)
30
+
31
+ file_start = pd.read_csv(csv_classification_path, nrows=0)
32
+ file_start_new = pd.DataFrame(columns=file_start.columns)
33
+ file_start_new.to_csv(csv_classification_path, index=False)
34
+
35
+ def CHECK_CSV_CLASSIFICATION():
36
+ data_csv = pd.read_csv(csv_classification_path)
37
+ direction_list_to_mode = list(data_csv['direction'][-THRESHOLD_CLASSIFICATION:])
38
+ direction_mode = mode(direction_list_to_mode)
39
+ USE_CLASSIFICATION.change(direction_mode)
40
+ if direction_mode != USE_CLASSIFICATION.check():
41
+ CLEAN_DATA_CSV_CLASSIFICATION()
42
+
43
+
44
+ def ADD_DATA_CSV_MASK_DIRECTION(direction, angle):
45
+ with open(csv_mask_path, mode='a', newline='', encoding='utf-8') as csvfile:
46
+ writer = csv.writer(csvfile)
47
+ writer.writerow([direction, angle])
48
+
49
+ data_csv = pd.read_csv(csv_mask_path)
50
+
51
+ if len(data_csv) == 10000:
52
+ file_start = pd.read_csv(csv_mask_path, nrows=0)
53
+ file_start_new = pd.DataFrame(columns=file_start.columns)
54
+ file_start_new.to_csv(csv_mask_path, index=False)
55
+
56
+ def ADD_DATA_CSV_DIRECTION(direction, angle):
57
+ with open(csv_path, mode='a', newline='', encoding='utf-8') as csvfile:
58
+ writer = csv.writer(csvfile)
59
+ writer.writerow([direction, angle])
60
+
61
+ def ADD_DATA_CSV_DIRECTION_STRAIGHT(direction, angle):
62
+ with open(csv_straight_path, mode='a', newline='', encoding='utf-8') as csvfile:
63
+ writer = csv.writer(csvfile)
64
+ writer.writerow([direction, angle])
65
+
66
+ data_csv = pd.read_csv(csv_straight_path)
67
+ if len(data_csv) == 500:
68
+ CLEAN_DATA_CSV_DIRECTION_STRAIGHT()
69
+
70
+ def CLEAN_DATA_CSV_DIRECTION():
71
+ # Clear "direction_control.csv"
72
+ file_start = pd.read_csv(csv_path, nrows=0)
73
+ file_start_new = pd.DataFrame(columns=file_start.columns)
74
+ file_start_new.to_csv(csv_path, index=False)
75
+
76
+ # Clear "direction_control_mask.csv"
77
+ file_start = pd.read_csv(csv_mask_path, nrows=0)
78
+ file_start_new = pd.DataFrame(columns=file_start.columns)
79
+ file_start_new.to_csv(csv_mask_path, index=False)
80
+
81
+ def ADD_DATA_CSV_BACK_CONTROL(direction, angle):
82
+ with open(csv_back_control_path, mode='a', newline='', encoding='utf-8') as csvfile:
83
+ writer = csv.writer(csvfile)
84
+ writer.writerow([direction, angle])
85
+
86
+ def CLEAN_DATA_CSV_BACK_CONTROL():
87
+ # Clear "back_control.csv"
88
+ file_start = pd.read_csv(csv_back_control_path, nrows=0)
89
+ file_start_new = pd.DataFrame(columns=file_start.columns)
90
+ file_start_new.to_csv(csv_back_control_path, index=False)
91
+
92
+ def CLEAN_DATA_CSV_DIRECTION_STRAIGHT():
93
+ # Clear "direction_control.csv"
94
+ file_start = pd.read_csv(csv_straight_path, nrows=0)
95
+ file_start_new = pd.DataFrame(columns=file_start.columns)
96
+ file_start_new.to_csv(csv_straight_path, index=False)
97
+
98
+ def BOTTOM_DATA_CSV_CHECK():
99
+ data_csv_ = pd.read_csv(csv_path)
100
+ last_row = data_csv_.iloc[-1]
101
+ return (last_row["direction"], last_row["angle"])
102
+
103
+
104
+
105
+ def CHECK_PUSH():
106
+ push_variable = None
107
+ dr_back, an_back = None, None
108
+ data_csv_ = pd.read_csv(csv_mask_path)
109
+ direction_list_to_mode = list(data_csv_['direction'][-count_control:])
110
+ if len(direction_list_to_mode) > 0:
111
+ direction_mode = mode(direction_list_to_mode)
112
+ max_angle = max(list(data_csv_['angle'][:count_control]))
113
+ if len(pd.read_csv(csv_path)) == 0:
114
+ dr_back, an_back = direction_mode, max_angle
115
+ ADD_DATA_CSV_DIRECTION(direction_mode, max_angle)
116
+ # ADD_DATA_CSV_BACK_CONTROL(direction_mode, max_angle)
117
+ return f"{direction_mode}:{max_angle:03d}", dr_back, an_back
118
+ else:
119
+ bottom_data_csv_check = BOTTOM_DATA_CSV_CHECK()
120
+ if bottom_data_csv_check[0] != direction_mode or (abs(bottom_data_csv_check[1] - max_angle) >= threshold_scale):
121
+ CLEAN_DATA_CSV_DIRECTION()
122
+ # ADD_DATA_CSV_DIRECTION(direction_mode, max_angle)
123
+ dr_back, an_back = direction_mode, max_angle
124
+ return f"{direction_mode}:{max_angle:03d}", dr_back, an_back
125
+ else:
126
+ return push_variable, dr_back, an_back
127
+
128
+ return push_variable, dr_back, an_back
129
+
130
+
131
+
132
+
133
+
app.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pygame
2
+ import cv2
3
+ import time
4
+ import serial
5
+ import sys
6
+
7
+ from ultrafast.inference_onnx import AI_TRT
8
+ from classification.inference_onnx import inference_classification
9
+ from setting_AI import *
10
+ from a_utils_func_2_model import (
11
+ CLEAN_DATA_CSV_DIRECTION,
12
+ CLEAN_DATA_CSV_DIRECTION_STRAIGHT,
13
+ )
14
+ from a_control_classification import USE_CLASSIFICATION
15
+
16
+
17
+
18
+ serial_p = False
19
+ if serial_p:
20
+ serial_port = serial.Serial(
21
+ "COM8", 9600, serial.EIGHTBITS, serial.PARITY_NONE, serial.STOPBITS_ONE
22
+ )
23
+
24
+ # Initialize camera
25
+ cap = cv2.VideoCapture("./videos/test_video.mp4")
26
+ cap_ = cv2.VideoCapture("./videos/test_video.mp4")
27
+ if serial_p:
28
+ cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
29
+ cap_ = cv2.VideoCapture(1, cv2.CAP_DSHOW)
30
+
31
+ pygame.init()
32
+
33
+ # Screen settings
34
+ screen_width, screen_height = 1600, 900
35
+ screen = pygame.display.set_mode((screen_width, screen_height))
36
+ pygame.display.set_caption("AI Camera Control")
37
+
38
+ # Colors
39
+ WHITE = (240, 240, 240)
40
+ GREEN = (34, 177, 76)
41
+ RED = (200, 50, 50)
42
+ BLACK = (20, 20, 20)
43
+ GRAY = (180, 180, 180)
44
+ DARK_GRAY = (100, 100, 100)
45
+
46
+ # Fonts
47
+ font = pygame.font.Font(None, 50)
48
+ small_font = pygame.font.Font(None, 36)
49
+
50
+ # Buttons
51
+ start_button = pygame.Rect(100, 820, 220, 70)
52
+ end_button = pygame.Rect(400, 820, 220, 70)
53
+
54
+ # Slider settings
55
+ ROTATION_SPEED = 10
56
+ slider_rect = pygame.Rect(750, 850, 300, 10)
57
+ slider_knob_rect = pygame.Rect(750 + int((ROTATION_SPEED / 30) * 300) - 10, 840, 20, 30)
58
+ slider_dragging = False
59
+
60
+
61
+ time.sleep(1)
62
+
63
+ time_stop = sys.maxsize
64
+ sleep_time = sys.maxsize
65
+ running = True
66
+ active = False
67
+ clear = True
68
+ push_results = []
69
+
70
+ while running:
71
+ start_time = time.time()
72
+ screen.fill(WHITE)
73
+ pygame.draw.rect(screen, DARK_GRAY, (0, 800, screen_width, 100))
74
+
75
+ for event in pygame.event.get():
76
+ if event.type == pygame.QUIT:
77
+ running = False
78
+ elif event.type == pygame.MOUSEBUTTONDOWN:
79
+ if start_button.collidepoint(event.pos):
80
+ active = True
81
+ clear = True
82
+ elif end_button.collidepoint(event.pos):
83
+ active = False
84
+ print("Stopped pushing")
85
+ elif slider_knob_rect.collidepoint(event.pos):
86
+ slider_dragging = True
87
+ elif event.type == pygame.MOUSEBUTTONUP:
88
+ slider_dragging = False
89
+ elif event.type == pygame.MOUSEMOTION and slider_dragging:
90
+ slider_knob_rect.x = max(
91
+ slider_rect.x, min(event.pos[0] - 10, slider_rect.x + 300 - 20)
92
+ )
93
+ ROTATION_SPEED = int(((slider_knob_rect.x - slider_rect.x) / 300) * 50)
94
+
95
+ _, frame = cap.read()
96
+ _, frame_ = cap_.read()
97
+ frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
98
+ visualization_img = frame
99
+
100
+ inference_classification(frame_)
101
+
102
+ if active:
103
+ if USE_CLASSIFICATION.check() == "STRAIGHT":
104
+ if clear:
105
+ CLEAN_DATA_CSV_DIRECTION()
106
+ CLEAN_DATA_CSV_DIRECTION_STRAIGHT()
107
+ clear = False
108
+ visualization_img, PUSH_RETURN, Have_lane = AI_TRT(
109
+ frame, paint=True, resize_img=True
110
+ )
111
+ if PUSH_RETURN:
112
+ if serial_p:
113
+ serial_port.write(PUSH_RETURN.encode())
114
+ push_results.append(PUSH_RETURN)
115
+ if len(push_results) > 5:
116
+ push_results.pop(0)
117
+ angle = min(30, int(PUSH_RETURN.split(":")[1]))
118
+ sleep_time = angle / ROTATION_SPEED
119
+ time_stop = time.time()
120
+
121
+ if time.time() - time_stop >= sleep_time:
122
+ if serial_p:
123
+ serial_port.write(PUSH_STOP.encode())
124
+ push_results.append(PUSH_STOP)
125
+ if len(push_results) > 5:
126
+ push_results.pop(0)
127
+ time_stop = sys.maxsize
128
+
129
+ else:
130
+
131
+ hard_left = "X:000"
132
+ hard_right = "Y:000"
133
+
134
+ hard_time_1 = 0.5
135
+ hard_time_2 = 1.5
136
+ hard_time_3 = 0.5
137
+
138
+ if USE_CLASSIFICATION.check() == "LEFT":
139
+ serial_port.write(hard_right.encode())
140
+ print(hard_right)
141
+ time.sleep(0.5)
142
+ serial_port.write(PUSH_STOP.encode())
143
+ print(PUSH_STOP)
144
+
145
+ serial_port.write(hard_left.encode())
146
+ print(hard_left)
147
+ time.sleep(1.5)
148
+ serial_port.write(PUSH_STOP.encode())
149
+ print(PUSH_STOP)
150
+
151
+ serial_port.write(hard_right.encode())
152
+ print(hard_right)
153
+ time.sleep(0.5)
154
+ serial_port.write(PUSH_STOP.encode())
155
+ print(PUSH_STOP)
156
+
157
+ USE_CLASSIFICATION.change("STRAIGHT")
158
+
159
+ if USE_CLASSIFICATION.check() == "LEFT":
160
+ serial_port.write(hard_left.encode())
161
+ print(hard_left)
162
+ time.sleep(0.5)
163
+ serial_port.write(PUSH_STOP.encode())
164
+ print(PUSH_STOP)
165
+
166
+ serial_port.write(hard_right.encode())
167
+ print(hard_right)
168
+ time.sleep(1.5)
169
+ serial_port.write(PUSH_STOP.encode())
170
+ print(PUSH_STOP)
171
+
172
+ serial_port.write(hard_left.encode())
173
+ print(hard_left)
174
+ time.sleep(0.5)
175
+ serial_port.write(PUSH_STOP.encode())
176
+ print(PUSH_STOP)
177
+
178
+ USE_CLASSIFICATION.change("STRAIGHT")
179
+
180
+ text_cls = font.render(USE_CLASSIFICATION.check(), True, (0, 0, 255))
181
+ text_rect = text_cls.get_rect(center=(900, 200))
182
+ screen.blit(text_cls, text_rect)
183
+
184
+ elapsed_time = time.time() - start_time
185
+ fps = 1 / elapsed_time if elapsed_time > 0 else 0
186
+
187
+ text_fps = font.render(f"FPS: {fps:.2f}", True, (0, 0, 255))
188
+ text_rect = text_fps.get_rect(center=(900, 250))
189
+ screen.blit(text_fps, text_rect)
190
+
191
+ visualization_img = cv2.resize(
192
+ visualization_img,
193
+ (visualization_img.shape[1] // 2, visualization_img.shape[0] // 2),
194
+ )
195
+
196
+ pygame_frame = pygame.surfarray.make_surface(
197
+ cv2.rotate(cv2.flip(visualization_img, 1), cv2.ROTATE_90_COUNTERCLOCKWISE)
198
+ )
199
+ screen.blit(pygame_frame, (10, 10))
200
+
201
+ # Buttons
202
+ pygame.draw.rect(screen, GREEN if active else GRAY, start_button, border_radius=15)
203
+ pygame.draw.rect(screen, RED, end_button, border_radius=15)
204
+ screen.blit(
205
+ font.render("Start", True, WHITE), (start_button.x + 70, start_button.y + 20)
206
+ )
207
+ screen.blit(font.render("End", True, WHITE), (end_button.x + 80, end_button.y + 20))
208
+
209
+ # Slider
210
+ pygame.draw.rect(screen, GRAY, slider_rect, border_radius=5)
211
+ pygame.draw.ellipse(screen, BLACK, slider_knob_rect)
212
+ screen.blit(font.render(f"Speed: {ROTATION_SPEED}", True, WHITE), (1080, 820))
213
+
214
+ # Display push results
215
+ for i, result in enumerate(reversed(push_results)):
216
+ screen.blit(small_font.render(result, True, BLACK), (1200, 600 - i * 40))
217
+
218
+ pygame.display.flip()
219
+
220
+ cap.release()
221
+ cv2.destroyAllWindows()
222
+ pygame.quit()
classification/__init__.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import tensorrt as trt
2
+ # import pycuda.driver as cuda
3
+
4
+ # class HostDeviceMem(object):
5
+ # def __init__(self, host_mem, device_mem) -> None:
6
+ # self.host = host_mem
7
+ # self.device = device_mem
8
+
9
+
10
+ # def __str__(self) -> str:
11
+ # return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
12
+
13
+ # def __repr__(self):
14
+ # return self.__str__()
15
+
16
+ # class TensorrtBase:
17
+ # def __init__(self, engine_file_path, input_names, output_names, *, gpu_id=0, dynamic_factor=1, max_batch_size=1) -> None:
18
+ # self.input_names = input_names
19
+ # self.output_names = output_names
20
+ # self.trt_logger = trt.Logger(trt.Logger.WARNING)
21
+ # self.cuda_ctx = cuda.Device(gpu_id).make_context()
22
+ # self.max_batch_size = max_batch_size
23
+ # self.engine = self._load_engine(engine_file_path)
24
+ # self.binding_names = self.input_names + self.output_names
25
+ # self.context = self.engine.create_execution_context()
26
+ # self.buffers = self._allocate_buffer(dynamic_factor)
27
+
28
+
29
+ # def _load_engine(self, engine_file_path):
30
+ # # Force init TensorRT plugins
31
+ # trt.init_libnvinfer_plugins(None, '')
32
+ # with open(engine_file_path, "rb") as f, \
33
+ # trt.Runtime(self.trt_logger) as runtime:
34
+ # engine = runtime.deserialize_cuda_engine(f.read())
35
+ # return engine
36
+
37
+
38
+ # def _allocate_buffer(self, dynamic_factor):
39
+ # """Allocate buffer
40
+ # :dynamic_factor: normally expand the buffer size for dynamic shape
41
+ # """
42
+ # inputs = []
43
+ # outputs = []
44
+ # bindings = [None] * len(self.binding_names)
45
+ # stream = cuda.Stream()
46
+ # for binding in self.binding_names:
47
+ # binding_idx = self.engine[binding]
48
+ # if binding_idx == -1:
49
+ # print("❌ Binding Names!")
50
+ # continue
51
+
52
+ # # trt.volume() return negtive volue if -1 in shape
53
+ # size = abs(trt.volume(self.engine.get_binding_shape(binding))) * \
54
+ # self.max_batch_size * dynamic_factor
55
+ # dtype = trt.nptype(self.engine.get_binding_dtype(binding))
56
+ # # Allocate host and device buffers
57
+ # host_mem = cuda.pagelocked_empty(size, dtype)
58
+ # device_mem = cuda.mem_alloc(host_mem.nbytes)
59
+ # # Append the device buffer to device bindings.
60
+ # bindings[binding_idx] = int(device_mem)
61
+ # # Append to the appropriate list.
62
+ # if self.engine.binding_is_input(binding):
63
+ # inputs.append(HostDeviceMem(host_mem, device_mem))
64
+ # else:
65
+ # outputs.append(HostDeviceMem(host_mem, device_mem))
66
+ # return inputs, outputs, bindings, stream
67
+
68
+ # # def do_inference(self, inf_in_list, *, binding_shape_map=None):
69
+ # # """Main function for inference
70
+ # # :inf_in_list: input list.
71
+ # # :binding_shape_map: {<binding_name>: <shape>}, leave it to None for fixed shape
72
+ # # """
73
+ # # inputs, outputs, bindings, stream = self.buffers
74
+ # # if binding_shape_map:
75
+ # # self.context.active_optimization_profile = 0
76
+ # # for binding_name, shape in binding_shape_map.items():
77
+ # # binding_idx = self.engine[binding_name]
78
+ # # self.context.set_binding_shape(binding_idx, shape)
79
+ # # # transfer input data to device
80
+ # # for i in range(len(inputs)):
81
+ # # inputs[i].host = inf_in_list[i]
82
+ # # cuda.memcpy_htod_async(inputs[i].device, inputs[i].host, stream)
83
+ # # # do inference
84
+ # # # context.profiler = trt.Profiler()
85
+ # # self.context.execute_async_v2(bindings=bindings,
86
+ # # stream_handle=stream.handle)
87
+ # # # copy data from device to host
88
+ # # for i in range(len(outputs)):
89
+ # # cuda.memcpy_dtoh_async(outputs[i].host, outputs[i].device, stream)
90
+
91
+ # # stream.synchronize()
92
+ # # trt_outputs = [out.host.copy() for out in outputs]
93
+ # # return trt_outputs
94
+
95
+ # def __del__(self):
96
+ # self.cuda_ctx.pop()
97
+ # del self.cuda_ctx
classification/__pycache__/INFER_TRT_CLASSIFICATION.cpython-39.pyc ADDED
Binary file (2.26 kB). View file
 
classification/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (193 Bytes). View file
 
classification/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (2.85 kB). View file
 
classification/__pycache__/inference_onnx.cpython-311.pyc ADDED
Binary file (8.74 kB). View file
 
classification/inference_onnx.py ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import onnxruntime as ort
2
+ import cv2
3
+ import numpy as np
4
+ from numpy.typing import NDArray
5
+ import os
6
+ from a_utils_func_2_model import (
7
+ CLEAN_DATA_CSV_DIRECTION,
8
+ ADD_DATA_CSV_MASK_DIRECTION,
9
+ ADD_DATA_CSV_DIRECTION_STRAIGHT,
10
+ CLEAN_DATA_CSV_DIRECTION_STRAIGHT,
11
+ CHECK_PUSH,
12
+ ADD_DATA_CSV_CLASSIFICATION,
13
+ CHECK_CSV_CLASSIFICATION,
14
+ CLEAN_DATA_CSV_CLASSIFICATION,
15
+ )
16
+
17
+
18
+ def load_model(model_path: str):
19
+ """
20
+ Load ONNX model for inference with appropriate execution provider.
21
+
22
+ Args:
23
+ model_path: Path to the ONNX model file
24
+
25
+ Returns:
26
+ ONNX Runtime InferenceSession
27
+
28
+ Raises:
29
+ FileNotFoundError: If model file doesn't exist
30
+ RuntimeError: If model loading fails
31
+ """
32
+ if not os.path.exists(model_path):
33
+ raise FileNotFoundError(f"Model file not found: {model_path}")
34
+
35
+ try:
36
+ # Try CPU provider first
37
+ providers = ["CPUExecutionProvider"]
38
+ session = ort.InferenceSession(model_path, providers=providers)
39
+ return session
40
+
41
+ except Exception as e:
42
+ raise RuntimeError(f"Failed to load model: {str(e)}")
43
+
44
+
45
+ dirname = os.path.dirname(__file__)
46
+ session = load_model(os.path.join(dirname, "./model/model_16.onnx"))
47
+
48
+
49
+ def prepare_input(image: NDArray[np.uint8]) -> NDArray[np.float16]:
50
+ """
51
+ Prepare image input for model inference.
52
+
53
+ Args:
54
+ image: Input image in BGR format with shape (H, W, 3)
55
+
56
+ Returns:
57
+ Preprocessed image as float16 array with shape (1, 3, H, W)
58
+ """
59
+ img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
60
+ img = cv2.resize(img, (224, 224)).astype(np.float16)
61
+
62
+ # Normalize pixel values to range [-1, 1]
63
+ mean = np.array([0.485, 0.456, 0.406], dtype=np.float16)
64
+ std = np.array([0.229, 0.224, 0.225], dtype=np.float16)
65
+ img = (img / 255.0 - mean) / std
66
+
67
+ # Convert to (1, 3, H, W) format
68
+ img = img.transpose(2, 0, 1)
69
+ img = np.expand_dims(img, axis=0)
70
+
71
+ return img.astype(np.float16)
72
+
73
+
74
+ def softmax(x):
75
+ """Apply softmax function to numpy array."""
76
+ exp_x = np.exp(x - np.max(x)) # Subtract max for numerical stability
77
+ return exp_x / exp_x.sum()
78
+
79
+
80
+ classes = ["LEFT", "RIGHT", "STRAIGHT"]
81
+
82
+
83
+ def inference(image: NDArray[np.uint8]) -> tuple[int, float, NDArray[np.float16]]:
84
+ """
85
+ Run inference on an image and return class prediction with probabilities.
86
+
87
+ Args:
88
+ session: ONNX runtime session
89
+ image: Input image in BGR format
90
+
91
+ Returns:
92
+ tuple containing:
93
+ - predicted class index (int)
94
+ - confidence score (float)
95
+ - probability distribution (numpy array)
96
+ """
97
+ input_tensor = prepare_input(image)
98
+ input_name = session.get_inputs()[0].name
99
+ output_name = session.get_outputs()[0].name
100
+ output = session.run([output_name], {input_name: input_tensor})[0]
101
+
102
+ # Apply softmax to get probabilities
103
+ probabilities = softmax(output[0])
104
+ predicted_class = classes[np.argmax(probabilities)]
105
+ confidence = np.max(probabilities)
106
+
107
+ # max_value = probabilities[max_index]
108
+
109
+ return predicted_class, confidence
110
+
111
+
112
+ def process_video(
113
+ video_path: str, session, output_path: str = None, display: bool = True
114
+ ):
115
+ """
116
+ Process video file and perform inference on each frame.
117
+
118
+ Args:
119
+ video_path: Path to input video file
120
+ session: ONNX runtime session
121
+ output_path: Path to save output video (optional)
122
+ display: Whether to display video while processing
123
+ """
124
+ cap = cv2.VideoCapture(video_path)
125
+ if not cap.isOpened():
126
+ raise ValueError("Error opening video file")
127
+
128
+ # Get video properties
129
+ frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
130
+ frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
131
+ fps = int(cap.get(cv2.CAP_PROP_FPS))
132
+
133
+ # Initialize video writer if output path is specified
134
+ writer = None
135
+ if output_path:
136
+ fourcc = cv2.VideoWriter_fourcc(*"mp4v")
137
+ writer = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))
138
+
139
+ classes = ["left", "right", "straight"]
140
+
141
+ try:
142
+ while cap.isOpened():
143
+ ret, frame = cap.read()
144
+ if not ret:
145
+ break
146
+
147
+ # Perform inference
148
+ max_index, confidence, probs = inference(session, frame)
149
+
150
+ # Draw prediction on frame
151
+ text = f"{classes[max_index]}: {confidence:.2f}"
152
+ cv2.putText(
153
+ frame,
154
+ text,
155
+ (50, 50),
156
+ cv2.FONT_HERSHEY_SIMPLEX,
157
+ 1,
158
+ (0, 255, 0),
159
+ 2,
160
+ cv2.LINE_AA,
161
+ )
162
+
163
+ if display:
164
+ cv2.imshow("Video Processing", frame)
165
+ if cv2.waitKey(1) & 0xFF == ord("q"):
166
+ break
167
+
168
+ if writer:
169
+ writer.write(frame)
170
+
171
+ finally:
172
+ cap.release()
173
+ if writer:
174
+ writer.release()
175
+ if display:
176
+ cv2.destroyAllWindows()
177
+
178
+
179
+ def inference_classification(image):
180
+
181
+ predicted_class, probabilities = inference(image)
182
+ print(f"Predicted Class: {predicted_class}, Probabilities: {probabilities}")
183
+ ADD_DATA_CSV_CLASSIFICATION(predicted_class)
184
+ CHECK_CSV_CLASSIFICATION()
185
+
186
+
187
+ if __name__ == "__main__":
188
+ model_path = "./model/model_16.onnx"
189
+ image_path = "../images/1.png"
190
+
191
+ # Load model
192
+ # session = load_model(model_path)
193
+
194
+ # # Load and preprocess image
195
+ image = cv2.imread(image_path)
196
+
197
+ # # Perform inference
198
+ predicted_class, probabilities = inference(image)
199
+
200
+ print(f"Predicted Class: {predicted_class}, Confidence: {probabilities}")
201
+
202
+ # video_path = "./data/IMG_2478.MOV" # Replace with your video path
203
+ # # output_path = "./output_video.mp4" # Optional output path
204
+
205
+ # try:
206
+ # process_video(
207
+ # video_path,
208
+ # session,
209
+ # )
210
+ # except Exception as e:
211
+ # print(f"Error processing video: {str(e)}")
212
+
213
+ # 0 left
214
+ # 1 right
215
+ # 2 straight
classification/model/model_16.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:07a3bc4ae79eea7ecc3c03c2029636f9cf67e0b90bf671b2453a04f9048c184f
3
+ size 22359724
convertONNX2RT.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import tensorrt as trt
3
+ TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
4
+
5
+ def initialize_builder(use_fp16=False, workspace_size=(1 << 31)): # 2GB expressed using bit shift
6
+ """
7
+ Khởi tạo và cấu hình builder cho TensorRT.
8
+
9
+ Args:
10
+ use_fp16 (bool): Sử dụng FP16 nếu có hỗ trợ và được yêu cầu.
11
+ workspace_size (int): Kích thước workspace tối đa cho builder.
12
+
13
+ Returns:
14
+ Tuple[trt.Builder, trt.BuilderConfig]: Trả về builder và cấu hình builder.
15
+ """
16
+ builder = trt.Builder(TRT_LOGGER)
17
+ config = builder.create_builder_config()
18
+ config.set_tactic_sources(trt.TacticSource.CUBLAS_LT)
19
+ config.max_workspace_size = workspace_size # 2GB using bit shift
20
+
21
+ if builder.platform_has_fast_fp16 and use_fp16:
22
+ config.set_flag(trt.BuilderFlag.FP16)
23
+
24
+ return builder, config
25
+
26
+ def parse_onnx_model(builder, onnx_file_path):
27
+ """
28
+ Phân tích mô hình ONNX và tạo network trong TensorRT.
29
+
30
+ Args:
31
+ builder (trt.Builder): Builder TensorRT.
32
+ onnx_file_path (str): Đường dẫn tới file mô hình ONNX.
33
+
34
+ Returns:
35
+ trt.INetworkDefinition: Trả về network TensorRT.
36
+ """
37
+ network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
38
+ parser = trt.OnnxParser(network, TRT_LOGGER )
39
+
40
+ with open(onnx_file_path, 'rb') as model:
41
+ if not parser.parse(model.read()):
42
+ print('❌ Failed to parse the ONNX file.')
43
+ for error in range(parser.num_errors):
44
+ print(parser.get_error(error))
45
+ return None
46
+ print("✅ Completed parsing ONNX file")
47
+ return network
48
+
49
+ def parse_onnx_model_static(builder, onnx_file_path, batch_size=2):
50
+ """
51
+ Phân tích mô hình ONNX và tạo network trong TensorRT với kích thước batch cố định.
52
+
53
+ Args:
54
+ builder (trt.Builder): Builder TensorRT.
55
+ onnx_file_path (str): Đường dẫn tới file mô hình ONNX.
56
+ batch_size (int): Kích thước batch cố định.
57
+
58
+ Returns:
59
+ trt.INetworkDefinition: Trả về network TensorRT.
60
+ """
61
+ network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
62
+ parser = trt.OnnxParser(network, TRT_LOGGER)
63
+
64
+ with open(onnx_file_path, 'rb') as model:
65
+ if not parser.parse(model.read()):
66
+ print('❌ Failed to parse the ONNX file.')
67
+ for error in range(parser.num_errors):
68
+ print(parser.get_error(error))
69
+ return None
70
+ print("✅ Completed parsing ONNX file")
71
+
72
+ # Thiết lập kích thước batch cố định cho tất cả các input
73
+ for i in range(network.num_inputs):
74
+ shape = list(network.get_input(i).shape)
75
+ shape[0] = batch_size
76
+ network.get_input(i).shape = shape
77
+
78
+ return network
79
+
80
+ def set_dynamic_shapes(builder, config, dynamic_shapes):
81
+ """
82
+ Thiết lập các kích thước động cho mô hình.
83
+
84
+ Args:
85
+ builder (trt.Builder): Builder TensorRT.
86
+ network (trt.INetworkDefinition): Network TensorRT.
87
+ config (trt.BuilderConfig): Cấu hình builder.
88
+ dynamic_shapes (dict): Từ điển các kích thước động cho mô hình.
89
+ """
90
+ if dynamic_shapes:
91
+ print(f"===> Using dynamic shapes: {str(dynamic_shapes)}")
92
+ profile = builder.create_optimization_profile()
93
+
94
+ for binding_name, dynamic_shape in dynamic_shapes.items():
95
+ min_shape, opt_shape, max_shape = dynamic_shape
96
+ profile.set_shape(binding_name, min_shape, opt_shape, max_shape)
97
+
98
+ config.add_optimization_profile(profile)
99
+
100
+ def build_and_save_engine(builder, network, config, engine_file_path):
101
+ """
102
+ Xây dựng và lưu engine TensorRT.
103
+
104
+ Args:
105
+ builder (trt.Builder): Builder TensorRT.
106
+ network (trt.INetworkDefinition): Network TensorRT.
107
+ config (trt.BuilderConfig): Cấu hình builder.
108
+ engine_file_path (str): Đường dẫn để lưu engine.
109
+ """
110
+ if os.path.isfile(engine_file_path):
111
+ try:
112
+ os.remove(engine_file_path)
113
+ except Exception as e:
114
+ print(f"Cannot remove existing file: {engine_file_path}. Error: {e}")
115
+
116
+ print("Creating TensorRT Engine...")
117
+ serialized_engine = builder.build_serialized_network(network, config)
118
+ if serialized_engine:
119
+ with open(engine_file_path, "wb") as f:
120
+ f.write(serialized_engine)
121
+ print(f"===> Serialized Engine Saved at: {engine_file_path}")
122
+ else:
123
+ print("❌ Failed to build engine")
124
+
125
+ # Fix batch_size
126
+ def main_fixed():
127
+ batch_size = 1
128
+ onnx_file_path = "models/tusimple_18.onnx"
129
+ engine_file_path = "models/tusimple_18_FP16.trt"
130
+
131
+ builder, config = initialize_builder(use_fp16=True)
132
+ network = parse_onnx_model_static(builder, onnx_file_path, batch_size=batch_size)
133
+ if network:
134
+ build_and_save_engine(builder, network, config, engine_file_path)
135
+
136
+ if __name__ == "__main__":
137
+ main_fixed()
data.json ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "state_1": [
3
+ ["S", 10],
4
+ ["X", 0.5],
5
+ ["x", 0],
6
+ ["Y", 1.5],
7
+ ["x", 0],
8
+ ["X", 0.5]
9
+ ],
10
+ "state_2": [
11
+ ["S", 10],
12
+ ["X", 0.5],
13
+ ["x", 0],
14
+ ["Y", 1.5],
15
+ ["x", 0],
16
+ ["X", 0.5]
17
+ ]
18
+ }
19
+
dataCSV/back_control.csv ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ direction,angle
2
+ X,14
3
+ Y,6
4
+ Y,6
5
+ Y,16
6
+ Y,5
7
+ Y,5
8
+ Y,5
9
+ Y,29
10
+ Y,38
11
+ Y,47
12
+ X,6
13
+ Y,12
14
+ X,5
15
+ X,7
16
+ Y,7
17
+ Y,19
18
+ X,5
19
+ X,15
20
+ X,18
21
+ X,29
22
+ Y,37
23
+ Y,39
dataCSV/classification.csv ADDED
@@ -0,0 +1,1735 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ direction
2
+ LEFT
3
+ LEFT
4
+ LEFT
5
+ LEFT
6
+ LEFT
7
+ LEFT
8
+ LEFT
9
+ LEFT
10
+ LEFT
11
+ LEFT
12
+ LEFT
13
+ LEFT
14
+ LEFT
15
+ LEFT
16
+ LEFT
17
+ LEFT
18
+ LEFT
19
+ LEFT
20
+ LEFT
21
+ LEFT
22
+ LEFT
23
+ LEFT
24
+ LEFT
25
+ LEFT
26
+ LEFT
27
+ LEFT
28
+ LEFT
29
+ LEFT
30
+ LEFT
31
+ LEFT
32
+ LEFT
33
+ LEFT
34
+ LEFT
35
+ LEFT
36
+ LEFT
37
+ LEFT
38
+ LEFT
39
+ LEFT
40
+ LEFT
41
+ LEFT
42
+ LEFT
43
+ LEFT
44
+ LEFT
45
+ LEFT
46
+ LEFT
47
+ RIGHT
48
+ RIGHT
49
+ RIGHT
50
+ RIGHT
51
+ RIGHT
52
+ RIGHT
53
+ RIGHT
54
+ RIGHT
55
+ RIGHT
56
+ RIGHT
57
+ RIGHT
58
+ RIGHT
59
+ RIGHT
60
+ RIGHT
61
+ RIGHT
62
+ RIGHT
63
+ RIGHT
64
+ RIGHT
65
+ RIGHT
66
+ LEFT
67
+ LEFT
68
+ RIGHT
69
+ RIGHT
70
+ RIGHT
71
+ RIGHT
72
+ RIGHT
73
+ RIGHT
74
+ RIGHT
75
+ RIGHT
76
+ RIGHT
77
+ RIGHT
78
+ RIGHT
79
+ RIGHT
80
+ RIGHT
81
+ RIGHT
82
+ RIGHT
83
+ RIGHT
84
+ RIGHT
85
+ RIGHT
86
+ RIGHT
87
+ RIGHT
88
+ RIGHT
89
+ RIGHT
90
+ RIGHT
91
+ RIGHT
92
+ RIGHT
93
+ RIGHT
94
+ RIGHT
95
+ RIGHT
96
+ RIGHT
97
+ RIGHT
98
+ RIGHT
99
+ RIGHT
100
+ RIGHT
101
+ RIGHT
102
+ RIGHT
103
+ RIGHT
104
+ RIGHT
105
+ RIGHT
106
+ RIGHT
107
+ RIGHT
108
+ RIGHT
109
+ RIGHT
110
+ RIGHT
111
+ RIGHT
112
+ RIGHT
113
+ RIGHT
114
+ RIGHT
115
+ RIGHT
116
+ RIGHT
117
+ RIGHT
118
+ RIGHT
119
+ RIGHT
120
+ RIGHT
121
+ RIGHT
122
+ RIGHT
123
+ RIGHT
124
+ RIGHT
125
+ RIGHT
126
+ RIGHT
127
+ RIGHT
128
+ RIGHT
129
+ RIGHT
130
+ RIGHT
131
+ RIGHT
132
+ RIGHT
133
+ RIGHT
134
+ RIGHT
135
+ RIGHT
136
+ RIGHT
137
+ RIGHT
138
+ RIGHT
139
+ RIGHT
140
+ RIGHT
141
+ RIGHT
142
+ RIGHT
143
+ RIGHT
144
+ RIGHT
145
+ RIGHT
146
+ RIGHT
147
+ RIGHT
148
+ RIGHT
149
+ RIGHT
150
+ RIGHT
151
+ RIGHT
152
+ RIGHT
153
+ RIGHT
154
+ RIGHT
155
+ RIGHT
156
+ RIGHT
157
+ RIGHT
158
+ RIGHT
159
+ RIGHT
160
+ RIGHT
161
+ RIGHT
162
+ RIGHT
163
+ RIGHT
164
+ RIGHT
165
+ RIGHT
166
+ RIGHT
167
+ RIGHT
168
+ RIGHT
169
+ RIGHT
170
+ RIGHT
171
+ RIGHT
172
+ RIGHT
173
+ RIGHT
174
+ RIGHT
175
+ RIGHT
176
+ RIGHT
177
+ RIGHT
178
+ RIGHT
179
+ RIGHT
180
+ RIGHT
181
+ RIGHT
182
+ RIGHT
183
+ RIGHT
184
+ RIGHT
185
+ RIGHT
186
+ RIGHT
187
+ RIGHT
188
+ RIGHT
189
+ RIGHT
190
+ RIGHT
191
+ RIGHT
192
+ RIGHT
193
+ RIGHT
194
+ STRAIGHT
195
+ STRAIGHT
196
+ STRAIGHT
197
+ STRAIGHT
198
+ STRAIGHT
199
+ STRAIGHT
200
+ STRAIGHT
201
+ STRAIGHT
202
+ STRAIGHT
203
+ STRAIGHT
204
+ STRAIGHT
205
+ STRAIGHT
206
+ STRAIGHT
207
+ STRAIGHT
208
+ STRAIGHT
209
+ STRAIGHT
210
+ STRAIGHT
211
+ LEFT
212
+ STRAIGHT
213
+ LEFT
214
+ STRAIGHT
215
+ LEFT
216
+ RIGHT
217
+ STRAIGHT
218
+ RIGHT
219
+ RIGHT
220
+ RIGHT
221
+ RIGHT
222
+ RIGHT
223
+ RIGHT
224
+ RIGHT
225
+ RIGHT
226
+ RIGHT
227
+ RIGHT
228
+ RIGHT
229
+ RIGHT
230
+ RIGHT
231
+ RIGHT
232
+ RIGHT
233
+ RIGHT
234
+ RIGHT
235
+ RIGHT
236
+ RIGHT
237
+ RIGHT
238
+ RIGHT
239
+ RIGHT
240
+ RIGHT
241
+ RIGHT
242
+ RIGHT
243
+ RIGHT
244
+ RIGHT
245
+ RIGHT
246
+ RIGHT
247
+ RIGHT
248
+ RIGHT
249
+ RIGHT
250
+ RIGHT
251
+ RIGHT
252
+ RIGHT
253
+ RIGHT
254
+ RIGHT
255
+ RIGHT
256
+ RIGHT
257
+ RIGHT
258
+ RIGHT
259
+ RIGHT
260
+ RIGHT
261
+ RIGHT
262
+ RIGHT
263
+ RIGHT
264
+ RIGHT
265
+ RIGHT
266
+ RIGHT
267
+ RIGHT
268
+ RIGHT
269
+ RIGHT
270
+ RIGHT
271
+ STRAIGHT
272
+ LEFT
273
+ RIGHT
274
+ RIGHT
275
+ RIGHT
276
+ RIGHT
277
+ RIGHT
278
+ RIGHT
279
+ RIGHT
280
+ RIGHT
281
+ LEFT
282
+ LEFT
283
+ LEFT
284
+ LEFT
285
+ RIGHT
286
+ RIGHT
287
+ RIGHT
288
+ RIGHT
289
+ RIGHT
290
+ RIGHT
291
+ RIGHT
292
+ RIGHT
293
+ RIGHT
294
+ RIGHT
295
+ RIGHT
296
+ RIGHT
297
+ RIGHT
298
+ RIGHT
299
+ RIGHT
300
+ RIGHT
301
+ RIGHT
302
+ RIGHT
303
+ RIGHT
304
+ RIGHT
305
+ RIGHT
306
+ RIGHT
307
+ RIGHT
308
+ RIGHT
309
+ RIGHT
310
+ RIGHT
311
+ RIGHT
312
+ RIGHT
313
+ RIGHT
314
+ RIGHT
315
+ RIGHT
316
+ RIGHT
317
+ RIGHT
318
+ RIGHT
319
+ RIGHT
320
+ RIGHT
321
+ RIGHT
322
+ RIGHT
323
+ RIGHT
324
+ RIGHT
325
+ RIGHT
326
+ RIGHT
327
+ RIGHT
328
+ RIGHT
329
+ RIGHT
330
+ RIGHT
331
+ RIGHT
332
+ RIGHT
333
+ RIGHT
334
+ RIGHT
335
+ RIGHT
336
+ RIGHT
337
+ RIGHT
338
+ RIGHT
339
+ RIGHT
340
+ RIGHT
341
+ RIGHT
342
+ RIGHT
343
+ RIGHT
344
+ RIGHT
345
+ RIGHT
346
+ RIGHT
347
+ RIGHT
348
+ RIGHT
349
+ STRAIGHT
350
+ STRAIGHT
351
+ STRAIGHT
352
+ STRAIGHT
353
+ STRAIGHT
354
+ STRAIGHT
355
+ STRAIGHT
356
+ STRAIGHT
357
+ STRAIGHT
358
+ STRAIGHT
359
+ RIGHT
360
+ STRAIGHT
361
+ STRAIGHT
362
+ STRAIGHT
363
+ STRAIGHT
364
+ STRAIGHT
365
+ RIGHT
366
+ RIGHT
367
+ RIGHT
368
+ RIGHT
369
+ RIGHT
370
+ RIGHT
371
+ RIGHT
372
+ RIGHT
373
+ RIGHT
374
+ RIGHT
375
+ RIGHT
376
+ LEFT
377
+ LEFT
378
+ RIGHT
379
+ RIGHT
380
+ RIGHT
381
+ RIGHT
382
+ RIGHT
383
+ RIGHT
384
+ RIGHT
385
+ RIGHT
386
+ STRAIGHT
387
+ STRAIGHT
388
+ STRAIGHT
389
+ RIGHT
390
+ RIGHT
391
+ RIGHT
392
+ RIGHT
393
+ RIGHT
394
+ RIGHT
395
+ RIGHT
396
+ STRAIGHT
397
+ RIGHT
398
+ RIGHT
399
+ RIGHT
400
+ RIGHT
401
+ RIGHT
402
+ RIGHT
403
+ RIGHT
404
+ RIGHT
405
+ RIGHT
406
+ RIGHT
407
+ RIGHT
408
+ RIGHT
409
+ RIGHT
410
+ RIGHT
411
+ RIGHT
412
+ RIGHT
413
+ RIGHT
414
+ RIGHT
415
+ RIGHT
416
+ RIGHT
417
+ RIGHT
418
+ RIGHT
419
+ RIGHT
420
+ RIGHT
421
+ RIGHT
422
+ RIGHT
423
+ RIGHT
424
+ RIGHT
425
+ RIGHT
426
+ RIGHT
427
+ RIGHT
428
+ RIGHT
429
+ RIGHT
430
+ RIGHT
431
+ RIGHT
432
+ RIGHT
433
+ RIGHT
434
+ RIGHT
435
+ RIGHT
436
+ RIGHT
437
+ RIGHT
438
+ RIGHT
439
+ RIGHT
440
+ RIGHT
441
+ RIGHT
442
+ RIGHT
443
+ RIGHT
444
+ RIGHT
445
+ STRAIGHT
446
+ RIGHT
447
+ RIGHT
448
+ STRAIGHT
449
+ RIGHT
450
+ RIGHT
451
+ RIGHT
452
+ RIGHT
453
+ STRAIGHT
454
+ STRAIGHT
455
+ STRAIGHT
456
+ LEFT
457
+ STRAIGHT
458
+ STRAIGHT
459
+ STRAIGHT
460
+ STRAIGHT
461
+ RIGHT
462
+ RIGHT
463
+ STRAIGHT
464
+ STRAIGHT
465
+ STRAIGHT
466
+ RIGHT
467
+ RIGHT
468
+ RIGHT
469
+ LEFT
470
+ STRAIGHT
471
+ LEFT
472
+ RIGHT
473
+ RIGHT
474
+ RIGHT
475
+ RIGHT
476
+ RIGHT
477
+ RIGHT
478
+ RIGHT
479
+ RIGHT
480
+ RIGHT
481
+ RIGHT
482
+ RIGHT
483
+ RIGHT
484
+ RIGHT
485
+ RIGHT
486
+ RIGHT
487
+ RIGHT
488
+ RIGHT
489
+ RIGHT
490
+ RIGHT
491
+ RIGHT
492
+ RIGHT
493
+ RIGHT
494
+ RIGHT
495
+ RIGHT
496
+ RIGHT
497
+ RIGHT
498
+ RIGHT
499
+ RIGHT
500
+ RIGHT
501
+ RIGHT
502
+ RIGHT
503
+ RIGHT
504
+ RIGHT
505
+ RIGHT
506
+ RIGHT
507
+ RIGHT
508
+ RIGHT
509
+ RIGHT
510
+ RIGHT
511
+ RIGHT
512
+ STRAIGHT
513
+ STRAIGHT
514
+ STRAIGHT
515
+ STRAIGHT
516
+ STRAIGHT
517
+ STRAIGHT
518
+ STRAIGHT
519
+ STRAIGHT
520
+ STRAIGHT
521
+ RIGHT
522
+ RIGHT
523
+ RIGHT
524
+ STRAIGHT
525
+ STRAIGHT
526
+ STRAIGHT
527
+ STRAIGHT
528
+ STRAIGHT
529
+ STRAIGHT
530
+ STRAIGHT
531
+ STRAIGHT
532
+ STRAIGHT
533
+ STRAIGHT
534
+ STRAIGHT
535
+ STRAIGHT
536
+ LEFT
537
+ LEFT
538
+ LEFT
539
+ LEFT
540
+ LEFT
541
+ LEFT
542
+ LEFT
543
+ LEFT
544
+ LEFT
545
+ LEFT
546
+ LEFT
547
+ LEFT
548
+ RIGHT
549
+ RIGHT
550
+ RIGHT
551
+ RIGHT
552
+ RIGHT
553
+ RIGHT
554
+ RIGHT
555
+ RIGHT
556
+ RIGHT
557
+ RIGHT
558
+ RIGHT
559
+ RIGHT
560
+ RIGHT
561
+ RIGHT
562
+ RIGHT
563
+ RIGHT
564
+ RIGHT
565
+ RIGHT
566
+ RIGHT
567
+ RIGHT
568
+ RIGHT
569
+ RIGHT
570
+ RIGHT
571
+ RIGHT
572
+ RIGHT
573
+ RIGHT
574
+ RIGHT
575
+ RIGHT
576
+ RIGHT
577
+ RIGHT
578
+ RIGHT
579
+ RIGHT
580
+ RIGHT
581
+ RIGHT
582
+ RIGHT
583
+ RIGHT
584
+ RIGHT
585
+ RIGHT
586
+ RIGHT
587
+ RIGHT
588
+ RIGHT
589
+ RIGHT
590
+ RIGHT
591
+ RIGHT
592
+ RIGHT
593
+ RIGHT
594
+ RIGHT
595
+ RIGHT
596
+ RIGHT
597
+ RIGHT
598
+ RIGHT
599
+ RIGHT
600
+ RIGHT
601
+ RIGHT
602
+ RIGHT
603
+ RIGHT
604
+ RIGHT
605
+ RIGHT
606
+ RIGHT
607
+ RIGHT
608
+ RIGHT
609
+ RIGHT
610
+ RIGHT
611
+ RIGHT
612
+ RIGHT
613
+ RIGHT
614
+ RIGHT
615
+ RIGHT
616
+ RIGHT
617
+ RIGHT
618
+ RIGHT
619
+ RIGHT
620
+ RIGHT
621
+ RIGHT
622
+ RIGHT
623
+ RIGHT
624
+ RIGHT
625
+ RIGHT
626
+ RIGHT
627
+ RIGHT
628
+ RIGHT
629
+ RIGHT
630
+ RIGHT
631
+ RIGHT
632
+ RIGHT
633
+ RIGHT
634
+ RIGHT
635
+ RIGHT
636
+ RIGHT
637
+ RIGHT
638
+ RIGHT
639
+ RIGHT
640
+ RIGHT
641
+ RIGHT
642
+ RIGHT
643
+ RIGHT
644
+ RIGHT
645
+ RIGHT
646
+ RIGHT
647
+ RIGHT
648
+ RIGHT
649
+ RIGHT
650
+ RIGHT
651
+ RIGHT
652
+ RIGHT
653
+ RIGHT
654
+ RIGHT
655
+ RIGHT
656
+ RIGHT
657
+ RIGHT
658
+ RIGHT
659
+ RIGHT
660
+ RIGHT
661
+ RIGHT
662
+ RIGHT
663
+ RIGHT
664
+ RIGHT
665
+ RIGHT
666
+ RIGHT
667
+ RIGHT
668
+ RIGHT
669
+ RIGHT
670
+ RIGHT
671
+ RIGHT
672
+ RIGHT
673
+ RIGHT
674
+ RIGHT
675
+ RIGHT
676
+ RIGHT
677
+ RIGHT
678
+ RIGHT
679
+ RIGHT
680
+ RIGHT
681
+ RIGHT
682
+ RIGHT
683
+ RIGHT
684
+ RIGHT
685
+ RIGHT
686
+ RIGHT
687
+ RIGHT
688
+ RIGHT
689
+ RIGHT
690
+ RIGHT
691
+ RIGHT
692
+ RIGHT
693
+ RIGHT
694
+ RIGHT
695
+ RIGHT
696
+ RIGHT
697
+ RIGHT
698
+ RIGHT
699
+ RIGHT
700
+ RIGHT
701
+ RIGHT
702
+ RIGHT
703
+ RIGHT
704
+ RIGHT
705
+ RIGHT
706
+ RIGHT
707
+ RIGHT
708
+ RIGHT
709
+ RIGHT
710
+ RIGHT
711
+ STRAIGHT
712
+ STRAIGHT
713
+ STRAIGHT
714
+ STRAIGHT
715
+ STRAIGHT
716
+ STRAIGHT
717
+ STRAIGHT
718
+ STRAIGHT
719
+ RIGHT
720
+ RIGHT
721
+ RIGHT
722
+ RIGHT
723
+ RIGHT
724
+ RIGHT
725
+ RIGHT
726
+ RIGHT
727
+ RIGHT
728
+ RIGHT
729
+ RIGHT
730
+ RIGHT
731
+ RIGHT
732
+ RIGHT
733
+ RIGHT
734
+ RIGHT
735
+ RIGHT
736
+ RIGHT
737
+ RIGHT
738
+ RIGHT
739
+ RIGHT
740
+ LEFT
741
+ LEFT
742
+ LEFT
743
+ STRAIGHT
744
+ RIGHT
745
+ RIGHT
746
+ RIGHT
747
+ RIGHT
748
+ RIGHT
749
+ RIGHT
750
+ RIGHT
751
+ RIGHT
752
+ RIGHT
753
+ RIGHT
754
+ RIGHT
755
+ RIGHT
756
+ LEFT
757
+ RIGHT
758
+ RIGHT
759
+ RIGHT
760
+ RIGHT
761
+ LEFT
762
+ LEFT
763
+ LEFT
764
+ LEFT
765
+ LEFT
766
+ LEFT
767
+ LEFT
768
+ RIGHT
769
+ LEFT
770
+ LEFT
771
+ RIGHT
772
+ RIGHT
773
+ RIGHT
774
+ RIGHT
775
+ RIGHT
776
+ RIGHT
777
+ RIGHT
778
+ RIGHT
779
+ RIGHT
780
+ RIGHT
781
+ RIGHT
782
+ RIGHT
783
+ RIGHT
784
+ RIGHT
785
+ STRAIGHT
786
+ STRAIGHT
787
+ STRAIGHT
788
+ STRAIGHT
789
+ STRAIGHT
790
+ STRAIGHT
791
+ STRAIGHT
792
+ STRAIGHT
793
+ STRAIGHT
794
+ STRAIGHT
795
+ STRAIGHT
796
+ RIGHT
797
+ RIGHT
798
+ RIGHT
799
+ RIGHT
800
+ RIGHT
801
+ RIGHT
802
+ RIGHT
803
+ RIGHT
804
+ RIGHT
805
+ RIGHT
806
+ RIGHT
807
+ STRAIGHT
808
+ STRAIGHT
809
+ STRAIGHT
810
+ RIGHT
811
+ STRAIGHT
812
+ STRAIGHT
813
+ RIGHT
814
+ STRAIGHT
815
+ STRAIGHT
816
+ STRAIGHT
817
+ RIGHT
818
+ RIGHT
819
+ STRAIGHT
820
+ STRAIGHT
821
+ RIGHT
822
+ RIGHT
823
+ RIGHT
824
+ RIGHT
825
+ RIGHT
826
+ RIGHT
827
+ RIGHT
828
+ STRAIGHT
829
+ STRAIGHT
830
+ STRAIGHT
831
+ STRAIGHT
832
+ STRAIGHT
833
+ STRAIGHT
834
+ STRAIGHT
835
+ STRAIGHT
836
+ STRAIGHT
837
+ STRAIGHT
838
+ STRAIGHT
839
+ STRAIGHT
840
+ STRAIGHT
841
+ STRAIGHT
842
+ STRAIGHT
843
+ STRAIGHT
844
+ STRAIGHT
845
+ RIGHT
846
+ RIGHT
847
+ RIGHT
848
+ RIGHT
849
+ RIGHT
850
+ RIGHT
851
+ RIGHT
852
+ RIGHT
853
+ RIGHT
854
+ RIGHT
855
+ RIGHT
856
+ RIGHT
857
+ RIGHT
858
+ RIGHT
859
+ RIGHT
860
+ RIGHT
861
+ RIGHT
862
+ RIGHT
863
+ RIGHT
864
+ RIGHT
865
+ RIGHT
866
+ RIGHT
867
+ RIGHT
868
+ RIGHT
869
+ RIGHT
870
+ RIGHT
871
+ RIGHT
872
+ RIGHT
873
+ RIGHT
874
+ RIGHT
875
+ RIGHT
876
+ RIGHT
877
+ RIGHT
878
+ RIGHT
879
+ RIGHT
880
+ RIGHT
881
+ RIGHT
882
+ RIGHT
883
+ RIGHT
884
+ RIGHT
885
+ RIGHT
886
+ RIGHT
887
+ RIGHT
888
+ RIGHT
889
+ RIGHT
890
+ RIGHT
891
+ RIGHT
892
+ RIGHT
893
+ RIGHT
894
+ RIGHT
895
+ RIGHT
896
+ RIGHT
897
+ RIGHT
898
+ RIGHT
899
+ RIGHT
900
+ RIGHT
901
+ RIGHT
902
+ RIGHT
903
+ RIGHT
904
+ RIGHT
905
+ RIGHT
906
+ RIGHT
907
+ RIGHT
908
+ RIGHT
909
+ RIGHT
910
+ RIGHT
911
+ RIGHT
912
+ RIGHT
913
+ RIGHT
914
+ RIGHT
915
+ RIGHT
916
+ RIGHT
917
+ RIGHT
918
+ RIGHT
919
+ RIGHT
920
+ RIGHT
921
+ RIGHT
922
+ RIGHT
923
+ RIGHT
924
+ RIGHT
925
+ RIGHT
926
+ RIGHT
927
+ RIGHT
928
+ STRAIGHT
929
+ STRAIGHT
930
+ RIGHT
931
+ RIGHT
932
+ RIGHT
933
+ RIGHT
934
+ RIGHT
935
+ RIGHT
936
+ RIGHT
937
+ RIGHT
938
+ RIGHT
939
+ RIGHT
940
+ RIGHT
941
+ RIGHT
942
+ RIGHT
943
+ RIGHT
944
+ RIGHT
945
+ RIGHT
946
+ RIGHT
947
+ RIGHT
948
+ RIGHT
949
+ RIGHT
950
+ RIGHT
951
+ RIGHT
952
+ RIGHT
953
+ RIGHT
954
+ RIGHT
955
+ RIGHT
956
+ RIGHT
957
+ RIGHT
958
+ RIGHT
959
+ RIGHT
960
+ STRAIGHT
961
+ STRAIGHT
962
+ STRAIGHT
963
+ STRAIGHT
964
+ STRAIGHT
965
+ RIGHT
966
+ STRAIGHT
967
+ STRAIGHT
968
+ STRAIGHT
969
+ STRAIGHT
970
+ STRAIGHT
971
+ STRAIGHT
972
+ STRAIGHT
973
+ STRAIGHT
974
+ STRAIGHT
975
+ STRAIGHT
976
+ STRAIGHT
977
+ STRAIGHT
978
+ STRAIGHT
979
+ STRAIGHT
980
+ STRAIGHT
981
+ STRAIGHT
982
+ STRAIGHT
983
+ STRAIGHT
984
+ STRAIGHT
985
+ STRAIGHT
986
+ STRAIGHT
987
+ STRAIGHT
988
+ STRAIGHT
989
+ STRAIGHT
990
+ STRAIGHT
991
+ STRAIGHT
992
+ STRAIGHT
993
+ STRAIGHT
994
+ STRAIGHT
995
+ STRAIGHT
996
+ STRAIGHT
997
+ STRAIGHT
998
+ STRAIGHT
999
+ STRAIGHT
1000
+ STRAIGHT
1001
+ STRAIGHT
1002
+ STRAIGHT
1003
+ STRAIGHT
1004
+ LEFT
1005
+ LEFT
1006
+ LEFT
1007
+ LEFT
1008
+ LEFT
1009
+ STRAIGHT
1010
+ STRAIGHT
1011
+ STRAIGHT
1012
+ STRAIGHT
1013
+ LEFT
1014
+ STRAIGHT
1015
+ STRAIGHT
1016
+ STRAIGHT
1017
+ STRAIGHT
1018
+ STRAIGHT
1019
+ STRAIGHT
1020
+ STRAIGHT
1021
+ STRAIGHT
1022
+ STRAIGHT
1023
+ STRAIGHT
1024
+ STRAIGHT
1025
+ STRAIGHT
1026
+ STRAIGHT
1027
+ STRAIGHT
1028
+ STRAIGHT
1029
+ STRAIGHT
1030
+ STRAIGHT
1031
+ STRAIGHT
1032
+ LEFT
1033
+ STRAIGHT
1034
+ LEFT
1035
+ LEFT
1036
+ LEFT
1037
+ LEFT
1038
+ LEFT
1039
+ LEFT
1040
+ LEFT
1041
+ LEFT
1042
+ LEFT
1043
+ LEFT
1044
+ LEFT
1045
+ LEFT
1046
+ LEFT
1047
+ LEFT
1048
+ LEFT
1049
+ LEFT
1050
+ LEFT
1051
+ LEFT
1052
+ LEFT
1053
+ LEFT
1054
+ LEFT
1055
+ LEFT
1056
+ LEFT
1057
+ LEFT
1058
+ LEFT
1059
+ LEFT
1060
+ LEFT
1061
+ LEFT
1062
+ LEFT
1063
+ LEFT
1064
+ LEFT
1065
+ LEFT
1066
+ LEFT
1067
+ LEFT
1068
+ LEFT
1069
+ LEFT
1070
+ LEFT
1071
+ LEFT
1072
+ LEFT
1073
+ LEFT
1074
+ LEFT
1075
+ LEFT
1076
+ LEFT
1077
+ LEFT
1078
+ LEFT
1079
+ LEFT
1080
+ LEFT
1081
+ LEFT
1082
+ LEFT
1083
+ LEFT
1084
+ LEFT
1085
+ LEFT
1086
+ LEFT
1087
+ LEFT
1088
+ LEFT
1089
+ LEFT
1090
+ LEFT
1091
+ LEFT
1092
+ LEFT
1093
+ LEFT
1094
+ LEFT
1095
+ LEFT
1096
+ LEFT
1097
+ LEFT
1098
+ LEFT
1099
+ LEFT
1100
+ LEFT
1101
+ LEFT
1102
+ LEFT
1103
+ LEFT
1104
+ LEFT
1105
+ LEFT
1106
+ LEFT
1107
+ LEFT
1108
+ LEFT
1109
+ RIGHT
1110
+ RIGHT
1111
+ RIGHT
1112
+ RIGHT
1113
+ RIGHT
1114
+ RIGHT
1115
+ RIGHT
1116
+ RIGHT
1117
+ RIGHT
1118
+ RIGHT
1119
+ RIGHT
1120
+ RIGHT
1121
+ RIGHT
1122
+ RIGHT
1123
+ RIGHT
1124
+ LEFT
1125
+ LEFT
1126
+ LEFT
1127
+ LEFT
1128
+ LEFT
1129
+ LEFT
1130
+ LEFT
1131
+ LEFT
1132
+ LEFT
1133
+ LEFT
1134
+ LEFT
1135
+ LEFT
1136
+ LEFT
1137
+ LEFT
1138
+ LEFT
1139
+ LEFT
1140
+ LEFT
1141
+ LEFT
1142
+ LEFT
1143
+ LEFT
1144
+ LEFT
1145
+ LEFT
1146
+ LEFT
1147
+ LEFT
1148
+ LEFT
1149
+ LEFT
1150
+ LEFT
1151
+ LEFT
1152
+ LEFT
1153
+ LEFT
1154
+ LEFT
1155
+ LEFT
1156
+ LEFT
1157
+ LEFT
1158
+ LEFT
1159
+ LEFT
1160
+ RIGHT
1161
+ RIGHT
1162
+ RIGHT
1163
+ RIGHT
1164
+ RIGHT
1165
+ RIGHT
1166
+ RIGHT
1167
+ RIGHT
1168
+ RIGHT
1169
+ RIGHT
1170
+ RIGHT
1171
+ RIGHT
1172
+ RIGHT
1173
+ RIGHT
1174
+ RIGHT
1175
+ RIGHT
1176
+ RIGHT
1177
+ RIGHT
1178
+ STRAIGHT
1179
+ RIGHT
1180
+ RIGHT
1181
+ RIGHT
1182
+ RIGHT
1183
+ RIGHT
1184
+ RIGHT
1185
+ LEFT
1186
+ LEFT
1187
+ LEFT
1188
+ RIGHT
1189
+ RIGHT
1190
+ LEFT
1191
+ RIGHT
1192
+ RIGHT
1193
+ RIGHT
1194
+ RIGHT
1195
+ RIGHT
1196
+ RIGHT
1197
+ RIGHT
1198
+ RIGHT
1199
+ RIGHT
1200
+ STRAIGHT
1201
+ STRAIGHT
1202
+ STRAIGHT
1203
+ STRAIGHT
1204
+ STRAIGHT
1205
+ STRAIGHT
1206
+ STRAIGHT
1207
+ STRAIGHT
1208
+ STRAIGHT
1209
+ STRAIGHT
1210
+ STRAIGHT
1211
+ LEFT
1212
+ STRAIGHT
1213
+ STRAIGHT
1214
+ STRAIGHT
1215
+ STRAIGHT
1216
+ RIGHT
1217
+ STRAIGHT
1218
+ RIGHT
1219
+ RIGHT
1220
+ STRAIGHT
1221
+ STRAIGHT
1222
+ STRAIGHT
1223
+ STRAIGHT
1224
+ STRAIGHT
1225
+ RIGHT
1226
+ STRAIGHT
1227
+ STRAIGHT
1228
+ STRAIGHT
1229
+ STRAIGHT
1230
+ STRAIGHT
1231
+ STRAIGHT
1232
+ STRAIGHT
1233
+ STRAIGHT
1234
+ RIGHT
1235
+ STRAIGHT
1236
+ STRAIGHT
1237
+ STRAIGHT
1238
+ STRAIGHT
1239
+ STRAIGHT
1240
+ STRAIGHT
1241
+ STRAIGHT
1242
+ STRAIGHT
1243
+ STRAIGHT
1244
+ STRAIGHT
1245
+ STRAIGHT
1246
+ STRAIGHT
1247
+ STRAIGHT
1248
+ STRAIGHT
1249
+ STRAIGHT
1250
+ STRAIGHT
1251
+ STRAIGHT
1252
+ STRAIGHT
1253
+ STRAIGHT
1254
+ STRAIGHT
1255
+ STRAIGHT
1256
+ STRAIGHT
1257
+ STRAIGHT
1258
+ STRAIGHT
1259
+ STRAIGHT
1260
+ STRAIGHT
1261
+ STRAIGHT
1262
+ STRAIGHT
1263
+ STRAIGHT
1264
+ STRAIGHT
1265
+ STRAIGHT
1266
+ STRAIGHT
1267
+ STRAIGHT
1268
+ STRAIGHT
1269
+ STRAIGHT
1270
+ STRAIGHT
1271
+ STRAIGHT
1272
+ STRAIGHT
1273
+ STRAIGHT
1274
+ STRAIGHT
1275
+ STRAIGHT
1276
+ STRAIGHT
1277
+ STRAIGHT
1278
+ STRAIGHT
1279
+ STRAIGHT
1280
+ STRAIGHT
1281
+ STRAIGHT
1282
+ STRAIGHT
1283
+ STRAIGHT
1284
+ STRAIGHT
1285
+ STRAIGHT
1286
+ STRAIGHT
1287
+ STRAIGHT
1288
+ STRAIGHT
1289
+ STRAIGHT
1290
+ STRAIGHT
1291
+ STRAIGHT
1292
+ STRAIGHT
1293
+ STRAIGHT
1294
+ STRAIGHT
1295
+ STRAIGHT
1296
+ RIGHT
1297
+ STRAIGHT
1298
+ STRAIGHT
1299
+ RIGHT
1300
+ STRAIGHT
1301
+ STRAIGHT
1302
+ STRAIGHT
1303
+ STRAIGHT
1304
+ STRAIGHT
1305
+ STRAIGHT
1306
+ STRAIGHT
1307
+ STRAIGHT
1308
+ STRAIGHT
1309
+ STRAIGHT
1310
+ STRAIGHT
1311
+ STRAIGHT
1312
+ STRAIGHT
1313
+ STRAIGHT
1314
+ STRAIGHT
1315
+ STRAIGHT
1316
+ STRAIGHT
1317
+ RIGHT
1318
+ STRAIGHT
1319
+ STRAIGHT
1320
+ STRAIGHT
1321
+ STRAIGHT
1322
+ RIGHT
1323
+ STRAIGHT
1324
+ STRAIGHT
1325
+ STRAIGHT
1326
+ STRAIGHT
1327
+ STRAIGHT
1328
+ STRAIGHT
1329
+ STRAIGHT
1330
+ STRAIGHT
1331
+ STRAIGHT
1332
+ STRAIGHT
1333
+ STRAIGHT
1334
+ STRAIGHT
1335
+ STRAIGHT
1336
+ RIGHT
1337
+ RIGHT
1338
+ STRAIGHT
1339
+ STRAIGHT
1340
+ STRAIGHT
1341
+ STRAIGHT
1342
+ STRAIGHT
1343
+ STRAIGHT
1344
+ STRAIGHT
1345
+ STRAIGHT
1346
+ STRAIGHT
1347
+ STRAIGHT
1348
+ STRAIGHT
1349
+ STRAIGHT
1350
+ STRAIGHT
1351
+ STRAIGHT
1352
+ RIGHT
1353
+ RIGHT
1354
+ RIGHT
1355
+ RIGHT
1356
+ STRAIGHT
1357
+ RIGHT
1358
+ RIGHT
1359
+ RIGHT
1360
+ RIGHT
1361
+ RIGHT
1362
+ RIGHT
1363
+ RIGHT
1364
+ RIGHT
1365
+ RIGHT
1366
+ RIGHT
1367
+ RIGHT
1368
+ STRAIGHT
1369
+ STRAIGHT
1370
+ STRAIGHT
1371
+ STRAIGHT
1372
+ STRAIGHT
1373
+ STRAIGHT
1374
+ STRAIGHT
1375
+ STRAIGHT
1376
+ STRAIGHT
1377
+ STRAIGHT
1378
+ STRAIGHT
1379
+ STRAIGHT
1380
+ STRAIGHT
1381
+ STRAIGHT
1382
+ STRAIGHT
1383
+ STRAIGHT
1384
+ STRAIGHT
1385
+ STRAIGHT
1386
+ STRAIGHT
1387
+ STRAIGHT
1388
+ STRAIGHT
1389
+ STRAIGHT
1390
+ STRAIGHT
1391
+ STRAIGHT
1392
+ STRAIGHT
1393
+ STRAIGHT
1394
+ STRAIGHT
1395
+ STRAIGHT
1396
+ STRAIGHT
1397
+ STRAIGHT
1398
+ STRAIGHT
1399
+ STRAIGHT
1400
+ STRAIGHT
1401
+ STRAIGHT
1402
+ STRAIGHT
1403
+ STRAIGHT
1404
+ STRAIGHT
1405
+ STRAIGHT
1406
+ STRAIGHT
1407
+ STRAIGHT
1408
+ STRAIGHT
1409
+ STRAIGHT
1410
+ STRAIGHT
1411
+ STRAIGHT
1412
+ STRAIGHT
1413
+ STRAIGHT
1414
+ STRAIGHT
1415
+ STRAIGHT
1416
+ STRAIGHT
1417
+ STRAIGHT
1418
+ STRAIGHT
1419
+ STRAIGHT
1420
+ STRAIGHT
1421
+ STRAIGHT
1422
+ STRAIGHT
1423
+ STRAIGHT
1424
+ RIGHT
1425
+ RIGHT
1426
+ RIGHT
1427
+ RIGHT
1428
+ RIGHT
1429
+ RIGHT
1430
+ RIGHT
1431
+ RIGHT
1432
+ RIGHT
1433
+ RIGHT
1434
+ RIGHT
1435
+ RIGHT
1436
+ RIGHT
1437
+ RIGHT
1438
+ RIGHT
1439
+ RIGHT
1440
+ RIGHT
1441
+ RIGHT
1442
+ RIGHT
1443
+ RIGHT
1444
+ RIGHT
1445
+ RIGHT
1446
+ RIGHT
1447
+ RIGHT
1448
+ RIGHT
1449
+ RIGHT
1450
+ RIGHT
1451
+ RIGHT
1452
+ RIGHT
1453
+ RIGHT
1454
+ LEFT
1455
+ RIGHT
1456
+ RIGHT
1457
+ RIGHT
1458
+ RIGHT
1459
+ RIGHT
1460
+ RIGHT
1461
+ RIGHT
1462
+ LEFT
1463
+ LEFT
1464
+ LEFT
1465
+ LEFT
1466
+ LEFT
1467
+ LEFT
1468
+ LEFT
1469
+ LEFT
1470
+ LEFT
1471
+ LEFT
1472
+ RIGHT
1473
+ RIGHT
1474
+ RIGHT
1475
+ RIGHT
1476
+ RIGHT
1477
+ RIGHT
1478
+ RIGHT
1479
+ RIGHT
1480
+ RIGHT
1481
+ RIGHT
1482
+ RIGHT
1483
+ LEFT
1484
+ LEFT
1485
+ LEFT
1486
+ RIGHT
1487
+ LEFT
1488
+ RIGHT
1489
+ LEFT
1490
+ RIGHT
1491
+ RIGHT
1492
+ RIGHT
1493
+ RIGHT
1494
+ RIGHT
1495
+ RIGHT
1496
+ RIGHT
1497
+ RIGHT
1498
+ RIGHT
1499
+ RIGHT
1500
+ RIGHT
1501
+ RIGHT
1502
+ RIGHT
1503
+ RIGHT
1504
+ RIGHT
1505
+ RIGHT
1506
+ RIGHT
1507
+ RIGHT
1508
+ RIGHT
1509
+ RIGHT
1510
+ RIGHT
1511
+ RIGHT
1512
+ RIGHT
1513
+ RIGHT
1514
+ RIGHT
1515
+ LEFT
1516
+ LEFT
1517
+ RIGHT
1518
+ RIGHT
1519
+ LEFT
1520
+ LEFT
1521
+ LEFT
1522
+ LEFT
1523
+ LEFT
1524
+ LEFT
1525
+ LEFT
1526
+ RIGHT
1527
+ RIGHT
1528
+ RIGHT
1529
+ RIGHT
1530
+ RIGHT
1531
+ STRAIGHT
1532
+ STRAIGHT
1533
+ RIGHT
1534
+ RIGHT
1535
+ RIGHT
1536
+ RIGHT
1537
+ RIGHT
1538
+ STRAIGHT
1539
+ RIGHT
1540
+ RIGHT
1541
+ STRAIGHT
1542
+ STRAIGHT
1543
+ RIGHT
1544
+ RIGHT
1545
+ RIGHT
1546
+ STRAIGHT
1547
+ RIGHT
1548
+ STRAIGHT
1549
+ RIGHT
1550
+ RIGHT
1551
+ RIGHT
1552
+ RIGHT
1553
+ RIGHT
1554
+ RIGHT
1555
+ RIGHT
1556
+ RIGHT
1557
+ RIGHT
1558
+ RIGHT
1559
+ STRAIGHT
1560
+ STRAIGHT
1561
+ RIGHT
1562
+ STRAIGHT
1563
+ STRAIGHT
1564
+ STRAIGHT
1565
+ RIGHT
1566
+ STRAIGHT
1567
+ STRAIGHT
1568
+ STRAIGHT
1569
+ RIGHT
1570
+ RIGHT
1571
+ STRAIGHT
1572
+ RIGHT
1573
+ RIGHT
1574
+ RIGHT
1575
+ LEFT
1576
+ LEFT
1577
+ LEFT
1578
+ LEFT
1579
+ LEFT
1580
+ LEFT
1581
+ LEFT
1582
+ LEFT
1583
+ LEFT
1584
+ RIGHT
1585
+ RIGHT
1586
+ RIGHT
1587
+ RIGHT
1588
+ RIGHT
1589
+ RIGHT
1590
+ RIGHT
1591
+ RIGHT
1592
+ RIGHT
1593
+ RIGHT
1594
+ RIGHT
1595
+ RIGHT
1596
+ RIGHT
1597
+ RIGHT
1598
+ RIGHT
1599
+ RIGHT
1600
+ RIGHT
1601
+ RIGHT
1602
+ RIGHT
1603
+ STRAIGHT
1604
+ RIGHT
1605
+ RIGHT
1606
+ RIGHT
1607
+ STRAIGHT
1608
+ RIGHT
1609
+ RIGHT
1610
+ RIGHT
1611
+ STRAIGHT
1612
+ STRAIGHT
1613
+ RIGHT
1614
+ RIGHT
1615
+ STRAIGHT
1616
+ RIGHT
1617
+ RIGHT
1618
+ RIGHT
1619
+ STRAIGHT
1620
+ STRAIGHT
1621
+ STRAIGHT
1622
+ STRAIGHT
1623
+ STRAIGHT
1624
+ STRAIGHT
1625
+ STRAIGHT
1626
+ STRAIGHT
1627
+ RIGHT
1628
+ RIGHT
1629
+ RIGHT
1630
+ RIGHT
1631
+ RIGHT
1632
+ RIGHT
1633
+ RIGHT
1634
+ STRAIGHT
1635
+ RIGHT
1636
+ RIGHT
1637
+ STRAIGHT
1638
+ RIGHT
1639
+ RIGHT
1640
+ RIGHT
1641
+ RIGHT
1642
+ RIGHT
1643
+ RIGHT
1644
+ RIGHT
1645
+ RIGHT
1646
+ RIGHT
1647
+ RIGHT
1648
+ RIGHT
1649
+ RIGHT
1650
+ RIGHT
1651
+ RIGHT
1652
+ RIGHT
1653
+ RIGHT
1654
+ RIGHT
1655
+ RIGHT
1656
+ RIGHT
1657
+ RIGHT
1658
+ RIGHT
1659
+ RIGHT
1660
+ RIGHT
1661
+ RIGHT
1662
+ RIGHT
1663
+ STRAIGHT
1664
+ RIGHT
1665
+ RIGHT
1666
+ STRAIGHT
1667
+ STRAIGHT
1668
+ STRAIGHT
1669
+ RIGHT
1670
+ STRAIGHT
1671
+ STRAIGHT
1672
+ STRAIGHT
1673
+ STRAIGHT
1674
+ STRAIGHT
1675
+ RIGHT
1676
+ RIGHT
1677
+ RIGHT
1678
+ RIGHT
1679
+ RIGHT
1680
+ RIGHT
1681
+ RIGHT
1682
+ RIGHT
1683
+ RIGHT
1684
+ RIGHT
1685
+ RIGHT
1686
+ RIGHT
1687
+ RIGHT
1688
+ RIGHT
1689
+ RIGHT
1690
+ RIGHT
1691
+ RIGHT
1692
+ RIGHT
1693
+ RIGHT
1694
+ RIGHT
1695
+ RIGHT
1696
+ RIGHT
1697
+ RIGHT
1698
+ RIGHT
1699
+ RIGHT
1700
+ RIGHT
1701
+ RIGHT
1702
+ RIGHT
1703
+ RIGHT
1704
+ RIGHT
1705
+ RIGHT
1706
+ RIGHT
1707
+ RIGHT
1708
+ RIGHT
1709
+ RIGHT
1710
+ RIGHT
1711
+ RIGHT
1712
+ RIGHT
1713
+ RIGHT
1714
+ LEFT
1715
+ LEFT
1716
+ LEFT
1717
+ LEFT
1718
+ LEFT
1719
+ LEFT
1720
+ LEFT
1721
+ LEFT
1722
+ RIGHT
1723
+ RIGHT
1724
+ LEFT
1725
+ LEFT
1726
+ LEFT
1727
+ LEFT
1728
+ LEFT
1729
+ LEFT
1730
+ LEFT
1731
+ LEFT
1732
+ LEFT
1733
+ RIGHT
1734
+ RIGHT
1735
+ RIGHT
dataCSV/direction_control.csv ADDED
@@ -0,0 +1 @@
 
 
1
+ direction,angle
dataCSV/direction_control_mask.csv ADDED
@@ -0,0 +1 @@
 
 
1
+ direction,angle
dataCSV/direction_straight.csv ADDED
@@ -0,0 +1 @@
 
 
1
+ direction,angle
images/1.png ADDED

Git LFS Details

  • SHA256: d679caaad4a1dae6884857a6451c2b9b8f1dd67ab82caa8efa110a55cc142080
  • Pointer size: 132 Bytes
  • Size of remote file: 2.34 MB
images/2.png ADDED

Git LFS Details

  • SHA256: 95022111aab8e054a896cf27b530b6ed58931bd5b0ce6d2d0097effdfaf6cae3
  • Pointer size: 132 Bytes
  • Size of remote file: 2.48 MB
images/3.png ADDED

Git LFS Details

  • SHA256: d0be0c798bb4fce74cc9901cc1b5522fa5b34a960dbc7992f68f543021d9b604
  • Pointer size: 132 Bytes
  • Size of remote file: 2.38 MB
images/4.png ADDED

Git LFS Details

  • SHA256: c58d08c86e83cfd735b592e404731ed12efd2b95649708ec563229241bcc0317
  • Pointer size: 132 Bytes
  • Size of remote file: 2.33 MB
label_tool.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import os
3
+ import tkinter as tk
4
+ from PIL import Image, ImageTk
5
+
6
+ root_path = "data_label"
7
+
8
+ # Create folders for labeled images
9
+ os.makedirs(f"{root_path}/left", exist_ok=True)
10
+ os.makedirs(f"{root_path}/right", exist_ok=True)
11
+ os.makedirs(f"{root_path}/straight", exist_ok=True)
12
+
13
+ # Load video
14
+ video_path = "collect_data/1.mp4" # Change this to your video file
15
+ cap = cv2.VideoCapture(video_path)
16
+
17
+ frame_count = 0
18
+ current_frame = None
19
+ highlight_label = None # Biến để lưu nhãn đang được chọn
20
+
21
+ # Read first frame
22
+ def read_frame():
23
+ global current_frame, frame_count
24
+ ret, frame = cap.read()
25
+ if ret:
26
+ current_frame = frame
27
+ frame_count += 1
28
+ show_frame()
29
+ else:
30
+ print("Video ended.")
31
+ cap.release()
32
+ root.quit()
33
+
34
+ # Save frame to respective folder
35
+ def save_frame(label):
36
+ global highlight_label
37
+ if current_frame is not None:
38
+ frame_resized = cv2.resize(current_frame, (640, 480))
39
+ filename = f"{label}/{frame_count}.jpg"
40
+ cv2.imwrite(filename, frame_resized)
41
+ highlight_label = label # Cập nhật nhãn đang chọn
42
+ update_label_highlight()
43
+ read_frame()
44
+
45
+ # Xử lý sự kiện nhấn phím
46
+ def key_press(event):
47
+ if event.char == '1':
48
+ save_frame("left")
49
+ elif event.char == '2':
50
+ save_frame("right")
51
+ elif event.char == '3':
52
+ save_frame("straight")
53
+
54
+ # Hiển thị hình ảnh từ video lên GUI
55
+ def show_frame():
56
+ frame_rgb = cv2.cvtColor(current_frame, cv2.COLOR_BGR2RGB)
57
+ frame_resized = cv2.resize(frame_rgb, (640, 480))
58
+ img = Image.fromarray(frame_resized)
59
+ img = ImageTk.PhotoImage(img)
60
+ panel.config(image=img)
61
+ panel.image = img
62
+
63
+ # Cập nhật giao diện khi nhấn nút
64
+ def update_label_highlight():
65
+ global highlight_label
66
+ if highlight_label == "left":
67
+ label_status.config(text="Selected: LEFT", bg="red")
68
+ elif highlight_label == "right":
69
+ label_status.config(text="Selected: RIGHT", bg="blue")
70
+ elif highlight_label == "straight":
71
+ label_status.config(text="Selected: STRAIGHT", bg="green")
72
+
73
+ # Tạo hiệu ứng biến mất sau 500ms
74
+ root.after(500, reset_label_highlight)
75
+
76
+ # Reset màu nền sau khi chọn label
77
+ def reset_label_highlight():
78
+ label_status.config(text="Press 1, 2, 3 to label", bg="white")
79
+
80
+ # Setup GUI
81
+ root = tk.Tk()
82
+ root.title("Autonomous Car Label Tool")
83
+
84
+ # Set window size to match frame size
85
+ root.geometry("640x580") # Tăng chiều cao để thêm thông báo trạng thái
86
+
87
+ # Add instructions label
88
+ instructions = tk.Label(root, text="Press: 1 for Left | 2 for Right | 3 for Straight", font=("Arial", 12))
89
+ instructions.pack()
90
+
91
+ # Add label status indicator
92
+ label_status = tk.Label(root, text="Press 1, 2, 3 to label", font=("Arial", 14, "bold"), bg="white", width=30)
93
+ label_status.pack(pady=5)
94
+
95
+ # Panel để hiển thị hình ảnh
96
+ panel = tk.Label(root)
97
+ panel.pack()
98
+
99
+ # Bind keyboard events
100
+ root.bind('<Key>', key_press)
101
+
102
+ # Start processing
103
+ read_frame()
104
+ root.mainloop()
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ pygame
2
+ onnx
3
+ onnxruntime
4
+ opencv-python
5
+ serial
6
+ pandas
7
+ matplotlib
setting_AI.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DIRECTION_LEFT = "X"
2
+ DIRECTION_RIGHT = "Y"
3
+ DIRECTION_STRAIGHT = "S"
4
+ PUSH_STOP = "x:000"
5
+
6
+
7
+ THRESHOLD_CLASSIFICATION = 30
8
+
9
+ # Phần trăm mặt đường sẽ lấy
10
+ per_len_lane = 0.9
11
+
12
+ # Ngưỡng quay bánh lại
13
+ back_threshold = 5
14
+
15
+ # ngưỡng lệch góc thì phải push ngay
16
+ threshold_scale = 3
17
+
18
+ # Ngưỡng thu report
19
+ count_control = 25
20
+
21
+ ROTATION_SPEED = 40
22
+
23
+ # Các điểm liên quan đến xe (điểm trụ sở, padding từ 2 bên)
24
+ car_length_padding = 100
25
+
26
+ # Setting TensorRT
27
+ input_names = ['images']
28
+ output_names = ['output']
29
+ batch = 1
30
+ plan = "models/tusimple_18_FP16.trt"
31
+
ultrafast/__pycache__/inference_onnx.cpython-311.pyc ADDED
Binary file (11.3 kB). View file
 
ultrafast/__pycache__/ultrafastLaneDetector.cpython-311.pyc ADDED
Binary file (10.3 kB). View file
 
ultrafast/inference_onnx.py ADDED
@@ -0,0 +1,316 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .ultrafastLaneDetector import UltrafastLaneDetector, ModelType
2
+ from setting_AI import (
3
+ car_length_padding,
4
+ DIRECTION_LEFT,
5
+ DIRECTION_RIGHT,
6
+ DIRECTION_STRAIGHT,
7
+ PUSH_STOP,
8
+ THRESHOLD_CLASSIFICATION,
9
+ per_len_lane,
10
+ back_threshold,
11
+ threshold_scale,
12
+ count_control,
13
+ ROTATION_SPEED,
14
+ input_names,
15
+ output_names,
16
+ batch,
17
+ plan,
18
+ )
19
+ import cv2
20
+ import numpy as np
21
+ import math
22
+ import os
23
+ from a_utils_func_2_model import (
24
+ CLEAN_DATA_CSV_DIRECTION,
25
+ ADD_DATA_CSV_MASK_DIRECTION,
26
+ ADD_DATA_CSV_DIRECTION_STRAIGHT,
27
+ CLEAN_DATA_CSV_DIRECTION_STRAIGHT,
28
+ CHECK_PUSH,
29
+ ADD_DATA_CSV_CLASSIFICATION,
30
+ CHECK_CSV_CLASSIFICATION,
31
+ CLEAN_DATA_CSV_CLASSIFICATION,
32
+ )
33
+
34
+ model_type = ModelType.TUSIMPLE
35
+
36
+ dirname = os.path.dirname(__file__)
37
+
38
+ model_path = os.path.join(dirname, "./models/tusimple_18_V1_fp16.onnx")
39
+ lane_detector = UltrafastLaneDetector(model_path, model_type)
40
+
41
+
42
+ def inference_detect_lane(image):
43
+ return lane_detector.detect_lanes(image)
44
+
45
+
46
# Row anchors for the TuSimple model: every 4th row from 64 to 284 inclusive.
# Replaces the original hand-written 56-entry literal with the equivalent
# range expression (identical values, harder to typo).
tusimple_row_anchor = list(range(64, 288, 4))

# BGR colours used when drawing each of the four lane slots.
lane_colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0), (0, 255, 255)]

# Working resolution: draw_lanes() resizes every frame to width x height.
height = 720
width = 1280

# Reference points of the ego vehicle in the 1280x720 frame.
# car_length_padding (from setting_AI) is the horizontal margin of the car body.
car_point_left = (car_length_padding, height)
car_point_right = (width - car_length_padding, height)
car_center_bottom = ((car_point_left[0] + car_point_right[0]) // 2, height)
car_center_top = (car_center_bottom[0], 0)

# -------------------------------------------------------------------------------

# Start every run with empty control CSVs so stale commands from a previous
# session cannot influence this one.
CLEAN_DATA_CSV_DIRECTION()
CLEAN_DATA_CSV_DIRECTION_STRAIGHT()
CLEAN_DATA_CSV_CLASSIFICATION()

# Last pushed steering state, mutated by AI_TRT() via `global`.
dr_back_control = None
an_back_control = None
len_csv_control_back = None
122
+
123
+
124
def draw_lanes(input_img, lanes_points, lanes_detected, draw_points=True):
    """Overlay the detected ego lane on the frame and return its key points.

    Uses lane slots 1 (left) and 2 (right) only. Returns a 6-tuple:
    (visualization_img, left_top, right_top, left_bottom, right_bottom,
    Have_lane). The four point values stay None when the corresponding lane
    portion was not found; Have_lane is False when slot 1 or 2 is missing.
    """
    left_top = None
    right_top = None
    left_bottom = None
    right_bottom = None
    Have_lane = True

    # Resize the input image to the fixed working resolution.
    visualization_img = cv2.resize(input_img, (1280, 720), interpolation=cv2.INTER_AREA)

    # Only draw when both the left and right ego lanes were detected.
    if lanes_detected[1] and lanes_detected[2]:
        lane_segment_img = visualization_img.copy()

        # Convert the left/right lane points to numpy arrays (rows of [x, y]).
        left_lane = np.array(lanes_points[1])
        right_lane = np.array(lanes_points[2])

        # Vertical extent (min/max y) of each lane.
        y_top_left = np.min(left_lane[:, 1])
        y_bottom_left = np.max(left_lane[:, 1])
        y_top_right = np.min(right_lane[:, 1])
        y_bottom_right = np.max(right_lane[:, 1])

        # Overlapping y-range shared by both lanes.
        y_lane_top = max(y_top_left, y_top_right)
        y_lane_bottom = min(y_bottom_left, y_bottom_right)
        lane_length = y_lane_bottom - y_lane_top

        # y cutoff keeping the per_len_lane fraction of the lane nearest the
        # camera (per_len_lane comes from setting_AI).
        y_threshold = y_lane_bottom - per_len_lane * lane_length

        # Keep only points below the cutoff (the part closest to the camera).
        left_points_90 = [point for point in lanes_points[1] if point[1] >= y_threshold]
        right_points_90 = [
            point for point in lanes_points[2] if point[1] >= y_threshold
        ]
        # Top/bottom extremes of the kept left-lane points.
        if left_points_90:
            left_top = min(left_points_90, key=lambda p: p[1])  # smallest y
            left_bottom = max(left_points_90, key=lambda p: p[1])  # largest y

        # Top/bottom extremes of the kept right-lane points.
        if right_points_90:
            right_top = min(right_points_90, key=lambda p: p[1])
            right_bottom = max(right_points_90, key=lambda p: p[1])

        # With points on both sides, fill the polygon between the lanes and
        # alpha-blend it onto the frame.
        if len(left_points_90) > 0 and len(right_points_90) > 0:
            pts = np.vstack(
                (np.array(left_points_90), np.flipud(np.array(right_points_90)))
            )
            cv2.fillPoly(lane_segment_img, pts=[pts], color=(255, 191, 0))
            visualization_img = cv2.addWeighted(
                visualization_img, 0.7, lane_segment_img, 0.3, 0
            )
    else:
        Have_lane = False

    # Optionally draw every raw lane point, colour-coded per lane slot.
    if draw_points:
        for lane_num, lane_points in enumerate(lanes_points):
            for lane_point in lane_points:
                cv2.circle(
                    visualization_img,
                    (lane_point[0], lane_point[1]),
                    3,
                    lane_colors[lane_num],
                    -1,
                )

    return visualization_img, left_top, right_top, left_bottom, right_bottom, Have_lane
195
+
196
+
197
def draw_direction_arrow(img, center, angle_deg, size=50, color=(0, 255, 255)):
    """Draw a filled direction-arrow icon rotated by angle_deg at `center`.

    The arrow points straight up at angle 0; rotating it mirrors the steering
    direction. Mutates `img` in place.
    """
    # Arrow outline in local coordinates (pointing up, origin at the pivot).
    pts = np.array(
        [
            [0, -size],  # arrow tip
            [-size // 4, size // 2],  # lower-left corner
            [0, size // 4],  # bottom-center notch
            [size // 4, size // 2],  # lower-right corner
        ],
        dtype=np.float32,
    )

    # Build the 2x3 rotation matrix about the origin.
    M = cv2.getRotationMatrix2D((0, 0), angle_deg, 1)
    # NOTE(review): pts @ M[:, :2] multiplies by the TRANSPOSE of the rotation
    # block, i.e. rotates by -angle_deg in matrix terms. Combined with the
    # image y-axis pointing down this may well be the intended on-screen
    # direction — confirm visually before "fixing".
    rotated_pts = np.dot(pts, M[:, :2])
    # Translate the rotated shape to `center`.
    rotated_pts[:, 0] += center[0]
    rotated_pts[:, 1] += center[1]
    rotated_pts = rotated_pts.astype(np.int32)

    cv2.fillPoly(img, [rotated_pts], color)
222
+
223
+
224
def AI_TRT(frame, paint=False, resize_img=True):
    """Full per-frame pipeline: detect lanes, derive a steering command.

    Args:
        frame: input BGR frame.
        paint: when True, draw debug overlays (car points, lane points,
            direction text and arrow icon).
        resize_img: when True, halve the visualization before returning.

    Returns:
        (visualization_img, PUSH_RETURN, Have_lane) — PUSH_RETURN is the
        "DIR:angle" command string from CHECK_PUSH(), or None when nothing
        new should be pushed.
    """
    global dr_back_control, an_back_control, len_csv_control_back
    PUSH_RETURN = None

    lanes_points, lanes_detected = lane_detector.detect_lanes(frame)

    (
        visualization_img,
        lane_left_top,
        lane_right_top,
        lane_left_bottom,
        lane_right_bottom,
        Have_lane,
    ) = draw_lanes(frame, lanes_points, lanes_detected, draw_points=True)

    if Have_lane == False:
        # Lane not found ("Không bắt có đường" = "no road detected").
        print("Không bắt có đường")
    if paint:
        # Car reference points (orange dots).
        cv2.circle(visualization_img, car_point_left, 10, (50, 100, 255), -1)
        cv2.circle(visualization_img, car_center_bottom, 10, (50, 100, 255), -1)
        cv2.circle(visualization_img, car_point_right, 10, (50, 100, 255), -1)
        cv2.circle(visualization_img, car_center_top, 10, (50, 100, 255), -1)

    if lane_left_top is not None and lane_right_top is not None:
        # Midpoint between the two lane tops: the aim point for steering.
        top_center = (
            (lane_left_top[0] + lane_right_top[0]) // 2,
            (lane_left_top[1] + lane_right_top[1]) // 2,
        )
        if paint:
            cv2.circle(visualization_img, lane_left_top, 5, (0, 255, 255), -1)
            cv2.circle(visualization_img, lane_right_top, 5, (0, 255, 255), -1)
            cv2.circle(visualization_img, top_center, 7, (0, 0, 255), -1)

        # Lane-top x-coordinates projected onto the bottom of the frame.
        point_control_left = (lane_left_top[0], height)
        point_control_right = (lane_right_top[0], height)

        if paint:
            cv2.circle(visualization_img, point_control_left, 10, (100, 255, 100), -1)
            cv2.circle(visualization_img, point_control_right, 10, (100, 255, 100), -1)

        # Signed steering angle: atan2(dx, dy) of the car-center -> aim-point
        # vector; positive = aim point to the right of the car center.
        dx = top_center[0] - car_center_bottom[0]
        dy = car_center_bottom[1] - top_center[1]
        angle_rad = math.atan2(dx, dy)
        angle_deg = angle_rad * 180 / math.pi

        # Dead zone of +/-5 degrees counts as driving straight.
        threshold = 5
        if angle_deg < -threshold:
            direction = DIRECTION_LEFT

        elif angle_deg > threshold:
            direction = DIRECTION_RIGHT

        else:
            direction = DIRECTION_STRAIGHT

        if paint:
            text = f"{direction} ({angle_deg:.2f} deg)"
            cv2.rectangle(
                visualization_img, (10, 10), (460, 70), (0, 0, 0), -1
            )  # black background so the text is readable
            cv2.putText(
                visualization_img,
                text,
                (15, 50),
                cv2.FONT_HERSHEY_SIMPLEX,
                1,
                (255, 255, 255),
                2,
            )
            # Rotating arrow icon in the top-right corner.
            icon_center = (width - 80, 80)
            draw_direction_arrow(
                visualization_img, icon_center, angle_deg, size=40, color=(0, 200, 200)
            )
            cv2.circle(visualization_img, icon_center, 45, (0, 200, 200), 2)

        # Log the sample: turns go to the mask CSV, straight driving to its own.
        if direction != DIRECTION_STRAIGHT:
            ADD_DATA_CSV_MASK_DIRECTION(direction, abs(int(angle_deg)))
        else:
            ADD_DATA_CSV_DIRECTION_STRAIGHT(direction, abs(int(angle_deg)))

        # Decide whether a new command should actually be pushed.
        push, dr_back, an_back = CHECK_PUSH()

        if push is not None:

            PUSH_RETURN = push

    if resize_img:
        # Halve the visualization to reduce display/transfer cost.
        visualization_img = cv2.resize(
            visualization_img,
            (visualization_img.shape[1] // 2, visualization_img.shape[0] // 2),
        )

    return visualization_img, PUSH_RETURN, Have_lane
ultrafast/models/tusimple_18_V1_fp16.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8be553e46a7e2fa2ebb6fecaa0e258a9e32acf90a2f84c6db011c8affc2f4178
3
+ size 122715955
ultrafast/ultrafastLaneDetector.py ADDED
@@ -0,0 +1,277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import onnxruntime
2
+ import scipy.special
3
+ from enum import Enum
4
+ import cv2
5
+ import time
6
+ import numpy as np
7
+
8
# BGR colours used to draw each of the four lane slots.
lane_colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0), (0, 255, 255)]

# TuSimple row anchors: every 4th row of the 288-px model input, 64..284
# inclusive (56 entries). Replaces the original hand-written literal with
# the equivalent range expression — identical values.
tusimple_row_anchor = list(range(64, 288, 4))

# CULane row anchors: irregular spacing, so the literal is kept (18 entries).
culane_row_anchor = [
    121, 131, 141, 150, 160, 170, 180, 189, 199,
    209, 219, 228, 238, 248, 258, 267, 277, 287,
]
88
+
89
+
90
class ModelType(Enum):
    """Which dataset variant the lane-detection model was trained on.

    ModelConfig dispatches on these members; the numeric values are
    arbitrary identifiers and must not change.
    """

    TUSIMPLE = 0
    CULANE = 1
93
+
94
+
95
class ModelConfig:
    """Input geometry and output-grid parameters for one model variant."""

    def __init__(self, model_type):
        # TuSimple gets its own config; every other value falls through to
        # CULane, exactly as the original if/else did.
        if model_type == ModelType.TUSIMPLE:
            self.init_tusimple_config()
        else:
            self.init_culane_config()

    def init_tusimple_config(self):
        """TuSimple: 1280x720 frames, 100-bin grid, 56 row anchors."""
        self.img_w = 1280
        self.img_h = 720
        self.row_anchor = tusimple_row_anchor
        self.griding_num = 100
        self.cls_num_per_lane = 56

    def init_culane_config(self):
        """CULane: 1640x590 frames, 200-bin grid, 18 row anchors."""
        self.img_w = 1640
        self.img_h = 590
        self.row_anchor = culane_row_anchor
        self.griding_num = 200
        self.cls_num_per_lane = 18
117
+
118
+
119
class UltrafastLaneDetector:
    """ONNX Runtime wrapper around an Ultra-Fast-Lane-Detection model.

    Loads the model once, then detect_lanes() runs per-frame inference and
    decodes the grid output into per-lane pixel coordinates.
    """

    def __init__(self, model_path, model_type=ModelType.TUSIMPLE):
        """Load the model at model_path configured for the given variant."""
        self.fps = 0
        self.timeLastPrediction = time.time()
        self.frameCounter = 0

        # Load model configuration based on the model type.
        self.cfg = ModelConfig(model_type)

        # Initialize model.
        self.initialize_model(model_path)

    def initialize_model(self, model_path):
        """Create the ONNX Runtime session and cache its input/output shapes."""
        self.session = onnxruntime.InferenceSession(model_path)

        self.getModel_input_details()
        self.getModel_output_details()

    def detect_lanes(self, image, draw_points=True):
        """Run one BGR frame through the model.

        Returns (lanes_points, lanes_detected): a list of [x, y] point lists
        per lane slot, and a boolean array marking which slots were found.
        (draw_points is unused here; kept for interface compatibility.)
        """
        input_tensor = self.prepare_input(image)

        # Perform inference on the image.
        output = self.inference(input_tensor)
        # Decode the raw grid output into pixel-space lane points.
        self.lanes_points, self.lanes_detected = self.process_output(output, self.cfg)

        return self.lanes_points, self.lanes_detected

    def prepare_input(self, image):
        """Convert a BGR frame to the model's normalized NCHW fp16 tensor."""
        img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        self.img_height, self.img_width, self.img_channels = img.shape

        # Model expects a fixed input size (288 x 800 for these exports).
        img_input = cv2.resize(img, (self.input_width, self.input_height)).astype(
            np.float32
        )

        # ImageNet mean/std normalization.
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]

        img_input = (img_input / 255.0 - mean) / std
        img_input = img_input.transpose(2, 0, 1)  # HWC -> CHW
        img_input = img_input[np.newaxis, :, :, :]  # add batch dim

        # The exported model is fp16, so the input must be too.
        return img_input.astype(np.float16)

    def inference(self, input_tensor):
        """Run the session on one prepared tensor; returns the raw outputs."""
        input_name = self.session.get_inputs()[0].name
        output_name = self.session.get_outputs()[0].name

        output = self.session.run([output_name], {input_name: input_tensor})

        return output

    def getModel_input_details(self):
        """Cache the model's input shape; layout is NCHW."""
        self.input_shape = self.session.get_inputs()[0].shape
        # BUGFIX: channels is dimension 1 of an NCHW input; the original read
        # dimension 2 (the height) into the misspelled attribute `channes`.
        self.channels = self.input_shape[1]
        self.channes = self.channels  # keep the old (misspelled) name for callers
        self.input_height = self.input_shape[2]
        self.input_width = self.input_shape[3]

    def getModel_output_details(self):
        """Cache the output shape: (batch, grid+1, anchors, lanes)."""
        self.output_shape = self.session.get_outputs()[0].shape
        self.num_points = self.output_shape[1]
        self.num_anchors = self.output_shape[2]
        self.num_lanes = self.output_shape[3]

    @staticmethod
    def process_output(output, cfg):
        """Decode the raw grid output into per-lane pixel coordinates.

        Returns (lanes_points, lanes_detected); a lane slot counts as
        detected when more than 2 of its anchors carry a non-zero location.
        """
        # Raw tensor -> (griding_num+1, anchors, lanes); flip the anchor axis.
        processed_output = np.squeeze(output[0])
        processed_output = processed_output[:, ::-1, :]
        # Softmax over the location bins (excluding the final "no lane" bin),
        # then take the expected bin index for sub-cell precision.
        prob = scipy.special.softmax(processed_output[:-1, :, :], axis=0)
        idx = np.arange(cfg.griding_num) + 1
        idx = idx.reshape(-1, 1, 1)
        loc = np.sum(prob * idx, axis=0)
        processed_output = np.argmax(processed_output, axis=0)
        # Anchors whose argmax is the "no lane" bin get location 0.
        loc[processed_output == cfg.griding_num] = 0
        processed_output = loc

        # Width in model pixels of one grid cell (800-px model input).
        col_sample = np.linspace(0, 800 - 1, cfg.griding_num)
        col_sample_w = col_sample[1] - col_sample[0]

        lanes_points = []
        lanes_detected = []

        max_lanes = processed_output.shape[1]
        for lane_num in range(max_lanes):
            lane_points = []
            # Check if there are any points detected in the lane.
            if np.sum(processed_output[:, lane_num] != 0) > 2:
                lanes_detected.append(True)
                # Map every active anchor to image-space (x, y).
                for point_num in range(processed_output.shape[0]):
                    if processed_output[point_num, lane_num] > 0:
                        lane_point = [
                            int(
                                processed_output[point_num, lane_num]
                                * col_sample_w
                                * cfg.img_w
                                / 800
                            )
                            - 1,
                            int(
                                cfg.img_h
                                * (
                                    cfg.row_anchor[cfg.cls_num_per_lane - 1 - point_num]
                                    / 288
                                )
                            )
                            - 1,
                        ]
                        lane_points.append(lane_point)
            else:
                lanes_detected.append(False)

            lanes_points.append(lane_points)

        return lanes_points, np.array(lanes_detected)

    @staticmethod
    def draw_lanes(input_img, lanes_points, lanes_detected, cfg, draw_points=True):
        """Draw the ego-lane polygon and lane points onto a resized frame."""
        visualization_img = cv2.resize(
            input_img, (cfg.img_w, cfg.img_h), interpolation=cv2.INTER_AREA
        )

        # Fill the area between the left (1) and right (2) ego lanes.
        if lanes_detected[1] and lanes_detected[2]:
            lane_segment_img = visualization_img.copy()

            cv2.fillPoly(
                lane_segment_img,
                pts=[np.vstack((lanes_points[1], np.flipud(lanes_points[2])))],
                color=(255, 191, 0),
            )
            visualization_img = cv2.addWeighted(
                visualization_img, 0.7, lane_segment_img, 0.3, 0
            )

        # Optionally draw every raw point, colour-coded per lane slot.
        if draw_points:
            for lane_num, lane_points in enumerate(lanes_points):
                for lane_point in lane_points:
                    cv2.circle(
                        visualization_img,
                        (lane_point[0], lane_point[1]),
                        3,
                        lane_colors[lane_num],
                        -1,
                    )

        return visualization_img
utils_func_go_str.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import pandas as pd
3
+ from statistics import mode
4
+ from setting_AI import *
5
+
6
# CSV files used as a lightweight inter-step message store (relative paths,
# so the process must be started from the project root).
csv_path = "dataCSV/direction_control.csv"  # last pushed steering command
csv_mask_path = "dataCSV/direction_control_mask.csv"  # rolling per-frame turn samples
csv_straight_path = "dataCSV/direction_straight.csv"  # straight-driving samples
csv_back_control_path = "dataCSV/back_control.csv"  # back-control log
10
+
11
+
12
+
13
def ADD_DATA_CSV_MASK_DIRECTION(direction, angle):
    """Append one (direction, angle) sample to the mask CSV, capping its size.

    The file is truncated back to just its header once it reaches 10000 rows
    so it cannot grow without bound.
    """
    with open(csv_mask_path, mode='a', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow([direction, angle])

    data_csv = pd.read_csv(csv_mask_path)

    # BUGFIX: use >= rather than ==. An exact-equality check never fires again
    # if the file ever passes 10000 rows (e.g. a leftover file from a previous
    # run or a second writer), leaving it to grow forever.
    if len(data_csv) >= 10000:
        file_start = pd.read_csv(csv_mask_path, nrows=0)
        file_start_new = pd.DataFrame(columns=file_start.columns)
        file_start_new.to_csv(csv_mask_path, index=False)
24
+
25
def ADD_DATA_CSV_DIRECTION(direction, angle):
    """Append one (direction, angle) row to the main control CSV."""
    row = [direction, angle]
    with open(csv_path, mode='a', newline='', encoding='utf-8') as fh:
        csv.writer(fh).writerow(row)
29
+
30
def ADD_DATA_CSV_DIRECTION_STRAIGHT(direction, angle):
    """Append one (direction, angle) row to the straight-driving CSV.

    The file is cleared once it reaches 500 rows to keep it bounded.
    """
    with open(csv_straight_path, mode='a', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow([direction, angle])

    data_csv = pd.read_csv(csv_straight_path)
    # BUGFIX: use >= rather than ==; an exact-equality cap is skipped forever
    # if the file ever exceeds 500 rows (stale file, second writer).
    if len(data_csv) >= 500:
        CLEAN_DATA_CSV_DIRECTION_STRAIGHT()
38
+
39
def CLEAN_DATA_CSV_DIRECTION():
    """Truncate both direction CSVs back to just their header rows."""
    # Same header-preserving truncation for the control CSV and the mask CSV.
    for path in (csv_path, csv_mask_path):
        header_only = pd.read_csv(path, nrows=0)
        pd.DataFrame(columns=header_only.columns).to_csv(path, index=False)
49
+
50
def ADD_DATA_CSV_BACK_CONTROL(direction, angle):
    """Append one (direction, angle) row to the back-control CSV."""
    row = [direction, angle]
    with open(csv_back_control_path, mode='a', newline='', encoding='utf-8') as fh:
        csv.writer(fh).writerow(row)
54
+
55
def CLEAN_DATA_CSV_BACK_CONTROL():
    """Truncate back_control.csv back to just its header row."""
    header_only = pd.read_csv(csv_back_control_path, nrows=0)
    pd.DataFrame(columns=header_only.columns).to_csv(csv_back_control_path, index=False)
60
+
61
def CLEAN_DATA_CSV_DIRECTION_STRAIGHT():
    """Truncate direction_straight.csv back to just its header row."""
    header_only = pd.read_csv(csv_straight_path, nrows=0)
    pd.DataFrame(columns=header_only.columns).to_csv(csv_straight_path, index=False)
66
+
67
def BOTTOM_DATA_CSV_CHECK():
    """Return (direction, angle) from the last row of the main control CSV.

    Callers must ensure the file is non-empty (iloc[-1] raises otherwise).
    """
    last_row = pd.read_csv(csv_path).iloc[-1]
    return (last_row["direction"], last_row["angle"])
71
+
72
+
73
+
74
def CHECK_PUSH():
    """Decide whether a new steering command should be pushed to the vehicle.

    Takes the modal direction and max angle over the most recent
    `count_control` rows of the mask CSV and compares them with the last
    pushed command (stored in the main control CSV).

    Returns:
        (push, dr_back, an_back): push is a "DIR:angle" string to send, or
        None when the current command is still valid; dr_back/an_back echo
        the pushed direction/angle (None when nothing is pushed).
    """
    push_variable = None
    dr_back, an_back = None, None
    data_csv_ = pd.read_csv(csv_mask_path)
    direction_list_to_mode = list(data_csv_['direction'][-count_control:])
    if len(direction_list_to_mode) > 0:
        direction_mode = mode(direction_list_to_mode)
        # BUGFIX: take the max angle over the same most-recent window as the
        # direction. The original sliced the FIRST count_control rows
        # ([:count_control]), which stops reflecting new data once the CSV
        # grows beyond that length.
        max_angle = max(data_csv_['angle'][-count_control:])
        if len(pd.read_csv(csv_path)) == 0:
            # Nothing pushed yet this cycle: push and record the command.
            dr_back, an_back = direction_mode, max_angle
            ADD_DATA_CSV_DIRECTION(direction_mode, max_angle)
            return f"{direction_mode}:{max_angle:03d}", dr_back, an_back
        else:
            # Push again only when the direction changed or the angle moved
            # by at least threshold_scale degrees.
            bottom_data_csv_check = BOTTOM_DATA_CSV_CHECK()
            if bottom_data_csv_check[0] != direction_mode or (
                abs(bottom_data_csv_check[1] - max_angle) >= threshold_scale
            ):
                CLEAN_DATA_CSV_DIRECTION()
                dr_back, an_back = direction_mode, max_angle
                return f"{direction_mode}:{max_angle:03d}", dr_back, an_back
            else:
                return push_variable, dr_back, an_back

    return push_variable, dr_back, an_back
98
+
99
+
100
+
101
+
102
+
v_test.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ # from AI_brain import AI
3
+ # from AI_brain_TRT import AI_TRT
4
+ import time
5
+ import serial
6
+
7
+ # cap = cv2.VideoCapture(1)
8
+
9
# Open the serial link to the vehicle's controller board.
# NOTE(review): "COM8" is a Windows port name — adjust per machine.
serial_port = serial.Serial(
    port="COM8",
    baudrate=9600,
    bytesize=serial.EIGHTBITS,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_ONE,
)

if not serial_port.is_open:
    serial_port.open()


# Wait a second to let the port initialize
time.sleep(1)
while True:
    # Hand-crafted "DIR:angle" commands to exercise the firmware; each is
    # followed by an "x:000" reset after a pause.
    PUSH_RETURN = "Y:010"
    print(PUSH_RETURN)
    bytes_written = serial_port.write(PUSH_RETURN.encode())
    print(f"Bytes sent: {bytes_written}")
    time.sleep(1)
    bytes_written = serial_port.write("x:000".encode())
    time.sleep(1)
    PUSH_RETURN = "X:000"
    print(PUSH_RETURN)
    bytes_written = serial_port.write(PUSH_RETURN.encode())
    print(f"Bytes sent: {bytes_written}")
    time.sleep(0.5)
    bytes_written = serial_port.write("x:000".encode())
    time.sleep(2)

    # Single pass only; the loop exists so repeats are easy to re-enable.
    break
videos/test_video.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d7bd00e07f804300d14ac83bbb411447c334766fe49cd606ca173fb4cec9efa2
3
+ size 6085499