# ComputerVision / src/test.py — multi object tracking (pirahansiah, commit ce3199d)
from pathlib import Path

import cv2
import ffmpeg
from PIL import Image
from ultralytics import YOLO
ffmpeg.input('files/a.MOV').output('files/a.mp4').run()
ffmpeg.input('input.mov').output('output.mp4').run()
def draw_boxes(image, boxes):
for box in boxes:
x1, y1, x2, y2, name, prob = box
cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.putText(image, f"{name} {prob:.2f}", (x1, y1-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0,255,0), 2)
return image
def detect_objects_on_image(buf):
model = YOLO("yolov8n.pt")
results = model.predict(buf)
result = results[0]
output = []
for box in result.boxes:
x1, y1, x2, y2 = [
round(x) for x in box.xyxy[0].tolist()
]
class_id = box.cls[0].item()
prob = round(box.conf[0].item(), 2)
output.append([
x1, y1, x2, y2, result.names[class_id], prob
])
return output
# model = MaskRCNN("mask_rcnn_model.pth")
# results = model.predict(img)
# masks = results['masks']
# img = cv2.imread('a.png')
# boxes=detect_objects_on_image(img)
# img_with_boxes = draw_boxes(img, boxes)
# cv2.imshow("test",img_with_boxes)
# cv2.waitKey(0)
model = YOLO("files/yolov8n.pt")
video_path = "files/a.MOV"
cap = cv2.VideoCapture(video_path)
# Loop through the video frames
while cap.isOpened():
# Read a frame from the video
success, frame = cap.read()
if success:
# Run YOLOv8 tracking on the frame, persisting tracks between frames
results = model.track(frame, persist=True)
# Visualize the results on the frame
annotated_frame = results[0].plot()
# Display the annotated frame
cv2.imshow("YOLOv8 Tracking", annotated_frame)
# Break the loop if 'q' is pressed
if cv2.waitKey(1) & 0xFF == ord("q"):
break
else:
# Break the loop if the end of the video is reached
break
# Release the video capture object and close the display window
cap.release()
cv2.destroyAllWindows()