"""Track objects from a webcam stream with YOLOv8 and draw per-object trails.

Runs YOLOv8 tracking frame-by-frame, keeps a short history of each track's
center point, and draws that history as a polyline over the annotated frame.
Press 'q' in the display window to quit.
"""
from collections import defaultdict, deque

import cv2
import numpy as np
from ultralytics import YOLO

# Maximum number of center points kept per track (trail length, in frames).
TRAIL_LENGTH = 30

# Load ONE model. Swap the weights file to change the task:
#   'yolov8n.pt'      -> detection
#   'yolov8n-seg.pt'  -> segmentation
#   'yolov8n-pose.pt' -> pose estimation
# NOTE(review): the original loaded all three in sequence into the same
# variable, so only the last (pose) ever took effect — the others were wasted.
model = YOLO('yolov8n-pose.pt')

# Start capturing video from the default webcam.
# A video file also works: cap = cv2.VideoCapture("path/to/video.mp4")
cap = cv2.VideoCapture(0)

# Track history: track_id -> deque of (x, y) center points.
# deque(maxlen=...) drops the oldest point automatically in O(1);
# the original list + pop(0) was O(n) per frame per track.
track_history = defaultdict(lambda: deque(maxlen=TRAIL_LENGTH))

try:
    # Loop through the video frames until the stream ends or 'q' is pressed.
    while cap.isOpened():
        success, frame = cap.read()
        if not success:
            # End of stream (or camera error) — stop the loop.
            break

        # Run YOLOv8 tracking on the frame, persisting track IDs between frames.
        results = model.track(frame, persist=True, tracker="bytetrack.yaml")

        # Boxes are returned as (x_center, y_center, width, height).
        boxes = results[0].boxes.xywh.cpu()
        if results[0].boxes.id is not None:
            track_ids = results[0].boxes.id.int().cpu().tolist()
        else:
            # No tracked objects in this frame.
            track_ids = []

        # Draw detections (boxes/masks/keypoints) onto a copy of the frame.
        annotated_frame = results[0].plot()

        # Append each object's center to its trail and draw the trail.
        for box, track_id in zip(boxes, track_ids):
            x, y, w, h = box
            track = track_history[track_id]
            track.append((float(x), float(y)))  # (x, y) center point

            # Polyline wants int32 points shaped (-1, 1, 2).
            points = np.array(track, dtype=np.int32).reshape((-1, 1, 2))
            cv2.polylines(annotated_frame, [points], isClosed=False,
                          color=(230, 230, 230), thickness=10)

        # Display the annotated frame.
        cv2.imshow("YOLOv8 Tracking", annotated_frame)

        # Break the loop if 'q' is pressed.
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
finally:
    # Always release the camera and close windows, even if the loop raised.
    cap.release()
    cv2.destroyAllWindows()