File size: 1,682 Bytes
1ca9b28
 
 
 
 
 
 
53952ec
1ca9b28
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
import cv2
from ultralytics import YOLO


def main() -> None:
    """Run YOLO pose estimation on an MJPEG HTTP stream.

    Each frame is rotated 90 degrees clockwise, annotated with pose
    keypoints, written to ``output_pose.mp4``, and displayed in a window.
    Press 'q' in the window to stop.

    Raises:
        SystemExit: if the stream cannot be opened or yields no frames.
    """
    # Load YOLO pose model (downloads weights on first use; adjust path as needed).
    model = YOLO("yolo11n-pose.pt")

    # MJPEG HTTP stream (e.g. phone IP-webcam app).
    stream_url = "http://192.168.137.244:8080/video"

    cap = cv2.VideoCapture(stream_url)
    if not cap.isOpened():
        print("Error: Cannot open video stream")
        raise SystemExit(1)

    # Network streams frequently report 0 (or NaN) for FPS; use a sane default.
    fps = cap.get(cv2.CAP_PROP_FPS)
    if not fps or fps != fps:  # falsy or NaN (NaN != NaN)
        fps = 20  # reasonable default for MJPEG streams

    # Grab one real frame up front and size the writer from the *actual*
    # rotated frame.  CAP_PROP_FRAME_WIDTH/HEIGHT is unreliable for MJPEG
    # streams, and a size mismatch makes VideoWriter.write() silently drop
    # every frame, producing an empty output file.
    ret, frame = cap.read()
    if not ret:
        print("Failed to grab frame")
        cap.release()
        raise SystemExit(1)

    rotated_frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
    out_h, out_w = rotated_frame.shape[:2]
    print(f"Stream opened: {out_w}x{out_h} (rotated) at {fps} FPS")

    # VideoWriter expects (width, height) of the frames that will be written.
    out = cv2.VideoWriter(
        "output_pose.mp4", cv2.VideoWriter_fourcc(*"mp4v"), fps, (out_w, out_h)
    )

    try:
        while True:
            # Run YOLO pose estimation on the rotated frame.
            results = model(rotated_frame, verbose=False, conf=0.3, iou=0.1)

            # Draw pose annotations onto a copy of the frame.
            annotated_frame = results[0].plot()

            # Persist and display the annotated frame.
            out.write(annotated_frame)
            cv2.imshow("Pose Detection (rotated)", annotated_frame)

            # Press 'q' to quit.
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

            # Fetch the next frame; stop cleanly when the stream ends.
            ret, frame = cap.read()
            if not ret:
                print("Failed to grab frame")
                break
            rotated_frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
    finally:
        # Release resources even if inference or display raises.
        cap.release()
        out.release()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    main()