Amodit committed on
Commit
5751a55
·
1 Parent(s): d42fc80

Add app files

Browse files
Files changed (2) hide show
  1. app.py +134 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import gradio as gr
3
+ from ultralytics import YOLO
4
+ import numpy as np
5
+ import math
6
+ import tempfile
7
+ import os
8
+
# Pose-estimation model. The ultralytics package downloads the
# 'yolov8n-pose.pt' weights automatically on first use if absent.
model = YOLO('yolov8n-pose.pt')
12
+
13
def get_angle(p1, p2, p3):
    """
    Return the interior angle, in degrees (0-180), at joint p2 for the
    chain p1-p2-p3. Points are (x, y) pairs.
    """
    to_p1 = math.atan2(p1[1] - p2[1], p1[0] - p2[0])
    to_p3 = math.atan2(p3[1] - p2[1], p3[0] - p2[0])
    deg = abs(math.degrees(to_p3 - to_p1))
    # atan2 differences can exceed 180; fold back to the interior angle.
    if deg > 180:
        deg = 360 - deg
    return deg
20
+
21
def draw_pose(img, kps):
    """
    Annotate img (in place) with hip-knee-ankle lines and a posture label
    for the first leg whose three keypoints are all detected confidently.

    kps is an array of COCO-style keypoints, each row (x, y, confidence).
    Returns "STANDING", "SITTING" or "BENDING" based on the knee angle,
    or "Unknown" when no leg passes the confidence threshold.
    """
    # COCO keypoint indices: (hip, knee, ankle) for left and right legs.
    for hip_i, knee_i, ankle_i in ((11, 13, 15), (12, 14, 16)):
        hip = kps[hip_i]
        knee = kps[knee_i]
        ankle = kps[ankle_i]

        # Skip this leg unless every joint clears the confidence threshold.
        if not (hip[2] > 0.5 and knee[2] > 0.5 and ankle[2] > 0.5):
            continue

        ang = get_angle(hip[:2], knee[:2], ankle[:2])

        # Classify by knee angle: near-straight leg means standing,
        # deeply bent means sitting, in between is bending.
        if ang > 160:
            posture, color = "STANDING", (0, 255, 0)
        elif ang < 140:
            posture, color = "SITTING", (255, 0, 0)
        else:
            posture, color = "BENDING", (0, 165, 255)

        hip_pt = (int(hip[0]), int(hip[1]))
        knee_pt = (int(knee[0]), int(knee[1]))
        ankle_pt = (int(ankle[0]), int(ankle[1]))
        cv2.line(img, hip_pt, knee_pt, (255, 0, 255), 2)
        cv2.line(img, knee_pt, ankle_pt, (255, 0, 255), 2)

        # Filled background box sized to the text, then the label on top.
        label = f"{posture} {int(ang)}"
        (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2)
        cv2.rectangle(img, (hip_pt[0], hip_pt[1] - 30), (hip_pt[0] + tw, hip_pt[1]), color, -1)
        cv2.putText(img, label, (hip_pt[0], hip_pt[1] - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)

        # Only the first confidently-detected leg is annotated and reported.
        return posture

    return "Unknown"
55
+
56
def process_img(img):
    """
    Run pose inference on one numpy image.

    Returns (annotated image copy, status text); the status reports the
    posture of the last detected person, or "No Person Detected".
    """
    if img is None:
        return None, "No Image"

    results = model(img)
    annotated = img.copy()
    status = "No Person Detected"

    if results and results[0].keypoints is not None:
        # One keypoint row-set per detected person.
        for person_kps in results[0].keypoints.data.cpu().numpy():
            status = f"POSTURE: {draw_pose(annotated, person_kps)}"

    return annotated, status
73
+
74
def process_vid(vid_path):
    """
    Run posture detection on every frame of a video file.

    Parameters
    ----------
    vid_path : str or None
        Path to the input video.

    Returns
    -------
    str or None
        Path to an annotated .mp4 temp file, or None when no path was
        given or the video cannot be opened.
    """
    if not vid_path:
        return None

    cap = cv2.VideoCapture(vid_path)
    # Fix: bail out early on an unreadable file instead of silently
    # producing an empty/broken output video.
    if not cap.isOpened():
        cap.release()
        return None

    fps = cap.get(cv2.CAP_PROP_FPS)
    # Fix: some containers report 0 or NaN fps, which makes VideoWriter
    # produce an unplayable file; fall back to a sane default.
    if fps <= 0 or math.isnan(fps):
        fps = 30.0
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Output goes to a fresh temp file (served back to the UI by path).
    fd, out_path = tempfile.mkstemp(suffix='.mp4')
    os.close(fd)

    writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # Annotate each frame in place; per-frame logging disabled.
            res = model(frame, verbose=False)
            if res and res[0].keypoints is not None:
                for k in res[0].keypoints.data.cpu().numpy():
                    draw_pose(frame, k)

            writer.write(frame)
    finally:
        # Fix: release resources even if inference raises mid-loop.
        cap.release()
        writer.release()

    return out_path
109
+
110
# Gradio UI: one tab for still images, one for video files.
with gr.Blocks(title="Postures") as app:
    gr.Markdown("# Posture Detection\nSimple angle-based classification using YOLOv8: Standing (>160), Sitting (<140)")

    with gr.Tab("Image"):
        with gr.Row():
            with gr.Column():
                image_input = gr.Image(type="numpy", label="Input Image")
                detect_button = gr.Button("Detect Posture")
            with gr.Column():
                image_output = gr.Image(label="Result")
                status_box = gr.Textbox(label="Status")
        detect_button.click(process_img, inputs=image_input, outputs=[image_output, status_box])

    with gr.Tab("Video"):
        with gr.Row():
            with gr.Column():
                video_input = gr.Video(label="Input Video")
                run_button = gr.Button("Process Video")
            with gr.Column():
                video_output = gr.Video(label="Processed Video")
        run_button.click(process_vid, inputs=video_input, outputs=video_output)

if __name__ == "__main__":
    app.launch()
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ ultralytics
2
+ gradio
3
+ opencv-python
4
+ numpy