HansGan committed on
Commit
dc44f34
·
1 Parent(s): 1b15ee2

Update Pose_And_Motion_Based_SVM_Model.py

Browse files
pages/Pose_And_Motion_Based_SVM_Model.py CHANGED
@@ -17,11 +17,69 @@ ocsvm_pose = joblib.load('pages/ocsvm_pose_model-4.pkl') # Pre-trained One-Clas
17
 
18
  #==========================FUNCTIONS==========================
19
 
20
-
 
 
 
 
 
 
 
 
 
 
 
21
 
22
  # Function to extract poses from the video
23
  def extract_poses(video_path):
24
- test = 'hello'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
  #==========================PREDICTING UPLOADED VIDEO==========================
27
 
 
17
 
18
  #==========================FUNCTIONS==========================
19
 
20
# Function to initialize OpenPose
def initialize_openpose():
    """Configure and start an OpenPose wrapper for keypoint extraction.

    Returns:
        op.WrapperPython: a started wrapper, ready for emplaceAndPop calls.
    """
    params = {
        "model_folder": "openpose/models",  # Path to OpenPose models folder
        "hand": True,   # Also detect hand keypoints
        "face": False,  # Skip face keypoints (set to True if needed)
        # NOTE(review): OpenPose's "body" flag is a mode selector
        # (0 = disable, 1 = enable, 2 = detection only) -- it is NOT the
        # keypoint count. The 25-keypoint model is chosen via "model_pose".
        "body": 1,
        "model_pose": "BODY_25",
    }
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()
    return opWrapper
32
 
33
# Function to extract poses from the video
def extract_poses(video_path):
    """Run OpenPose on every frame of a video and classify the detected poses.

    Each detected person's body keypoints (num_keypoints x 3: x, y,
    confidence) are flattened into one feature vector, scaled with the
    pre-fitted `pose_scaler`, and scored by the One-Class SVM `ocsvm_pose`
    (1 = normal, -1 = panic). Results are reported via Streamlit.

    Args:
        video_path: path to a video file readable by OpenCV.

    Returns:
        numpy.ndarray | None: per-person SVM predictions, or None when the
        video could not be opened or no poses were detected.
    """
    # Initialize OpenPose
    opWrapper = initialize_openpose()

    # Open the video file; bail out early if OpenCV cannot read it.
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        st.write(f"Could not open video: {video_path}")
        return None

    all_pose_data = []
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break  # End of video

            # Extract poses for the current frame
            datum = op.Datum()
            datum.cvInputData = frame
            # NOTE(review): newer OpenPose Python APIs require
            # op.VectorDatum([datum]) here instead of a plain list --
            # confirm against the installed OpenPose version.
            opWrapper.emplaceAndPop([datum])

            # datum.poseKeypoints has shape (num_people, num_keypoints, 3),
            # or is None when no person is detected in the frame.
            pose_keypoints = datum.poseKeypoints
            if pose_keypoints is not None:
                for person_keypoints in pose_keypoints:
                    # Flatten the keypoints to a single vector per person.
                    all_pose_data.append(person_keypoints.flatten())
    finally:
        # Release the capture even if OpenPose raises mid-video.
        cap.release()

    # If no pose data was collected, there is nothing to classify.
    if not all_pose_data:
        return None

    pose_data = np.array(all_pose_data)

    # Preprocess pose data (scale using the same scaler used during training)
    pose_data_scaled = pose_scaler.transform(pose_data)

    # Make predictions with the One-Class SVM
    predictions = ocsvm_pose.predict(pose_data_scaled)

    # Show predictions (for demonstration purposes, you can adjust as needed)
    st.write(f"Predictions (1 = normal, -1 = panic): {predictions}")

    # Count the number of panic poses among all detected persons.
    panic_count = np.sum(predictions == -1)
    st.write(f"Number of panic poses detected: {panic_count}")

    return predictions
83
 
84
  #==========================PREDICTING UPLOADED VIDEO==========================
85