Beasto committed on
Commit
b317de9
·
1 Parent(s): 05b8d49

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -39
app.py CHANGED
@@ -6,67 +6,57 @@ import tensorflow as tf
6
  import streamlit as st
7
  import tempfile
8
 
9
- # Function to detect hand using Haar Cascade
10
- def detect_hand(frame, hand_cascade):
11
- gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
12
- hands = hand_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3, minSize=(30, 30))
13
- return hands
14
-
15
- # Load Haar Cascade for hand detection
16
- hand_cascade_path = 'hand.xml' # Replace with your actual path
17
- hand_cascade = cv2.CascadeClassifier(hand_cascade_path)
18
-
19
  # Open the video file
20
  f = st.file_uploader("Choose a Video")
21
 
22
  if f is not None:
 
 
23
  tfile = tempfile.NamedTemporaryFile(delete=False)
24
  tfile.write(f.read())
25
- cap = cv2.VideoCapture(tfile.name)
26
- fps = cap.get(cv2.CAP_PROP_FPS)
 
 
 
 
27
  st.write(fps)
 
 
28
  interval = int(round(fps/1))
 
 
29
  frame_count = 0
30
  model = tf.keras.models.load_model('HandSignClassifier (1).h5')
31
  array = ['a','b','c','d','e','f','g','h','i','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y']
32
  out = ''
33
-
34
  while True:
 
 
35
  ret, frame = cap.read()
 
 
36
  if not ret:
37
  break
38
 
39
  # Check if it's time to capture a frame
40
  if frame_count % interval == 0:
41
- hands = detect_hand(frame, hand_cascade)
42
-
43
- for (x, y, w, h) in hands:
44
- # Draw rectangles around detected hands
45
- cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
46
-
47
- # Display the frame with detected hands
48
-
49
- if hands:
50
- # Extract the region of interest for hand from the frame
51
- hand_roi = frame[y:y+h, x:x+w]
52
-
53
- # Preprocess the hand ROI for your model (resize, convert to grayscale, etc.)
54
- hand_roi = cv2.cvtColor(hand_roi, cv2.COLOR_BGR2GRAY)
55
- hand_roi = cv2.resize(hand_roi, (28, 28))
56
- hand_roi = np.reshape(hand_roi, (1, 28, 28, 1))
57
-
58
- # Make predictions using your model
59
- pred = model.predict(hand_roi)
60
- st.write(pred)
61
- pred = np.argmax(pred)
62
- pred = array[pred]
63
- if not out or out[-1] != pred:
64
- out = out + pred
65
- if not hands:
66
- st.write("No Hand")
67
 
68
  # Increment the frame counter
69
  frame_count += 1
70
 
 
71
  cap.release()
 
72
  st.write(out)
 
6
  import streamlit as st
7
  import tempfile
8
 
 
 
 
 
 
 
 
 
 
 
9
  # Open the video file
10
  f = st.file_uploader("Choose a Video")
11
 
12
  if f is not None:
13
+ # Read the video file from the file-like object
14
+
15
  tfile = tempfile.NamedTemporaryFile(delete=False)
16
  tfile.write(f.read())
17
+
18
+ # Opens the Video file
19
+ cap= cv2.VideoCapture(tfile.name)
20
+
21
+ # Get the frames per second (fps) of the video
22
+ fps = (cap.get(cv2.CAP_PROP_FPS))
23
  st.write(fps)
24
+
25
+ # Calculate the interval to capture one frame per second
26
  interval = int(round(fps/1))
27
+
28
+ # Initialize a counter for frames
29
  frame_count = 0
30
  model = tf.keras.models.load_model('HandSignClassifier (1).h5')
31
  array = ['a','b','c','d','e','f','g','h','i','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y']
32
  out = ''
33
+
34
  while True:
35
+ # Read the next fram
36
+
37
  ret, frame = cap.read()
38
+
39
+ # Break the loop if the video is over
40
  if not ret:
41
  break
42
 
43
  # Check if it's time to capture a frame
44
  if frame_count % interval == 0:
45
+ frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert to grayscale
46
+ frame = cv2.resize(frame, (28, 28)) # Resize to (28, 28)
47
+ frame = np.reshape(frame, (1, 28, 28, 1))
48
+ st.image(frame, 'input')# Reshape
49
+ pred = model.predict(frame)
50
+ st.write(pred)
51
+ pred = np.argmax(pred)
52
+ pred = array[pred]
53
+ if not out or out[-1] != pred:
54
+ out = out + pred
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
 
56
  # Increment the frame counter
57
  frame_count += 1
58
 
59
+ # Release the video capture object
60
  cap.release()
61
+
62
  st.write(out)