Beasto committed on
Commit
ac727cd
·
1 Parent(s): 0f172c8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -43
app.py CHANGED
@@ -4,47 +4,49 @@ from PIL import Image
4
  import numpy as np
5
  import streamlit as st
6
 
7
# Let the user pick an MP4. st.file_uploader returns an UploadedFile
# object (or None until a file is chosen) — it is NOT a filesystem path.
video_path = st.file_uploader("Choose a video file", type=["mp4"])

if video_path is not None:
    # cv2.VideoCapture needs a real path, so persist the uploaded bytes
    # to a temporary file first.
    import tempfile
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
        tmp.write(video_path.read())
        temp_path = tmp.name

    cap = cv2.VideoCapture(temp_path)

    # Frames per second of the video. cap.get returns 0.0 when the
    # backend cannot report it, so fall back to a sane default.
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0

    # To sample one frame per second we must skip `fps` frames between
    # captures — NOT 1/fps, which rounds to 0 and makes the modulo
    # below raise ZeroDivisionError.
    interval = max(1, int(round(fps)))

    # Counter of frames read so far.
    frame_count = 0
    model = load_model('HandSignClassifier.h5')
    # Class labels for the model's output indices. 'j' and 'z' are
    # absent — presumably because they require motion in the dataset;
    # verify against the model's training labels.
    array = ['a','b','c','d','e','f','g','h','i','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y']
    out = ''

    while True:
        # Read the next frame.
        ret, frame = cap.read()

        # Stop once the video is exhausted.
        if not ret:
            break

        # Capture roughly one frame per second of video.
        if frame_count % interval == 0:
            # Resize to the 28x28 grayscale input the classifier expects.
            frame = Image.fromarray(frame).resize((28, 28))
            frame = frame.convert('L')  # fixed typo: was `comvert`
            # np.reshape requires the array as its first argument
            # (the original dropped it).
            frame = np.reshape(np.array(frame), (1, 28, 28, 1))
            pred = model.predict(frame)
            pred = array[np.argmax(pred)]
            # Collapse consecutive duplicate predictions; the `not out`
            # guard avoids IndexError on the very first prediction.
            if not out or out[-1] != pred:
                out = out + pred

        # Increment the frame counter.
        frame_count += 1

    # Release the video capture handle.
    cap.release()

    print(out)
 
 
 
 
4
  import numpy as np
5
  import streamlit as st
6
 
 
7
# Open the video file chosen by the user. st.file_uploader returns an
# UploadedFile object, or None until a file has been uploaded.
video_file = st.file_uploader("Choose a video file", type=["mp4"])

if video_file is not None:
    # video_file.name is only the original filename, not a path on this
    # machine — write the uploaded bytes to a temporary file so OpenCV
    # can actually open the video.
    import tempfile
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
        tmp.write(video_file.read())
        video_path = tmp.name
    cap = cv2.VideoCapture(video_path)

    # Frames per second of the video. cap.get returns 0.0 when the
    # backend cannot determine it, so fall back to a sane default.
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0

    # Sample one frame per second: skip `fps` frames between captures.
    # The original `int(round(1 / fps))` rounds to 0 for any fps > 2,
    # making `frame_count % interval` raise ZeroDivisionError.
    interval = max(1, int(round(fps)))

    # Counter of frames read so far.
    frame_count = 0
    model = load_model('HandSignClassifier.h5')
    # Class labels for the model's output indices. 'j' and 'z' are
    # absent — presumably because they require motion in the dataset;
    # verify against the model's training labels.
    array = ['a','b','c','d','e','f','g','h','i','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y']
    out = ''

    while True:
        # Read the next frame.
        ret, frame = cap.read()

        # Stop once the video is exhausted.
        if not ret:
            break

        # Capture roughly one frame per second of video.
        if frame_count % interval == 0:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Convert to grayscale
            frame = cv2.resize(frame, (28, 28))              # Resize to (28, 28)
            frame = np.reshape(frame, (1, 28, 28, 1))        # Add batch/channel dims
            pred = model.predict(frame)
            pred = array[np.argmax(pred)]
            # Collapse consecutive duplicate predictions; the `not out`
            # guard avoids IndexError on the very first prediction.
            if not out or out[-1] != pred:
                out = out + pred

        # Increment the frame counter.
        frame_count += 1

    # Release the video capture handle.
    cap.release()

    print(out)