# Source: Hugging Face Space file viewer export
# Author: Beasto — commit 89b040a ("Update app.py"), 1.59 kB
import tempfile

import cv2
import numpy as np
import streamlit as st
from PIL import Image
from tensorflow.keras.models import load_model
# --- Hand-sign video transcription ---
# Upload an .mp4, sample one frame per second, classify each sampled frame
# with a pre-trained CNN, and collapse consecutive duplicate letters into
# a single output string shown in the app.
video_file = st.file_uploader("Choose a video file", type=["mp4"])
if video_file is not None:
    # Streamlit's UploadedFile is an in-memory object; cv2.VideoCapture
    # needs a real filesystem path. The original passed video_file.name,
    # which is only the client-side filename and does not exist on the
    # server — spill the uploaded bytes to a temporary file instead.
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
        tmp.write(video_file.read())
        video_path = tmp.name

    cap = cv2.VideoCapture(video_path)
    try:
        fps = cap.get(cv2.CAP_PROP_FPS)
        st.write(fps)
        # Capture one frame per second of video. Guard against a 0 (or
        # negative/NaN) fps report from a corrupt file, which would
        # otherwise make the modulo below divide by zero.
        interval = max(1, int(round(fps))) if fps and fps > 0 else 1

        model = load_model('HandSignClassifier.h5')
        # Class index -> letter. 'j' and 'z' are absent — presumably
        # because they require motion in fingerspelling and this
        # classifier is single-frame; TODO confirm against the model.
        letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k',
                   'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
                   'v', 'w', 'x', 'y']

        out = ''
        frame_count = 0
        while True:
            ret, frame = cap.read()
            if not ret:  # end of video (or read error)
                break
            if frame_count % interval == 0:
                # Model expects a single-channel 28x28 image, batch of 1.
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                gray = cv2.resize(gray, (28, 28))
                batch = np.reshape(gray, (1, 28, 28, 1))
                # NOTE(review): no /255 normalization here — assumes the
                # model was trained on raw 0-255 pixel values; confirm.
                pred = letters[np.argmax(model.predict(batch))]
                # Collapse runs of the same predicted letter.
                if not out or out[-1] != pred:
                    out += pred
            frame_count += 1
    finally:
        # Always release the capture, even if prediction raises.
        cap.release()
    # Show the transcription in the app; the original used print(),
    # which only reaches the server log and is invisible to the user.
    st.write(out)