# app.py — "Update app.py" by Beasto, commit b317de9 (1.77 kB)
# (Header recovered from the Hugging Face file viewer: raw / history blame.)
import os
import tempfile

import cv2
import numpy as np
import streamlit as st
import tensorflow as tf
from PIL import Image
from tensorflow.keras.models import load_model
# Hand-sign video transcription.
# Upload a video, sample roughly one frame per second, classify each sampled
# frame with a Keras hand-sign model, and build a string of predicted letters
# (collapsing consecutive duplicates).
f = st.file_uploader("Choose a Video")
if f is not None:
    # cv2.VideoCapture needs a real filesystem path, so spool the uploaded
    # bytes to a named temp file. Close it so the bytes are flushed to disk
    # before OpenCV opens the file by name.
    tfile = tempfile.NamedTemporaryFile(delete=False)
    tfile.write(f.read())
    tfile.close()

    cap = cv2.VideoCapture(tfile.name)
    # Frames-per-second as reported by the container.
    fps = cap.get(cv2.CAP_PROP_FPS)
    st.write(fps)

    # Capture one frame per second. Guard against fps == 0 (some files report
    # no fps), which would make the modulo below a division by zero.
    interval = max(1, int(round(fps)))

    model = tf.keras.models.load_model('HandSignClassifier (1).h5')
    # Sign-language MNIST labels: 24 static letters — 'j' and 'z' are
    # intentionally absent because they require motion.
    array = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm',
             'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y']

    out = ''
    frame_count = 0
    while True:
        # Read the next frame; ret is False once the video is over.
        ret, frame = cap.read()
        if not ret:
            break
        if frame_count % interval == 0:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)   # to grayscale
            small = cv2.resize(gray, (28, 28))               # model input size
            # Show the 2-D image; st.image cannot render the 4-D batch tensor.
            st.image(small, 'input')
            batch = np.reshape(small, (1, 28, 28, 1))
            # NOTE(review): pixels are fed as raw 0-255 values — confirm the
            # model was trained without /255 normalization.
            pred = model.predict(batch)
            st.write(pred)
            letter = array[int(np.argmax(pred))]
            # Append only when the letter differs from the previous one.
            if not out or out[-1] != letter:
                out = out + letter
        frame_count += 1

    cap.release()
    st.write(out)
    # The temp file was created with delete=False; remove it explicitly.
    os.unlink(tfile.name)