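"""Streamlit app: detect hands in an uploaded video with a Haar Cascade
classifier, classify each sampled hand region with a Keras CNN, and build
up the predicted sign-language letters into an output string.

Run with: streamlit run <this_file>.py
"""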
import cv2
import numpy as np
import streamlit as st
import tempfile
from tensorflow.keras.models import load_model

# Function to detect hand using Haar Cascade
def detect_hand(frame, hand_cascade):
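    """Return bounding boxes (x, y, w, h) for hands detected in a BGR frame."""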
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    hands = hand_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
    return hands

# Load Haar Cascade for hand detection
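# (OpenCV's bundled cascades do not include hands, so a third-party XML is needed)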
hand_cascade_path = 'path/to/your/hand_cascade.xml'  # Replace with your actual path
hand_cascade = cv2.CascadeClassifier(hand_cascade_path)
if hand_cascade.empty():
    st.error(f"Could not load hand cascade from {hand_cascade_path}")

# Upload a video file via Streamlit
f = st.file_uploader("Choose a Video")

if f is not None:
    # Write the upload to a temporary file so OpenCV can read it by path
    tfile = tempfile.NamedTemporaryFile(delete=False)
    tfile.write(f.read())
    tfile.flush()
    cap = cv2.VideoCapture(tfile.name)
    fps = cap.get(cv2.CAP_PROP_FPS)
    st.write(f"FPS: {fps}")
    # Sample roughly one frame per second; guard against a reported FPS of 0
    interval = max(1, int(round(fps)))
    frame_count = 0
    model = load_model('HandSignClassifier (1).h5')
    # Class labels: static letters only (j and z involve motion)
    labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm',
              'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y']
    out = ''

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Process roughly one frame per second
        if frame_count % interval == 0:
            hands = detect_hand(frame, hand_cascade)

            # Keep a clean copy so the ROI fed to the model does not
            # include the rectangle drawn below
            clean = frame.copy()

            # Draw rectangles around detected hands
            for (x, y, w, h) in hands:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

            # Display the annotated frame (OpenCV frames are BGR)
            st.image(frame, caption='input', channels='BGR')

            # `if hands:` raises ValueError on a multi-row array, so test length
            if len(hands) > 0:
                # Use the last detection, matching the loop variable above
                x, y, w, h = hands[-1]

                # Extract the hand region of interest from the clean frame
                hand_roi = clean[y:y + h, x:x + w]

                # Preprocess the ROI to match the model input: 28x28 grayscale
                hand_roi = cv2.cvtColor(hand_roi, cv2.COLOR_BGR2GRAY)
                hand_roi = cv2.resize(hand_roi, (28, 28))
                hand_roi = np.reshape(hand_roi, (1, 28, 28, 1))

                # Classify the sign and show the raw prediction scores
                pred = model.predict(hand_roi)
                st.write(pred)
                letter = labels[np.argmax(pred)]
                # Append the letter only when it differs from the previous
                # one, collapsing repeated predictions across frames
                if not out or out[-1] != letter:
                    out += letter

        # Increment the frame counter
        frame_count += 1

    cap.release()
    st.write(f"Predicted text: {out}")