omsandeepatil commited on
Commit
d22148f
·
verified ·
1 Parent(s): 1ba2ff6

Upload 7 files

Browse files
README.md CHANGED
@@ -1,12 +1,13 @@
1
  ---
2
- title: Sign Language
3
- emoji: 📊
4
- colorFrom: red
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 5.20.1
8
- app_file: app.py
9
  pinned: false
 
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Sign Language Detection
3
+ emoji: 💻
4
+ colorFrom: pink
5
+ colorTo: purple
6
+ sdk: streamlit
7
+ sdk_version: 1.36.0
8
+ app_file: prototype_static_test.py
9
  pinned: false
10
+ short_description: Performs ASL Sign Language Prediction
11
  ---
12
 
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
mlp_model.joblib ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:804899867625543de815a28c878b7a4c1333a57a4f0994c9f5f87baa468ace91
3
+ size 255840
packages.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ libgl1
prototype_static.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
prototype_static_test.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import cv2
3
+ import numpy as np
4
+ import joblib
5
+ import mediapipe as mp
6
+ from PIL import Image
7
+ import os
8
+ import tempfile
9
+
10
# Load the trained MLP classifier from disk (shipped alongside the app via Git LFS).
model = joblib.load('mlp_model.joblib')

# Class-index -> label mapping used to decode model predictions:
# indices 0-9 map to the digit characters "0"-"9",
# indices 10-35 map to the lowercase letters "a"-"z".
class_names = {}
for digit in range(10):
    class_names[digit] = str(digit)
for offset in range(26):
    class_names[10 + offset] = chr(ord('a') + offset)

# Set up the MediaPipe Hands detector: static-image mode, a single hand,
# and the default 0.5 detection-confidence threshold.
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(static_image_mode=True, max_num_hands=1, min_detection_confidence=0.5)
20
+
21
+
22
def skeletal_image(image_path, shape=(256, 256, 3)):
    """Render the detected hand's landmarks onto a white canvas.

    Parameters
    ----------
    image_path : str
        Filesystem path of the image to analyse.
    shape : tuple, optional
        (height, width, channels) of the output canvas.

    Returns
    -------
    numpy.ndarray or None
        BGR image with the landmark skeleton drawn, or ``None`` when the
        file cannot be read or no hand is detected.
    """
    image = cv2.imread(image_path)
    if image is None:
        # cv2.imread returns None (not an exception) for missing/unreadable
        # files; without this guard cvtColor below raises a TypeError.
        return None
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB
    results = hands.process(image_rgb)
    if not results.multi_hand_landmarks:
        return None
    # Draw each detected hand onto a plain white background.
    white_background = np.ones(shape, dtype=np.uint8) * 255
    for hand_landmarks in results.multi_hand_landmarks:
        mp.solutions.drawing_utils.draw_landmarks(white_background, hand_landmarks, mp_hands.HAND_CONNECTIONS)
    # Convert back to BGR so callers can display it with OpenCV conventions.
    white_background_bgr = cv2.cvtColor(white_background, cv2.COLOR_RGB2BGR)
    return white_background_bgr
33
+
34
+
35
def extract_landmarks(image_path):
    """Extract a flattened landmark feature vector from an image file.

    Parameters
    ----------
    image_path : str
        Filesystem path of the image to analyse.

    Returns
    -------
    numpy.ndarray or None
        1-D array of the first detected hand's (x, y, z) landmark
        coordinates flattened together, or ``None`` when the file cannot
        be read or no hand is detected.
    """
    image = cv2.imread(image_path)
    if image is None:
        # Guard: cv2.imread signals failure by returning None, which would
        # otherwise crash cvtColor below.
        return None
    results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    if results.multi_hand_landmarks:
        # Flatten each landmark's (x, y, z) into one feature vector for the MLP.
        landmarks = np.array([[lm.x, lm.y, lm.z] for lm in results.multi_hand_landmarks[0].landmark]).flatten()
        return landmarks
    return None
44
+
45
+
46
+
47
# Streamlit app: capture/upload an image, visualise the hand skeleton,
# and predict the ASL sign with the pre-trained MLP classifier.
st.title('Hand Gesture Recognition')

# Let the user choose between uploading a file and taking a webcam photo.
input_method = st.radio("Choose the input method:", ("Upload an Image", "Use Webcam"))

uploaded_file = None
if input_method == "Upload an Image":
    uploaded_file = st.file_uploader("Upload an Image", type=["jpg", "jpeg", "png"])
elif input_method == "Use Webcam":
    uploaded_file = st.camera_input("Take a picture")

if uploaded_file is not None:
    # Normalise to RGB so JPEG saving below always succeeds (e.g. for RGBA PNGs).
    uploaded_image = Image.open(uploaded_file).convert('RGB')
    st.image(uploaded_image, caption='Uploaded Image', use_column_width=True)

    # Persist the in-memory image to a temporary file because the helper
    # functions (skeletal_image / extract_landmarks) read via cv2.imread,
    # which needs a filesystem path.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as tmpfile:
        uploaded_image.save(tmpfile, format="JPEG")
        tmpfile_path = tmpfile.name

    try:
        skeletal_img = skeletal_image(tmpfile_path)
        if skeletal_img is not None:
            st.image(skeletal_img, channels="BGR", caption='This processed image contains your hand landmarks')
            processed_image = extract_landmarks(tmpfile_path)

            if processed_image is not None:
                with st.spinner('Please wait, while the model predicts...'):
                    # The MLP expects a single flattened sample: shape (1, n_features).
                    processed_image = processed_image.reshape(1, -1)
                    predictions = model.predict(processed_image)
                    predicted_class_name = class_names[predictions[0]]

                # Display the prediction
                st.write(f"The predicted ASL sign seems to be {predicted_class_name.upper()}")
            else:
                st.write("No hand landmarks were detected.")
        else:
            # Previously a silent no-op when no hand was found in the image:
            # tell the user why no result is shown.
            st.write("No hand landmarks were detected.")
    finally:
        # Ensure the temporary file is deleted even if an error occurs
        os.remove(tmpfile_path)
90
+
91
+
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ mediapipe
2
+ scikit-learn
3
+ numpy==1.26.4
4
+ opencv-python
5
+ tensorflow
6
+ joblib
7
+ Pillow
8
+ matplotlib
9
+ pandas
10
+ seaborn