AamirMalik committed on
Commit
852f62f
·
verified ·
1 Parent(s): 8584cc1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -19
app.py CHANGED
@@ -2,23 +2,24 @@ import streamlit as st
2
  import requests
3
  import os
4
  from PIL import Image
5
- from transformers import AutoImageProcessor, AutoModelForImageClassification
6
  import torch
7
  import cv2
8
  import time
 
 
9
 
10
- # Load the open-source Hugging Face sign language model
11
- processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
12
- model = AutoModelForImageClassification.from_pretrained("microsoft/resnet-50")
13
 
14
- # Function for sign classification
15
- def classify_sign(image):
16
- image = image.convert("RGB")
17
- inputs = processor(images=image, return_tensors="pt")
18
- outputs = model(**inputs)
19
- prediction = torch.argmax(outputs.logits, dim=-1).item()
20
- labels = ["Hello", "Thank You", "Yes", "No", "Please"] # Update with actual model labels if available
21
- return labels[prediction % len(labels)]
22
 
23
  # Streamlit UI
24
  def main():
@@ -34,15 +35,15 @@ def main():
34
  st.button("📞 Contact Us", use_container_width=True)
35
  st.button("💬 Feedback", use_container_width=True)
36
 
37
- tab1, tab2, tab3, tab4 = st.tabs(["Image Load", "Take Picture", "Live", "Text2Sign"])
38
 
39
  with tab1:
40
  st.subheader("📸 Image Load")
41
- uploaded_image = st.file_uploader("Upload an image of a hand gesture", type=["png", "jpg", "jpeg"])
42
  if uploaded_image:
43
  image = Image.open(uploaded_image)
44
  st.image(image, caption="Uploaded Image", use_container_width=True)
45
- gesture = classify_sign(image)
46
  st.success(f"Detected Gesture: {gesture}")
47
 
48
  with tab2:
@@ -51,11 +52,11 @@ def main():
51
  if camera_image:
52
  image = Image.open(camera_image)
53
  st.image(image, caption="Captured Image", use_container_width=True)
54
- gesture = classify_sign(image)
55
  st.success(f"Detected Gesture: {gesture}")
56
 
57
  with tab3:
58
- st.subheader("📹 Live")
59
  if st.button("Enable Cam"):
60
  cap = cv2.VideoCapture(0)
61
  stframe = st.image([])
@@ -65,10 +66,10 @@ def main():
65
  if not ret:
66
  break
67
  image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
68
- gesture = classify_sign(image)
69
  frame = cv2.putText(frame, f"Gesture: {gesture}", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
70
  stframe.image(frame, channels="BGR", use_container_width=True)
71
- time.sleep(5)
72
  cap.release()
73
 
74
  with tab4:
 
2
  import requests
3
  import os
4
  from PIL import Image
 
5
  import torch
6
  import cv2
7
  import time
8
+ import numpy as np
9
+ from tensorflow.keras.models import load_model
10
 
11
# Load the DeepASL model for live ASL alphabet classification.
# NOTE: load_model runs at import time, so the app fails fast at startup
# if the weights file is missing.
MODEL_PATH = "asl_alphabet_model.h5"
model = load_model(MODEL_PATH)


def classify_asl(image):
    """Return the predicted ASL alphabet letter for a hand-gesture image.

    Args:
        image: a PIL.Image in any mode/size (uploaded file, camera capture,
            or a frame converted from OpenCV).

    Returns:
        str: a single uppercase letter "A"–"Z" chosen by the model's argmax.
    """
    # Normalize to 3 channels first: PNG uploads are often RGBA and some
    # captures are grayscale, which would otherwise yield a (64, 64, 4) or
    # (64, 64) array and break model.predict.
    # (assumes the model expects 64x64 RGB input — TODO confirm against the
    # trained DeepASL architecture)
    image = image.convert("RGB")
    image = image.resize((64, 64))          # resize to model input size
    arr = np.array(image) / 255.0           # scale pixel values to [0, 1]
    arr = np.expand_dims(arr, axis=0)       # add batch dimension -> (1, 64, 64, 3)
    prediction = model.predict(arr)
    labels = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")  # ASL alphabet labels
    # int(...) converts the numpy scalar to a plain Python index.
    return labels[int(np.argmax(prediction))]
23
 
24
  # Streamlit UI
25
  def main():
 
35
  st.button("📞 Contact Us", use_container_width=True)
36
  st.button("💬 Feedback", use_container_width=True)
37
 
38
+ tab1, tab2, tab3, tab4 = st.tabs(["Image Load", "Take Picture", "Live ASL", "Text2Sign"])
39
 
40
  with tab1:
41
  st.subheader("📸 Image Load")
42
+ uploaded_image = st.file_uploader("Upload an image of an ASL alphabet gesture", type=["png", "jpg", "jpeg"])
43
  if uploaded_image:
44
  image = Image.open(uploaded_image)
45
  st.image(image, caption="Uploaded Image", use_container_width=True)
46
+ gesture = classify_asl(image)
47
  st.success(f"Detected Gesture: {gesture}")
48
 
49
  with tab2:
 
52
  if camera_image:
53
  image = Image.open(camera_image)
54
  st.image(image, caption="Captured Image", use_container_width=True)
55
+ gesture = classify_asl(image)
56
  st.success(f"Detected Gesture: {gesture}")
57
 
58
  with tab3:
59
+ st.subheader("📹 Live ASL")
60
  if st.button("Enable Cam"):
61
  cap = cv2.VideoCapture(0)
62
  stframe = st.image([])
 
66
  if not ret:
67
  break
68
  image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
69
+ gesture = classify_asl(image)
70
  frame = cv2.putText(frame, f"Gesture: {gesture}", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
71
  stframe.image(frame, channels="BGR", use_container_width=True)
72
+ time.sleep(1)
73
  cap.release()
74
 
75
  with tab4: