AamirMalik committed on
Commit
10e8be4
·
verified ·
1 Parent(s): ab34be6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -22
app.py CHANGED
@@ -2,6 +2,8 @@ import streamlit as st
2
  import numpy as np
3
  from PIL import Image
4
  from transformers import AutoImageProcessor, AutoModelForImageClassification
 
 
5
 
6
  # Load publicly available model from Hugging Face Hub
7
  processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
@@ -24,14 +26,32 @@ def classify_sign(image):
24
  prediction = outputs.logits.argmax(-1).item()
25
  return sign_labels.get(prediction % len(sign_labels), "Unknown Sign")
26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  # Streamlit UI
28
  def main():
29
  st.set_page_config(page_title="Sign Language Translator", layout="wide")
30
- st.title("🀟 Sign Language Translator")
31
- st.write("Translate sign language gestures into text and speech.")
32
 
33
  # Top tabs
34
- tab1, tab2, tab3 = st.tabs(["Image Load", "Take Picture", "Live"])
35
 
36
  with tab1:
37
  uploaded_image = st.file_uploader("Upload an image of a hand gesture", type=["png", "jpg", "jpeg"])
@@ -42,7 +62,7 @@ def main():
42
  st.image(image, caption="Uploaded Image", use_column_width=True)
43
  with col2:
44
  gesture = classify_sign(image)
45
- st.info(f"Detected Gesture: {gesture}")
46
 
47
  with tab2:
48
  camera_image = st.camera_input("Take a picture")
@@ -53,28 +73,26 @@ def main():
53
  st.image(image, caption="Captured Image", use_column_width=True)
54
  with col2:
55
  gesture = classify_sign(image)
56
- st.info(f"Detected Gesture: {gesture}")
57
 
58
  with tab3:
59
- st.write("**Live gesture detection is now available!**")
 
60
 
61
- # Left-side tabs
62
  with st.sidebar:
63
- selected_tab = st.radio("Menu", ["About Us", "Contact Us", "Feedback"])
64
-
65
- if selected_tab == "About Us":
66
- st.markdown("**We are team SignAI.** We leverage advanced AI to interpret sign language gestures, making communication more accessible.")
67
-
68
- elif selected_tab == "Contact Us":
69
- st.markdown("**Contact Information:**")
70
- st.write("πŸ“ž Phone: +123-456-7890")
71
- st.write("πŸ“§ Email: contact@signai.com")
72
- st.write("🌐 [LinkedIn](https://linkedin.com)")
73
- st.write("πŸ“· [Instagram](https://instagram.com)")
74
- st.write("πŸ“˜ [Facebook](https://facebook.com)")
75
-
76
- elif selected_tab == "Feedback":
77
- st.text_area("We value your feedback. Please share your thoughts below:")
78
 
79
  if __name__ == "__main__":
80
  main()
 
2
  import numpy as np
3
  from PIL import Image
4
  from transformers import AutoImageProcessor, AutoModelForImageClassification
5
+ import cv2
6
+ import time
7
 
8
  # Load publicly available model from Hugging Face Hub
9
  processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
 
26
  prediction = outputs.logits.argmax(-1).item()
27
  return sign_labels.get(prediction % len(sign_labels), "Unknown Sign")
28
 
29
+ # Live gesture detection
30
+ def live_detection():
31
+ cap = cv2.VideoCapture(0)
32
+ if not cap.isOpened():
33
+ st.error("Error: Could not open webcam.")
34
+ return
35
+
36
+ stframe = st.image([])
37
+ while True:
38
+ ret, frame = cap.read()
39
+ if not ret:
40
+ break
41
+ image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
42
+ gesture = classify_sign(image)
43
+ frame = cv2.putText(frame, f"Detected Gesture: {gesture}", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
44
+ stframe.image(frame, channels="BGR")
45
+ time.sleep(5)
46
+ cap.release()
47
+
48
  # Streamlit UI
49
  def main():
50
  st.set_page_config(page_title="Sign Language Translator", layout="wide")
51
+ st.markdown("<h1 style='text-align: center; color: #4CAF50;'>🀟 Sign Language Translator</h1>", unsafe_allow_html=True)
 
52
 
53
  # Top tabs
54
+ tab1, tab2, tab3 = st.tabs(["πŸ“Έ Image Load", "πŸ“· Take Picture", "πŸŽ₯ Live"])
55
 
56
  with tab1:
57
  uploaded_image = st.file_uploader("Upload an image of a hand gesture", type=["png", "jpg", "jpeg"])
 
62
  st.image(image, caption="Uploaded Image", use_column_width=True)
63
  with col2:
64
  gesture = classify_sign(image)
65
+ st.success(f"Detected Gesture: {gesture}")
66
 
67
  with tab2:
68
  camera_image = st.camera_input("Take a picture")
 
73
  st.image(image, caption="Captured Image", use_column_width=True)
74
  with col2:
75
  gesture = classify_sign(image)
76
+ st.success(f"Detected Gesture: {gesture}")
77
 
78
  with tab3:
79
+ if st.button("Enable Cam"):
80
+ live_detection()
81
 
82
+ # Left-side sidebar with buttons
83
  with st.sidebar:
84
+ st.markdown("<h2 style='color: #4CAF50;'>Menu</h2>", unsafe_allow_html=True)
85
+ if st.button("About Us", key="about_us", help="Learn more about us"):
86
+ st.markdown("**We are team SignAI.** We leverage advanced AI to interpret sign language gestures, making communication more accessible.")
87
+ if st.button("Contact Us", key="contact_us", help="Get in touch"):
88
+ st.markdown("**Contact Information:**")
89
+ st.write("πŸ“ž Phone: +123-456-7890")
90
+ st.write("πŸ“§ Email: contact@signai.com")
91
+ st.write("🌐 [LinkedIn](https://linkedin.com)")
92
+ st.write("πŸ“· [Instagram](https://instagram.com)")
93
+ st.write("πŸ“˜ [Facebook](https://facebook.com)")
94
+ if st.button("Feedback", key="feedback", help="Give us your feedback"):
95
+ st.text_area("We value your feedback. Please share your thoughts below:")
 
 
 
96
 
97
  if __name__ == "__main__":
98
  main()