AamirMalik commited on
Commit
14c3b21
·
verified ·
1 Parent(s): 7a58141

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -12
app.py CHANGED
@@ -1,20 +1,28 @@
1
  import streamlit as st
2
  import numpy as np
3
- import tensorflow as tf
4
  from PIL import Image
5
  from transformers import AutoImageProcessor, AutoModelForImageClassification
6
 
7
- # Load gesture classification model from Hugging Face Hub (public model)
8
- processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
9
- model = AutoModelForImageClassification.from_pretrained("google/vit-base-patch16-224-in21k")
10
-
11
def classify_gesture(image):
    """Return the predicted class index for a hand-gesture image.

    Uses the module-level `processor` and `model`. The input may be in any
    PIL mode; it is converted to RGB before preprocessing. The result is the
    raw argmax index of the model's logits as a plain int.
    """
    rgb = image.convert("RGB")
    batch = processor(images=rgb, return_tensors="pt")
    logits = model(**batch).logits
    return logits.argmax(dim=-1).item()
18
 
19
  # Streamlit UI
20
  def main():
@@ -37,15 +45,15 @@ def main():
37
  if camera_image:
38
  image = Image.open(camera_image)
39
  st.image(image, caption="Captured Image", use_column_width=True)
40
- gesture = classify_gesture(image)
41
- st.write(f"Gesture: {gesture}")
42
 
43
  # Display uploaded image
44
  if uploaded_image:
45
  image = Image.open(uploaded_image)
46
  st.image(image, caption="Uploaded Image", use_column_width=True)
47
- gesture = classify_gesture(image)
48
- st.write(f"Gesture: {gesture}")
49
 
50
  if __name__ == "__main__":
51
  main()
 
1
  import streamlit as st
2
  import numpy as np
 
3
  from PIL import Image
4
  from transformers import AutoImageProcessor, AutoModelForImageClassification
5
 
6
# Hugging Face checkpoint used for sign/gesture recognition.
processor = AutoImageProcessor.from_pretrained("nateraw/gesture-classification")
model = AutoModelForImageClassification.from_pretrained("nateraw/gesture-classification")

# Map of model class indices to human-readable sign names; indices outside
# this mapping are reported as unknown by the classifier.
sign_labels = dict(enumerate(["Hello", "Thank You", "Yes", "No", "Please"]))
18
+
19
def classify_sign(image):
    """Classify a sign-language gesture image.

    Parameters
    ----------
    image : PIL.Image.Image
        Image in any mode; it is converted to RGB before preprocessing.

    Returns
    -------
    str
        Human-readable label looked up in the module-level ``sign_labels``
        mapping, or ``"Unknown Sign"`` when the predicted index is not mapped.
    """
    # Local import: torch is a transitive dependency of the "pt" tensors the
    # processor produces, but is not imported at the top of this file.
    import torch

    image = image.convert("RGB")
    inputs = processor(images=image, return_tensors="pt")
    # Inference only — disable autograd so no gradient graph is built,
    # saving memory and time on every classification.
    with torch.no_grad():
        outputs = model(**inputs)
    prediction = outputs.logits.argmax(-1).item()
    return sign_labels.get(prediction, "Unknown Sign")
26
 
27
  # Streamlit UI
28
  def main():
 
45
  if camera_image:
46
  image = Image.open(camera_image)
47
  st.image(image, caption="Captured Image", use_column_width=True)
48
+ gesture = classify_sign(image)
49
+ st.write(f"Detected Gesture: {gesture}")
50
 
51
  # Display uploaded image
52
  if uploaded_image:
53
  image = Image.open(uploaded_image)
54
  st.image(image, caption="Uploaded Image", use_column_width=True)
55
+ gesture = classify_sign(image)
56
+ st.write(f"Detected Gesture: {gesture}")
57
 
58
  if __name__ == "__main__":
59
  main()