AamirMalik commited on
Commit
bde03fa
·
verified ·
1 Parent(s): be99c79

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -4
app.py CHANGED
@@ -7,9 +7,9 @@ import torch
7
  import cv2
8
  import time
9
 
10
- # Load the publicly available Hugging Face model
11
- processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
12
- model = AutoModelForImageClassification.from_pretrained("google/vit-base-patch16-224-in21k")
13
 
14
  # Function for sign classification
15
  def classify_sign(image):
@@ -17,13 +17,23 @@ def classify_sign(image):
17
  inputs = processor(images=image, return_tensors="pt")
18
  outputs = model(**inputs)
19
  prediction = torch.argmax(outputs.logits, dim=-1).item()
20
- labels = ["Hello", "Thank You", "Yes", "No", "Please"] # Update with the actual model labels
21
  return labels[prediction % len(labels)]
22
 
23
  # Streamlit UI
24
  def main():
 
25
  st.title("Sign Language Translator")
26
 
 
 
 
 
 
 
 
 
 
27
  tab1, tab2, tab3, tab4 = st.tabs(["Image Load", "Take Picture", "Live", "Text2Sign"])
28
 
29
  with tab1:
 
import cv2
import time

# Load the image-classification backbone.
# NOTE(review): "microsoft/resnet-50" is a general-purpose ImageNet
# classifier, NOT a sign-language model — its predicted classes have no
# relation to signs. Replace with a checkpoint fine-tuned on a sign-language
# dataset for real recognition. (AutoImageProcessor /
# AutoModelForImageClassification are imported earlier in this file.)
processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = AutoModelForImageClassification.from_pretrained("microsoft/resnet-50")
14
# Function for sign classification
def classify_sign(image):
    """Run the loaded image classifier on *image* and return a sign label.

    The image is preprocessed with the module-level ``processor``, scored by
    the module-level ``model``, and the argmax class id is mapped onto a
    small fixed vocabulary.

    NOTE(review): the backbone is a generic ImageNet classifier, so the
    modulo mapping below is a demo placeholder, not genuine sign
    recognition — the same sign will not reliably map to the same word.
    """
    inputs = processor(images=image, return_tensors="pt")
    outputs = model(**inputs)
    # Highest-scoring class id as a plain Python int.
    prediction = torch.argmax(outputs.logits, dim=-1).item()
    labels = ["Hello", "Thank You", "Yes", "No", "Please"]  # Update with actual model labels if available
    # Modulo keeps the index in range because the model has far more
    # classes (1000 for ImageNet) than this 5-word placeholder vocabulary.
    return labels[prediction % len(labels)]
22
 
23
  # Streamlit UI
24
  def main():
25
+ st.set_page_config(page_title="Sign Language Translator", layout="wide")
26
  st.title("Sign Language Translator")
27
 
28
+ # Sidebar
29
+ with st.sidebar:
30
+ st.header("Menu")
31
+ st.info("Use the tabs to navigate between features.")
32
+ st.markdown("- 📸 **Image Load**: Upload an image to detect signs")
33
+ st.markdown("- 📷 **Take Picture**: Capture a sign using your camera")
34
+ st.markdown("- 📹 **Live**: Detect signs in real-time")
35
+ st.markdown("- 📝 **Text2Sign**: Convert text into sign language video")
36
+
37
  tab1, tab2, tab3, tab4 = st.tabs(["Image Load", "Take Picture", "Live", "Text2Sign"])
38
 
39
  with tab1: