AamirMalik committed on
Commit
2e40b9f
·
verified ·
1 Parent(s): 36716e8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -30
app.py CHANGED
@@ -17,13 +17,7 @@ GROQ_API_URL = "https://api.groq.com/openai/v1/chat/completions"
17
  processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
18
 
19
  # Placeholder sign labels
20
- sign_labels = {
21
- 0: "Hello",
22
- 1: "Thank You",
23
- 2: "Yes",
24
- 3: "No",
25
- 4: "Please"
26
- }
27
 
28
  # Function to classify sign and refine using Groq API
29
  def classify_sign(image):
@@ -31,36 +25,28 @@ def classify_sign(image):
31
  inputs = processor(images=image, return_tensors="pt")
32
  prediction = inputs['pixel_values'].argmax().item()
33
  gesture = sign_labels.get(prediction % len(sign_labels), "Unknown Sign")
34
-
35
  if GROQ_API_KEY:
36
- response = requests.post(
37
- GROQ_API_URL,
38
- headers={
39
- "Content-Type": "application/json",
40
- "Authorization": f"Bearer {GROQ_API_KEY}"
41
- },
42
- json={
43
- "model": "llama-3.3-70b-versatile",
44
- "messages": [{"role": "user", "content": f"Refine this detected sign: {gesture}"}]
45
- }
46
- )
47
-
48
  if response.status_code == 200:
49
  return response.json()['choices'][0]['message']['content']
50
-
51
  return gesture
52
 
53
- # Streamlit UI
 
 
 
 
 
 
 
54
 
 
55
  def main():
56
  st.set_page_config(page_title="Sign Language Translator", layout="wide")
57
  st.markdown("<h1 style='text-align: center; font-size: 40px; font-weight: bold; color: #4CAF50;'>🀟 Sign Language Translator</h1>", unsafe_allow_html=True)
58
 
59
- tab1, tab2, tab3 = st.tabs([
60
- "πŸ“Έ **Image Load**",
61
- "πŸ“· **Take Picture**",
62
- "πŸŽ₯ **Live**"
63
- ])
64
 
65
  with tab1:
66
  uploaded_image = st.file_uploader("Upload an image of a hand gesture", type=["png", "jpg", "jpeg"])
@@ -82,7 +68,6 @@ def main():
82
  if st.button("Enable Cam", key="enable_cam"):
83
  cap = cv2.VideoCapture(0)
84
  stframe = st.image([])
85
-
86
  while cap.isOpened():
87
  ret, frame = cap.read()
88
  if not ret:
@@ -94,6 +79,15 @@ def main():
94
  time.sleep(5)
95
  cap.release()
96
 
 
 
 
 
 
 
 
 
 
97
  with st.sidebar:
98
  st.markdown("<h2 style='font-size:28px; font-weight: bold; color: #4CAF50;'>Menu</h2>", unsafe_allow_html=True)
99
  if st.button("πŸ“– About Us", use_container_width=True):
@@ -109,5 +103,4 @@ def main():
109
  if st.button("πŸ’¬ Feedback", use_container_width=True):
110
  st.text_area("We value your feedback! Please share your thoughts below:")
111
 
112
- if __name__ == "__main__":
113
- main()
 
17
  processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
18
 
19
  # Placeholder sign labels
20
+ sign_labels = {0: "Hello", 1: "Thank You", 2: "Yes", 3: "No", 4: "Please"}
 
 
 
 
 
 
21
 
22
  # Function to classify sign and refine using Groq API
23
  def classify_sign(image):
 
25
  inputs = processor(images=image, return_tensors="pt")
26
  prediction = inputs['pixel_values'].argmax().item()
27
  gesture = sign_labels.get(prediction % len(sign_labels), "Unknown Sign")
 
28
  if GROQ_API_KEY:
29
+ response = requests.post(GROQ_API_URL, headers={"Content-Type": "application/json", "Authorization": f"Bearer {GROQ_API_KEY}"},
30
+ json={"model": "llama-3.3-70b-versatile", "messages": [{"role": "user", "content": f"Refine this detected sign: {gesture}"}]})
 
 
 
 
 
 
 
 
 
 
31
  if response.status_code == 200:
32
  return response.json()['choices'][0]['message']['content']
 
33
  return gesture
34
 
35
+ # Function to generate sign avatar video
36
+ def generate_sign_video(text):
37
+ if GROQ_API_KEY:
38
+ response = requests.post(GROQ_API_URL, headers={"Content-Type": "application/json", "Authorization": f"Bearer {GROQ_API_KEY}"},
39
+ json={"model": "llama-3.3-70b-versatile", "messages": [{"role": "user", "content": f"Generate sign language avatar video for: {text}"}]})
40
+ if response.status_code == 200:
41
+ return "https://example.com/avatar_video.mp4" # Placeholder URL
42
+ return None
43
 
44
+ # Streamlit UI
45
  def main():
46
  st.set_page_config(page_title="Sign Language Translator", layout="wide")
47
  st.markdown("<h1 style='text-align: center; font-size: 40px; font-weight: bold; color: #4CAF50;'>🀟 Sign Language Translator</h1>", unsafe_allow_html=True)
48
 
49
+ tab1, tab2, tab3, tab4 = st.tabs(["πŸ“Έ **Image Load**", "πŸ“· **Take Picture**", "πŸŽ₯ **Live**", "✍️ **Text2Sign**"])
 
 
 
 
50
 
51
  with tab1:
52
  uploaded_image = st.file_uploader("Upload an image of a hand gesture", type=["png", "jpg", "jpeg"])
 
68
  if st.button("Enable Cam", key="enable_cam"):
69
  cap = cv2.VideoCapture(0)
70
  stframe = st.image([])
 
71
  while cap.isOpened():
72
  ret, frame = cap.read()
73
  if not ret:
 
79
  time.sleep(5)
80
  cap.release()
81
 
82
+ with tab4:
83
+ text_input = st.text_area("Enter text (max 200 characters)", max_chars=200)
84
+ if st.button("Generate Sign"):
85
+ video_url = generate_sign_video(text_input)
86
+ if video_url:
87
+ st.video(video_url)
88
+ else:
89
+ st.error("Unable to generate sign video. Please try again.")
90
+
91
  with st.sidebar:
92
  st.markdown("<h2 style='font-size:28px; font-weight: bold; color: #4CAF50;'>Menu</h2>", unsafe_allow_html=True)
93
  if st.button("πŸ“– About Us", use_container_width=True):
 
103
  if st.button("πŸ’¬ Feedback", use_container_width=True):
104
  st.text_area("We value your feedback! Please share your thoughts below:")
105
 
106
+ if __name__ == "__main__":