Spaces:
Build error
Build error
import json
import os
import time

import cv2
import numpy as np
import requests
import streamlit as st
import torch
from PIL import Image
from transformers import AutoImageProcessor
# --- Groq API configuration ---
# The key is read from the Space's secrets; when it is absent the app
# degrades gracefully (local label only, no refined text / no video).
GROQ_API_KEY = os.getenv("HF_GROQ_API_KEY")  # Fetch key from Hugging Face secrets
GROQ_API_URL = "https://api.groq.com/openai/v1/chat/completions"

# --- Model setup ---
# NOTE(review): only the image *processor* is loaded here — there is no
# classification head, so classify_sign() cannot produce a real prediction.
# Presumably ViTForImageClassification was intended — TODO confirm.
processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")

# Placeholder sign labels, indexed by the (placeholder) prediction modulo len.
sign_labels = {0: "Hello", 1: "Thank You", 2: "Yes", 3: "No", 4: "Please"}
| # Function to classify sign and refine using Groq API | |
def classify_sign(image):
    """Classify a hand-gesture image, optionally refining the label via Groq.

    Args:
        image: a PIL.Image of a hand gesture (any mode; converted to RGB).

    Returns:
        str: the refined gesture text from the Groq API when a key is
        configured and the call succeeds, otherwise the local placeholder
        label from ``sign_labels``.
    """
    image = image.convert("RGB")
    inputs = processor(images=image, return_tensors="pt")
    # NOTE(review): this argmax runs over the raw preprocessed pixel tensor,
    # not over model logits — no classifier is loaded, so the "prediction"
    # is effectively arbitrary. A ViT classification head should supply the
    # logits here — TODO confirm intended model.
    prediction = inputs['pixel_values'].argmax().item()
    gesture = sign_labels.get(prediction % len(sign_labels), "Unknown Sign")
    if GROQ_API_KEY:
        try:
            response = requests.post(
                GROQ_API_URL,
                headers={"Content-Type": "application/json",
                         "Authorization": f"Bearer {GROQ_API_KEY}"},
                json={"model": "llama-3.3-70b-versatile",
                      "messages": [{"role": "user",
                                    "content": f"Refine this detected sign: {gesture}"}]},
                timeout=30,  # never hang the Streamlit UI on a slow API
            )
            if response.status_code == 200:
                return response.json()['choices'][0]['message']['content']
        except (requests.RequestException, KeyError, IndexError, ValueError):
            # Best-effort refinement: on any network/parse failure fall back
            # to the locally detected label instead of crashing the app.
            pass
    return gesture
| # Function to generate sign avatar video | |
def generate_sign_video(text):
    """Request a sign-language avatar video for *text* via the Groq API.

    Args:
        text: the text to translate into a sign-language avatar video.

    Returns:
        str | None: a video URL on success (currently a hard-coded
        placeholder — the chat API cannot actually produce video), or
        ``None`` when no key is configured or the request fails.
    """
    if GROQ_API_KEY:
        try:
            response = requests.post(
                GROQ_API_URL,
                headers={"Content-Type": "application/json",
                         "Authorization": f"Bearer {GROQ_API_KEY}"},
                json={"model": "llama-3.3-70b-versatile",
                      "messages": [{"role": "user",
                                    "content": f"Generate sign language avatar video for: {text}"}]},
                timeout=30,  # never hang the Streamlit UI on a slow API
            )
            if response.status_code == 200:
                # NOTE(review): the API response is discarded; a text model
                # cannot return video, so a placeholder URL is used instead.
                return "https://example.com/avatar_video.mp4"  # Placeholder URL
        except requests.RequestException:
            # Network failure is reported to the caller as "no video".
            return None
    return None
| # Streamlit UI | |
def main():
    """Render the Streamlit UI.

    Four tabs (image upload, camera snapshot, live webcam, text-to-sign)
    plus a sidebar menu. No return value; all output is Streamlit widgets.
    """
    st.set_page_config(page_title="Sign Language Translator", layout="wide")
    # NOTE(review): the emoji below ("π€", "πΈ", …) look mojibake-damaged
    # from a bad encoding round-trip — confirm intended characters.
    st.markdown("<h1 style='text-align: center; font-size: 40px; font-weight: bold; color: #4CAF50;'>π€ Sign Language Translator</h1>", unsafe_allow_html=True)
    tab1, tab2, tab3, tab4 = st.tabs(["πΈ **Image Load**", "π· **Take Picture**", "π₯ **Live**", "βοΈ **Text2Sign**"])

    # Tab 1: classify a user-uploaded image.
    with tab1:
        uploaded_image = st.file_uploader("Upload an image of a hand gesture", type=["png", "jpg", "jpeg"])
        if uploaded_image:
            image = Image.open(uploaded_image)
            st.image(image, caption="Uploaded Image", use_container_width=True)
            gesture = classify_sign(image)
            st.success(f"Detected Gesture: {gesture}")

    # Tab 2: classify a single camera snapshot.
    with tab2:
        camera_image = st.camera_input("Take a picture")
        if camera_image:
            image = Image.open(camera_image)
            st.image(image, caption="Captured Image", use_container_width=True)
            gesture = classify_sign(image)
            st.success(f"Detected Gesture: {gesture}")

    # Tab 3: continuous webcam capture, one classification every 5 s.
    with tab3:
        if st.button("Enable Cam", key="enable_cam"):
            cap = cv2.VideoCapture(0)
            stframe = st.image([])
            try:
                while cap.isOpened():
                    ret, frame = cap.read()
                    if not ret:
                        break
                    # OpenCV delivers BGR; PIL expects RGB.
                    image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                    gesture = classify_sign(image)
                    frame = cv2.putText(frame, f"Detected Gesture: {gesture}", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
                    stframe.image(frame, channels="BGR", use_container_width=True)
                    time.sleep(5)
            finally:
                # Release the camera even if classification/rendering raises,
                # otherwise the device stays locked until process exit.
                cap.release()

    # Tab 4: text -> sign-language avatar video.
    with tab4:
        text_input = st.text_area("Enter text (max 200 characters)", max_chars=200)
        if st.button("Generate Sign"):
            video_url = generate_sign_video(text_input)
            if video_url:
                st.video(video_url)
            else:
                st.error("Unable to generate sign video. Please try again.")

    # Sidebar: informational buttons and a feedback box.
    with st.sidebar:
        st.markdown("<h2 style='font-size:28px; font-weight: bold; color: #4CAF50;'>Menu</h2>", unsafe_allow_html=True)
        if st.button("π About Us", use_container_width=True):
            st.markdown("We are team SignAI. We leverage advanced AI and Groq technology to interpret sign language gestures, making communication more accessible.")
        if st.button("π Contact Us", use_container_width=True):
            st.markdown("""
            Phone: +123 456 7890
            LinkedIn: [SignAI](#)
            Facebook: [SignAI](#)
            Email: info@signai.com
            Instagram: [@signai_official](#)
            """)
        if st.button("π¬ Feedback", use_container_width=True):
            st.text_area("We value your feedback! Please share your thoughts below:")
| if __name__ == "__main__": |