"""Streamlit app: detect hand signs in an uploaded or camera image with a YOLOv5 model.

Left column takes the input image and runs detection; right column shows the
annotated result image produced by yolov5's detect.py script.
"""
import streamlit as st
from PIL import Image
import cv2
import numpy as np
import subprocess
import shutil
import os
import torch
import time
import streamlit_analytics


def clear_detect_directory():
    """Remove and recreate yolov5's detect output directory.

    Ensures detect.py always writes its annotated image to
    ``yolov5/runs/detect/exp/`` (a fresh run would otherwise go to
    ``exp2``, ``exp3``, ... and the right column would not find it).
    """
    detect_directory = "yolov5/runs/detect"
    if os.path.exists(detect_directory):
        shutil.rmtree(detect_directory)
    os.makedirs(detect_directory)


def save_image():
    """Render the two-column UI: input + detection on the left, annotated image on the right."""
    st.title("Hand Sign Detection")
    col1, col2 = st.columns(2)  # left: input/controls, right: annotated output
    pd_df = None  # detection dataframe; stays None until an image is processed

    with col1:
        genre = st.radio(
            "Upload Your Hand Sign",
            ('Browse', 'Camera'))
        if genre == 'Camera':
            uploaded_image = st.camera_input("Take a picture")
        else:
            uploaded_image = st.file_uploader(
                "Upload an image", type=["jpg", "jpeg", "png"])

        if uploaded_image is not None:
            # PIL decodes as RGB; force 3-channel RGB so RGBA/grayscale PNG
            # uploads don't break the conversion below, then swap to BGR
            # because cv2.imwrite expects BGR channel order.
            pil_image = Image.open(uploaded_image).convert("RGB")
            opencv_image = np.array(pil_image)
            opencv_image = cv2.cvtColor(opencv_image, cv2.COLOR_RGB2BGR)

            # Persist to disk: both torch.hub inference and detect.py below
            # take a file path as input.
            upload_image_path = "processed_image.jpg"
            cv2.imwrite(upload_image_path, opencv_image)

            st.text("Detection class probabilities")
            st.success(f"Processing Image as {upload_image_path}")

            # NOTE(review): the model is re-loaded on every Streamlit rerun;
            # consider st.cache_resource if the installed version supports it.
            model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')  # local weights
            results = model(upload_image_path)
            pd_df = results.pandas().xyxy[0]  # per-detection boxes, names, confidences

            # Wipe previous runs so the annotated image lands at runs/detect/exp/.
            clear_detect_directory()
            command = [
                "python", "yolov5/detect.py",
                "--weights", "best.pt",
                "--img", "416",
                "--conf", "0.50",
                "--source", upload_image_path,
            ]
            # list-form argv with shell=False: no shell-injection risk
            process = subprocess.run(command, capture_output=True, text=True)
            if process.returncode != 0:
                st.text(f"Error: {process.stderr}")
            st.text(pd_df)

    with col2:
        # detect.py names its output after the source file.
        detect_image_pred = "yolov5/runs/detect/exp/processed_image.jpg"
        if os.path.exists(detect_image_pred):
            st.text("Detected Gesture")
            st.image(detect_image_pred, caption="Detected Image",
                     use_column_width=True)
        else:
            st.text("Detection Threshold is 60")
            st.text("Detection Gesture")
            st.text("Note: Detection model is train on limited Gestures (as shown in example image) Make clean Gesture if not detected try another Gesture")
            st.image("Untitled_img.png")


streamlit_analytics.start_tracking()
save_image()
streamlit_analytics.stop_tracking()