# import gradio as gr
# import cv2
# import numpy as np
# import onnxruntime as ort

# # Load the ONNX model using onnxruntime
# onnx_model_path = "Model_IV.onnx"  # Update with your ONNX model path
# session = ort.InferenceSession(onnx_model_path)

# # Function to perform object detection with the ONNX model
# def detect_objects(frame, confidence_threshold=0.5):
#     # Convert the frame from BGR (OpenCV) to RGB
#     image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

#     # Preprocessing: resize and normalize the image.
#     # Assuming the model input is 640x640; update according to your model's input size.
#     input_size = (640, 640)
#     image_resized = cv2.resize(image, input_size)
#     image_normalized = image_resized / 255.0  # Normalize to [0, 1]
#     image_input = np.transpose(image_normalized, (2, 0, 1))  # HWC -> CHW
#     image_input = np.expand_dims(image_input, axis=0).astype(np.float32)  # Add batch dimension

#     # Perform inference
#     inputs = {session.get_inputs()[0].name: image_input}
#     outputs = session.run(None, inputs)

#     # # Assuming the model outputs come as [boxes, confidences, class_probs]
#     # boxes, confidences, class_probs = outputs

#     # # Post-processing: filter boxes by confidence threshold
#     # detections = []
#     # for i, confidence in enumerate(confidences[0]):
#     #     if confidence >= confidence_threshold:
#     #         x1, y1, x2, y2 = boxes[0][i]
#     #         class_id = np.argmax(class_probs[0][i])  # Class with highest probability
#     #         detections.append((x1, y1, x2, y2, confidence, class_id))

#     # # Draw bounding boxes and labels on the image
#     # for (x1, y1, x2, y2, confidence, class_id) in detections:
#     #     color = (0, 255, 0)  # Green bounding boxes
#     #     cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), color, 2)
#     #     label = f"Class {class_id}: {confidence:.2f}"
#     #     cv2.putText(image, label, (int(x1), int(y1) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

#     # # Convert the image back to BGR for display in Gradio
#     # image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
#     return outputs
# # Gradio interface using the webcam for real-time object detection,
# # with a slider for the confidence threshold
# iface = gr.Interface(
#     fn=detect_objects,
#     inputs=gr.Image(sources=["webcam"], type="numpy"),
#     # inputs=[
#     #     gr.Video(sources="webcam", type="numpy"),  # Webcam input
#     #     gr.Slider(minimum=0.0, maximum=1.0, value=0.5, label="Confidence Threshold"),
#     # ],
#     outputs="image",  # Show output image with bounding boxes
# )
# iface.launch()
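# The attempt above returns the raw ONNX tensors, which Gradio cannot render as
# an image. A minimal decoding sketch, assuming the export follows YOLOv8's usual
# single output of shape (1, 4 + num_classes, num_anchors) with (cx, cy, w, h)
# boxes; an illustration only, not the method this file ended up using:
# def decode_yolov8_output(output, conf_threshold=0.5, iou_threshold=0.45):
#     preds = np.squeeze(output[0]).T              # (num_anchors, 4 + num_classes)
#     scores = preds[:, 4:].max(axis=1)            # Best class score per anchor
#     keep = scores >= conf_threshold
#     preds, scores = preds[keep], scores[keep]
#     class_ids = preds[:, 4:].argmax(axis=1)
#     # Convert (cx, cy, w, h) to the (x, y, w, h) layout cv2.dnn.NMSBoxes expects
#     boxes = np.stack([preds[:, 0] - preds[:, 2] / 2,
#                       preds[:, 1] - preds[:, 3] / 2,
#                       preds[:, 2], preds[:, 3]], axis=1)
#     idxs = cv2.dnn.NMSBoxes(boxes.tolist(), scores.tolist(),
#                             conf_threshold, iou_threshold)
#     return [(boxes[i], scores[i], class_ids[i]) for i in np.array(idxs).flatten()]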
###
# import gradio as gr
# import cv2
# from huggingface_hub import hf_hub_download
# from gradio_webrtc import WebRTC
# from twilio.rest import Client
# import os
# from inference import YOLOv8

# model_file = hf_hub_download(
#     repo_id="aje6/ASL-Fingerspelling-Detection", filename="onnx/Model_IV.onnx"
# )
# model = YOLOv8(model_file)

# account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
# auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
# if account_sid and auth_token:
#     client = Client(account_sid, auth_token)
#     token = client.tokens.create()
#     rtc_configuration = {
#         "iceServers": token.ice_servers,
#         "iceTransportPolicy": "relay",
#     }
# else:
#     rtc_configuration = None

# def detection(image, conf_threshold=0.3):
#     image = cv2.resize(image, (model.input_width, model.input_height))
#     new_image = model.detect_objects(image, conf_threshold)
#     return cv2.resize(new_image, (500, 500))
# css = """.my-group {max-width: 600px !important; max-height: 600px !important;}
#          .my-column {display: flex !important; justify-content: center !important; align-items: center !important;}"""
# with gr.Blocks(css=css) as demo:
#     gr.HTML(
#         """
#         <h1 style='text-align: center'>
#         YOLOv10 Webcam Stream (Powered by WebRTC ⚡️)
#         </h1>
#         """
#     )
#     gr.HTML(
#         """
#         <h3 style='text-align: center'>
#         <a href='https://arxiv.org/abs/2405.14458' target='_blank'>arXiv</a> | <a href='https://github.com/THU-MIG/yolov10' target='_blank'>github</a>
#         </h3>
#         """
#     )
#     with gr.Column(elem_classes=["my-column"]):
#         with gr.Group(elem_classes=["my-group"]):
#             image = WebRTC(label="Stream", rtc_configuration=rtc_configuration)
#             conf_threshold = gr.Slider(
#                 label="Confidence Threshold",
#                 minimum=0.0,
#                 maximum=1.0,
#                 step=0.05,
#                 value=0.30,
#             )
#             image.stream(
#                 fn=detection, inputs=[image, conf_threshold], outputs=[image], time_limit=10
#             )

# if __name__ == "__main__":
#     demo.launch()
# import gradio as gr
# import numpy as np
# import cv2
# from ultralytics import YOLO

# model = YOLO('Model_IV.pt')

# def transform_cv2(frame, transform):
#     if transform == "cartoon":
#         # prepare color
#         img_color = cv2.pyrDown(cv2.pyrDown(frame))
#         for _ in range(6):
#             img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
#         img_color = cv2.pyrUp(cv2.pyrUp(img_color))

#         # prepare edges
#         img_edges = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
#         img_edges = cv2.adaptiveThreshold(
#             cv2.medianBlur(img_edges, 7),
#             255,
#             cv2.ADAPTIVE_THRESH_MEAN_C,
#             cv2.THRESH_BINARY,
#             9,
#             2,
#         )
#         img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)

#         # combine color and edges
#         img = cv2.bitwise_and(img_color, img_edges)
#         return img
#     elif transform == "edges":
#         # perform edge detection
#         img = cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR)
#         return img
#     else:
#         return np.flipud(frame)

# with gr.Blocks() as demo:
#     with gr.Row():
#         with gr.Column():
#             transform = gr.Dropdown(choices=["cartoon", "edges", "flip"],
#                                     value="flip", label="Transformation")
#             input_img = gr.Image(sources=["webcam"], type="numpy")
#         with gr.Column():
#             output_img = gr.Image(streaming=True)
#     dep = input_img.stream(transform_cv2, [input_img, transform], [output_img],
#                            time_limit=30, stream_every=0.1, concurrency_limit=30)

# if __name__ == "__main__":
#     demo.launch()
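# The streaming demo above only exercises the cv2 filters; the loaded YOLO model
# is never called. A minimal sketch of wiring the detector into the same
# .stream() pipeline (an illustration, not the configuration this file runs):
# def detect_stream(frame, conf_threshold=0.3):
#     # ultralytics accepts a numpy frame and a confidence threshold directly
#     return model(frame, conf=conf_threshold)[0].plot()
# # ...then pass detect_stream to input_img.stream() in place of transform_cv2.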
###
# import gradio as gr
# import torch
# import cv2

# # Load the model via torch.hub and swap in the trained weights.
# # (Note: ultralytics does not publish YOLOv8 through torch.hub, which is
# # likely why this attempt was abandoned.)
# model = torch.hub.load('ultralytics/yolov8', 'yolov8s', trust_repo=True)
# model.load_state_dict(torch.load('Model_IV'))

# def inference(img):
#     results = model(img)
#     annotated_img = results.render()[0]
#     return annotated_img

# iface = gr.Interface(fn=inference, inputs="webcam", outputs="image")
# iface.launch()
import gradio as gr
import torch
from PIL import Image
import torchvision.transforms as T
from ultralytics import YOLO
# import onnxruntime as ort
import cv2
import numpy as np
# Load the model (the YOLO wrapper reads the checkpoint and handles eval mode itself)
model = YOLO("Model_IV.pt")

# Earlier attempts at loading the weights by hand:
# model = torch.load("Model_IV.pt")
# checkpoint = torch.load("Model_IV.pt")
# model.load_state_dict(checkpoint)  # Load the saved weights
# model.eval()  # Set the model to evaluation mode

# Or load the ONNX export instead:
# model = ort.InferenceSession("Model_IV.onnx")
# Torchvision preprocessing is not used in the active path: the YOLO wrapper
# resizes and normalizes frames itself.
# transform = T.Compose([
#     T.Resize((224, 224)),  # Adjust to your model's input size
#     T.ToTensor(),
# ])

def predict(image):
    # # Make a raw prediction with the torch model instead:
    # img_tensor = transform(image).unsqueeze(0)  # Add batch dimension
    # with torch.no_grad():
    #     output = model(img_tensor)

    # Run the detector; ultralytics accepts the numpy frame Gradio provides
    # and handles preprocessing (resize, normalize, HWC -> CHW) internally.
    results = model(image)
    annotated_img = results[0].plot()  # Draw boxes and labels onto the frame
    return annotated_img
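# If structured detections are needed instead of a rendered frame, the same
# Results object exposes boxes, confidences, and class ids directly (a sketch
# following the ultralytics API; class names depend on how Model_IV was trained):
# def extract_detections(image, conf_threshold=0.5):
#     r = model(image, conf=conf_threshold)[0]
#     return [
#         (box.tolist(), float(conf), r.names[int(cls)])
#         for box, conf, cls in zip(r.boxes.xyxy, r.boxes.conf, r.boxes.cls)
#     ]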
# # Alternative: preprocess the image for the ONNX session by hand.
# # Get name and shape of the model's inputs
# input_name = model.get_inputs()[0].name
# input_shape = model.get_inputs()[0].shape

# # Resize the image to the model's input shape
# image = cv2.resize(image, (input_shape[2], input_shape[3]))
# original_image_shape = image.shape
# print("Original image shape:", original_image_shape)
# # Transpose the image to the model's CHW input layout
# image = np.transpose(image, (2, 0, 1))  # reshape(3, 640, 640) would scramble pixels
# # Normalize the input image using ImageNet-style normalization
# mean = [0.485, 0.456, 0.406]
# std = [0.229, 0.224, 0.225]
# mean = np.expand_dims(mean, axis=(1, 2))
# std = np.expand_dims(std, axis=(1, 2))
# image = (image / 255.0 - mean) / std

# # Convert the image to a numpy array and add a batch dimension
# if len(input_shape) == 4 and input_shape[0] == 1:
#     image = np.expand_dims(image, axis=0)
# image = image.astype(np.float32)
# print("Input image shape:", image.shape)

# # Make prediction
# output = model.run(None, {input_name: image})
# # print("Output shape:", output.shape)
# # print("type output:", type(output))
# # print(output)

# # Postprocess output image
# annotated_img = output[0]
# # annotated_img = (output[0] / 255.0 - mean) / std
# # annotated_img = classes[output[0][0].argmax(0)]
# print("Annotated image type before normalization:", type(annotated_img))
# # print("Annotated image before normalization:", annotated_img)
# print("Min value of image before normalization:", np.min(annotated_img))
# print("Max value of image before normalization:", np.max(annotated_img))

# # # Normalize output image using ImageNet-style normalization (again)
# # annotated_img = (annotated_img / 255.0 - mean) / std
# # Normalize output image using min-max normalization
# min_val = np.min(annotated_img)
# max_val = np.max(annotated_img)
# annotated_img = (annotated_img - min_val) / (max_val - min_val)
# print("Min value of image after normalization:", np.min(annotated_img))
# print("Max value of image after normalization:", np.max(annotated_img))
# print("annotated_img type after normalization:", type(annotated_img))
# # print("annotated_img shape after normalization:", annotated_img.shape)

# # Reshape the image to match the original image shape
# print("annotated_img shape before reshape:", annotated_img.shape)
# annotated_img = annotated_img.reshape(original_image_shape)
# print("annotated_img shape after reshape:", annotated_img.shape)

# # Convert to PIL Image
# annotated_img = Image.fromarray(annotated_img)
# print("PIL Image type:", type(annotated_img))
# # print("PIL Image shape:", annotated_img.shape)
# return annotated_img
# Gradio interface
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(sources=["webcam"]),  # Accepts a webcam image
    outputs="image",  # Annotated frame; customize based on your output format
)
if __name__ == "__main__":
    demo.launch()
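# Several of the commented attempts above expect an ONNX file. If Model_IV.onnx
# ever needs to be regenerated from the .pt weights, ultralytics can export it
# (a one-off utility sketch, not part of the app's runtime path):
# from ultralytics import YOLO
# YOLO("Model_IV.pt").export(format="onnx", imgsz=640)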