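# app.py: Gradio demo for YOLOv8 instance segmentation on Amazon ARMBench products.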
import gradio as gr
import numpy as np
import sahi
import supervision as sv
from ultralytics import YOLO
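# Fetch the sample images used by gr.Examples below.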
sahi.utils.file.download_from_url(
    "https://cdn.discordapp.com/attachments/1133447881009934490/1147993846224011316/ex1.png",
    "ex1.png",
)
sahi.utils.file.download_from_url(
    "https://raw.githubusercontent.com/mensss/vvvvv/main/e7d86208-a7e1-4d2a-963c-af6102430b0c%20(1).jpg",
    "tu3.jpg",
)
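# Shared supervision annotators: masks are drawn first, then boxes on top.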
annotatorbbox = sv.BoxAnnotator()
annotatormask = sv.MaskAnnotator()
# Load the weights once at startup rather than on every request. Note the /resolve/
# URL: Hugging Face /blob/ links return an HTML page, not the raw .pt file.
model = YOLO("https://huggingface.co/spaces/devisionx/Amazon_demo/resolve/main/amazon.pt")


def yolov8_inference(
    image: np.ndarray = None,
    conf_threshold: float = 0.5,
    iou_threshold: float = 0.45,
):
    # Gradio delivers an RGB array; flip to BGR for inference.
    image = image[:, :, ::-1].astype(np.uint8)
    print("conf_threshold:", conf_threshold, "iou_threshold:", iou_threshold)
    results = model(image, conf=conf_threshold, iou=iou_threshold, imgsz=1280)[0]
    # Flip back to RGB before drawing annotations.
    image = image[:, :, ::-1].astype(np.uint8)
    # from_yolov8 was renamed to from_ultralytics in recent supervision releases.
    detections = sv.Detections.from_ultralytics(results)
    annotated_image = annotatormask.annotate(scene=image, detections=detections)
    annotated_image = annotatorbbox.annotate(scene=annotated_image, detections=detections)
    return annotated_image
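# A minimal local smoke test, as a sketch: it assumes OpenCV (cv2) is installed and
# reuses the "ex1.png" downloaded above. Gradio hands the function an RGB array, so
# the BGR image returned by cv2.imread is flipped before the call:
#
#   import cv2
#   rgb = cv2.imread("ex1.png")[:, :, ::-1]              # BGR -> RGB
#   annotated = yolov8_inference(rgb, 0.5, 0.45)         # RGB in, RGB out
#   cv2.imwrite("annotated.png", annotated[:, :, ::-1])  # back to BGR for saving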
examples = [
    ["ex1.png", 0.5, 0.45],
    ["tu3.jpg", 0.5, 0.45],
]
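# Each example row matches the input order wired up below: [image, confidence, IoU].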
readme_html = """
<html>
<head>
<style>
.description {
    margin: 20px;
    padding: 10px;
    border: 1px solid #ccc;
}
</style>
</head>
<body>
<div class="description">
<p><strong>More details:</strong></p>
<p>We present a demo for object segmentation using a model trained on Amazon's ARMBench dataset. The model was trained on over 37,000 images and validated on 4,425 images.</p>
<p><strong>Usage:</strong></p>
<p>Upload a product image and the demo will return a segmented version of it.</p>
<p><strong>Dataset:</strong></p>
<p>The model was trained on the ARMBench segmentation dataset, which comprises more than 50,000 images.</p>
<ul>
<li>Paper: ARMBench: An object-centric benchmark dataset for robotic manipulation</li>
<li>Authors: Chaitanya Mitash, Fan Wang, Shiyang Lu, Vikedo Terhuja, Tyler Garaas, Felipe Polido, Manikantan Nambi</li>
</ul>
<p><strong>License:</strong> This dataset is made available under the Creative Commons Attribution 4.0 International License (CC BY 4.0).</p>
<p>You can request a download link for this dataset from Amazon here: <a href="http://armbench.s3-website-us-east-1.amazonaws.com/data.html" target="_blank">Dataset Download</a></p>
<p>To learn more about the dataset, see: <a href="https://www.amazon.science/blog/amazon-releases-largest-dataset-for-training-pick-and-place-robots" target="_blank">more information</a></p>
</div>
</body>
</html>
"""
with gr.Blocks() as demo:
    gr.Markdown(
        """
        <div style="text-align: center;">
        <h1>Amazon Products Demo</h1>
        Powered by <a href="https://Tuba.ai">Tuba</a>
        </div>
        """
    )
    # Input and output images side by side.
    with gr.Row():
        image_input = gr.Image(label="Input Image")
        outputs = gr.Image(type="filepath", label="Output Image")
    # Threshold sliders, one per row.
    with gr.Row():
        conf_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold")
    with gr.Row():
        iou_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold")
    button = gr.Button("Run")
    # Wire the inputs and output to the inference function.
    button.click(
        fn=yolov8_inference,
        inputs=[image_input, conf_slider, iou_slider],
        outputs=outputs,
        api_name="yolov8_inference",
    )
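    # Pre-filled examples; selecting one populates the inputs above.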
    gr.Examples(
        fn=yolov8_inference,
        examples=examples,
        inputs=[image_input, conf_slider, iou_slider],
        outputs=[outputs],
    )
    # Add the description below the layout.
    gr.Markdown(readme_html)

# Launch the app.
demo.launch(share=False)
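# Once the app is running it can also be queried programmatically. A sketch using the
# gradio_client package (assumptions: the URL below is the default local address, the
# endpoint name follows api_name="yolov8_inference" above, and older gradio_client
# versions take a plain file path instead of handle_file):
#
#   from gradio_client import Client, handle_file
#   client = Client("http://127.0.0.1:7860")
#   result = client.predict(handle_file("ex1.png"), 0.5, 0.45, api_name="/yolov8_inference")
#   print(result)  # path to the annotated image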