import supervision as sv
import gradio as gr
from ultralytics import YOLO
import sahi
import numpy as np
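# Fetch the sample images referenced in the Examples section of the UI below.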
sahi.utils.file.download_from_url(
    "https://cdn.discordapp.com/attachments/1133447881009934490/1147993846224011316/ex1.png",
    "ex1.png",
)
sahi.utils.file.download_from_url(
    "https://raw.githubusercontent.com/mensss/vvvvv/main/e7d86208-a7e1-4d2a-963c-af6102430b0c%20(1).jpg",
    "tu3.jpg",
)
annotator_bbox = sv.BoxAnnotator()
annotator_mask = sv.MaskAnnotator()

# Load the weights once at startup rather than on every request. Note that
# Hugging Face "blob" URLs serve an HTML page; the "resolve" form below serves
# the raw checkpoint file.
model = YOLO("https://huggingface.co/spaces/devisionx/Amazon_demo/resolve/main/amazon.pt")
def yolov8_inference(
    image: np.ndarray = None,
    conf_threshold: float = 0.5,
    iou_threshold: float = 0.45,
):
    # Gradio hands the callback an RGB array; flip to BGR for the model.
    image = image[:, :, ::-1].astype(np.uint8)
    print("conf_threshold:", conf_threshold, "iou_threshold:", iou_threshold)
    results = model(image, conf=conf_threshold, iou=iou_threshold, imgsz=1280)[0]
    # Flip back to RGB so the annotated result displays with correct colors.
    image = image[:, :, ::-1].astype(np.uint8)
    # Note: from_yolov8 was renamed from_ultralytics in newer supervision releases.
    detections = sv.Detections.from_yolov8(results)
    annotated_image = annotator_mask.annotate(scene=image, detections=detections)
    annotated_image = annotator_bbox.annotate(scene=annotated_image, detections=detections)
    return annotated_image
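# Optional smoke test for the pipeline outside the Gradio UI: a sketch that
# assumes OpenCV is installed and "ex1.png" was downloaded above. It is
# defined here but never called at startup.
def _smoke_test(path: str = "ex1.png"):
    import cv2  # assumed extra dependency, used only for this offline check
    bgr = cv2.imread(path)
    rgb = bgr[:, :, ::-1]  # mimic Gradio, which passes the callback an RGB array
    annotated = yolov8_inference(rgb, conf_threshold=0.5, iou_threshold=0.45)
    print("annotated image shape:", annotated.shape)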
# Each example row mirrors the inference inputs: [image path, confidence, IOU].
examples = [
    ["ex1.png", 0.5, 0.45],
    ["tu3.jpg", 0.5, 0.45],
]
outputs_images = [
    ["1.jpg"],  # Precomputed output for the first example
    ["2.jpg"],  # Precomputed output for the second example
]
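# Note: outputs_images is referenced only by the disabled gr.Examples call
# near the end of the file.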
readme_html = """
<html>
<head>
<style>
.description {
margin: 20px;
padding: 10px;
border: 1px solid #ccc;
}
</style>
</head>
<body>
<div class="description">
<p><strong>More details:</strong></p>
<p>We present a demo for object segmentation using a model trained on Amazon's ARMBench dataset. The model was trained on over 37,000 images and validated on 4,425 images.</p>
<p><strong>Usage:</strong></p>
<p>You can use our demo by uploading your product image, and it will provide you with a segmented image.</p>
<p><strong>Dataset:</strong></p>
<p>The model was trained on the ARMBench segmentation dataset, which comprises more than 50,000 images.</p>
<ul>
<li>Paper: ARMBench: An object-centric benchmark dataset for robotic manipulation</li>
<li>Authors: Chaitanya Mitash, Fan Wang, Shiyang Lu, Vikedo Terhuja, Tyler Garaas, Felipe Polido, Manikantan Nambi</li>
</ul>
<p><strong>License:</strong> This dataset is made available under the Creative Commons Attribution 4.0 International License (CC BY 4.0).</p>
<p>You can request a download link for this dataset from Amazon via this link: <a href="http://armbench.s3-website-us-east-1.amazonaws.com/data.html" target="_blank">Dataset Download</a></p>
<p>If you would like to learn more about this dataset, follow this link: <a href="https://www.amazon.science/blog/amazon-releases-largest-dataset-for-training-pick-and-place-robots" target="_blank">More Information</a></p>
</div>
</body>
</html>
"""
with gr.Blocks() as demo:
    gr.Markdown(
        """
<div style="text-align: center;">
<h1>Amazon Products Demo</h1>
Powered by <a href="https://Tuba.ai">Tuba</a>
</div>
"""
    )
    # Input and output images, side by side
    with gr.Row():
        image_input = gr.Image(label="Input Image")
        outputs = gr.Image(type="numpy", label="Output Image")
    # Threshold sliders for the detector
    with gr.Row():
        conf_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold")
    with gr.Row():
        IOU_Slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold")
button = gr.Button("Run")
# Define the event listener that connects the input and output components and triggers the function
button.click(fn=yolov8_inference, inputs=[image_input, conf_slider,IOU_Slider], outputs=outputs, api_name="yolov8_inference")
    gr.Examples(
        fn=yolov8_inference,
        examples=examples,
        inputs=[image_input, conf_slider, IOU_Slider],
        outputs=[outputs],
    )
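    # A possible refinement (an assumption, not part of the original app):
    # passing cache_examples=True to gr.Examples above would precompute the
    # annotated outputs at startup so example clicks show results immediately.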
    # gr.Examples(inputs=examples, outputs=outputs_images)
    # Render the HTML description below the layout
    gr.Markdown(readme_html)
# Launch the app
demo.launch(share=False) |
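# A minimal sketch of querying the running app programmatically with
# gradio_client. The Space id "devisionx/Amazon_demo" is inferred from the
# weights URL above and is an assumption; depending on the gradio_client
# version, the image argument may need to be wrapped with handle_file().
#
#   from gradio_client import Client
#   client = Client("devisionx/Amazon_demo")
#   result = client.predict("ex1.png", 0.5, 0.45, api_name="/yolov8_inference")
#   print(result)  # filepath of the annotated image returned by Gradio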