# Gradio Space: tennis-court YOLOv8 segmentation demo.
import supervision as sv
import gradio as gr
from ultralytics import YOLO
import sahi
import numpy as np
# Example images for the demo, fetched once at startup.
_EXAMPLE_IMAGES = {
    "t1.jpg": "https://transform.roboflow.com/i4FKV5acvhPPX6jb5fqVRBP900D3/0fdc23d04956a472db0c768a33974f50/thumb.jpg",
    "t2.jpg": "https://transform.roboflow.com/i4FKV5acvhPPX6jb5fqVRBP900D3/e11b147e2f547b8265eb8731299673f7/thumb.jpg",
    # NOTE(review): identical URL to t1.jpg — presumably a copy/paste slip; confirm the intended image.
    "t3.jpg": "https://transform.roboflow.com/i4FKV5acvhPPX6jb5fqVRBP900D3/0fdc23d04956a472db0c768a33974f50/thumb.jpg",
}
for _filename, _url in _EXAMPLE_IMAGES.items():
    sahi.utils.file.download_from_url(_url, _filename)

# Shared annotators, reused by every inference call.
annotatorbbox = sv.BoxAnnotator()
annotatormask = sv.MaskAnnotator()
# Hub URL for the segmentation weights used by the demo.
_MODEL_URL = "https://huggingface.co/spaces/devisionx/Sixth_Demo/blob/main/bestt_weight.pt"
_model = None  # lazily-created singleton; populated on the first inference call


def _get_model():
    """Return the shared YOLO model, loading the weights on first use only."""
    global _model
    if _model is None:
        _model = YOLO(_MODEL_URL)
    return _model


def yolov8_inference(
    image: gr.inputs.Image = None,
    conf_threshold: gr.inputs.Slider = 0.5,
    iou_threshold: gr.inputs.Slider = 0.45,
):
    """Run YOLOv8 segmentation on ``image`` and return it with masks and boxes drawn.

    Args:
        image: Image array as delivered by the Gradio Image component
            (assumed RGB channel order — the flips below convert to/from BGR;
            TODO confirm against the Gradio version in use).
        conf_threshold: Minimum detection confidence, 0..1.
        iou_threshold: NMS IoU threshold, 0..1.

    Returns:
        The image with mask and bounding-box annotations overlaid.
    """
    # Channel flip before inference (RGB -> BGR), cast to uint8 for the model.
    bgr = image[:, :, ::-1].astype(np.uint8)
    # Fix: the original re-downloaded/re-loaded the model on EVERY call;
    # the cached singleton loads it once per process.
    model = _get_model()
    results = model(bgr, imgsz=640, conf=conf_threshold, iou=iou_threshold)[0]
    # Flip back (and force a fresh contiguous copy) before drawing on it.
    rgb = bgr[:, :, ::-1].astype(np.uint8)
    detections = sv.Detections.from_yolov8(results)
    annotated_image = annotatormask.annotate(scene=rgb, detections=detections)
    annotated_image = annotatorbbox.annotate(scene=annotated_image, detections=detections)
    return annotated_image
# Removed: a dead module-level triple-quoted string that held an earlier,
# pre-Blocks set of component definitions (image input, sliders, output,
# title). As a bare string expression it had no runtime effect; the live UI
# is assembled with gr.Blocks() further down.
import os
# Example rows for gr.Examples: (image path, confidence threshold, IoU threshold).
examples = [[f"t{index}.jpg", 0.25, 0.45] for index in (1, 2, 3)]

# Pre-rendered output images, one per example row above.
outputs_images = [["1.jpg"], ["2.jpg"], ["3.jpg"]]
# Long-form description rendered beneath the UI.
# Fix: <div class="description"> was never closed, producing invalid HTML —
# the matching </div> is now emitted before </body>.
# NOTE(review): the intro mentions a "wheel Image dataset" with 696/199 images
# while the dataset section describes tennis courts with 2649/250/247 — one of
# the two passages is stale; confirm which is correct.
readme_html = """
<html>
<head>
<style>
.description {
margin: 20px;
padding: 10px;
border: 1px solid #ccc;
}
</style>
</head>
<body>
<div class="description">
<p><strong>More details:</strong></p>
<p> We present a demo for performing object segmentation with training a Yolov8-seg on wheel Image dataset. The model was trained on 696 training images and validated on 199 images.</p>
<p><strong>Usage:</strong></p>
<p>You can upload Tennis-Court images, and the demo will provide you with your segmented image.</p>
<p><strong>Dataset:</strong></p>
<p>The dataset contains 3,146 images and is formatted in COCO style. To facilitate usage with YOLOv8-seg, we have converted it into YOLOv8 format.</p>
<ul>
<li><strong>Training Set:</strong> It includes 2649 images and is intended for training the model.</li>
<li><strong>Validation Set:</strong> There are 250 images in the validation set, which is used for optimizing model parameters during development.</li>
<li><strong>Test Set:</strong> This set consists of 247 images and serves as a separate evaluation dataset to assess the performance of trained models.</li>
</ul>
<p><strong>License:</strong> This dataset is made available under the Creative Commons Attribution 4.0 International License (CC BY 4.0).</p>
<p>To access and download this dataset, please follow this link: <a href="https://universe.roboflow.com/tenniscourtworkspace/teslasegmentation" target="_blank">Dataset Download</a></p>
</div>
</body>
</html>
"""
# Assemble the Gradio UI: image in/out, two threshold sliders, a Run button,
# clickable examples, and the HTML description — then launch the app.
# NOTE(review): this block mixes the deprecated Gradio 3.x `gr.inputs.*` API
# with the newer `gr.Slider`/`gr.Image` API; it only runs on a Gradio version
# that still ships `gr.inputs` — confirm the pinned version before upgrading.
with gr.Blocks() as demo:
    # Centered title banner, written as raw HTML inside Markdown.
    gr.Markdown(
        """
<div style="text-align: center;">
<h1>Tennis Court Demo</h1>
Powered by <a href="https://Tuba.ai">Tuba</a>
</div>
"""
    )
    # Define the input components and add them to the layout
    with gr.Row():
        image_input = gr.inputs.Image()  # deprecated alias for gr.Image()
        # NOTE(review): type="filepath" while yolov8_inference returns a numpy
        # array — Gradio may coerce this, but verify on the installed version.
        outputs = gr.Image(type="filepath", label="Output Image")
    # Define the output component and add it to the layout
    with gr.Row():
        conf_slider=gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold" )
    with gr.Row():
        IOU_Slider=gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold")
    button = gr.Button("Run")
    # Define the event listener that connects the input and output components and triggers the function
    button.click(fn=yolov8_inference, inputs=[image_input, conf_slider,IOU_Slider], outputs=outputs, api_name="yolov8_inference")
    # Clickable example rows; clicking one populates the inputs above.
    gr.Examples(
        fn=yolov8_inference,
        examples=examples,
        inputs=[image_input, conf_slider,IOU_Slider],
        outputs=[outputs]
    )
    # gr.Examples(inputs=examples, outputs=outputs_images)
    # Add the description below the layout
    gr.Markdown(readme_html)
# Launch the app (share=False: no public tunnel; HF Spaces serves it directly)
demo.launch(share=False)