import numpy as np
import gradio as gr
import sahi
import supervision as sv
from ultralytics import YOLO

# Download the example images used by the demo.
sahi.utils.file.download_from_url(
    "https://transform.roboflow.com/aHClLv0V9gWdgkEi3TZOcyGv4zZ2/2b24b3f5ef9330424b9fda06ad38f98a/thumb.jpg",
    "m1.jpg",
)
sahi.utils.file.download_from_url(
    "https://transform.roboflow.com/aHClLv0V9gWdgkEi3TZOcyGv4zZ2/751a6fca76be162856174c24048b293d/thumb.jpg",
    "m2.jpg",
)

# Annotators that draw bounding boxes and segmentation masks.
annotator_bbox = sv.BoxAnnotator()
annotator_mask = sv.MaskAnnotator()

# Load the segmentation weights once at import time rather than on every call.
# Note: a "blob/main" URL returns an HTML page; "resolve/main" serves the raw file.
model = YOLO("https://huggingface.co/spaces/devisionx/Final_demo/resolve/main/best_weights.pt")

def yolov8_inference(
    image: np.ndarray,
    conf_threshold: float = 0.25,
    iou_threshold: float = 0.45,
):
    # Gradio delivers RGB; Ultralytics expects BGR for raw numpy arrays.
    bgr = image[:, :, ::-1].astype(np.uint8)
    results = model(bgr, imgsz=360, conf=conf_threshold, iou=iou_threshold)[0]
    # from_yolov8 was renamed to from_ultralytics in newer supervision releases.
    detections = sv.Detections.from_ultralytics(results)
    # Annotate a copy of the original RGB image so colors render correctly.
    annotated_image = annotator_mask.annotate(scene=image.copy(), detections=detections)
    annotated_image = annotator_bbox.annotate(scene=annotated_image, detections=detections)
    return annotated_image
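
# A minimal local smoke test, separate from the Gradio app (a sketch assuming
# the weights URL resolves and m1.jpg was downloaded above; cv2 comes from
# opencv-python, which ultralytics installs):
#
#   import cv2
#   rgb = cv2.imread("m1.jpg")[:, :, ::-1]            # BGR -> RGB, as Gradio would supply
#   out = yolov8_inference(rgb, conf_threshold=0.25, iou_threshold=0.45)
#   cv2.imwrite("m1_annotated.jpg", out[:, :, ::-1])  # RGB -> BGR for OpenCV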

# Example inputs: [image path, confidence threshold, IoU threshold].
examples = [
    ["m1.jpg", 0.25, 0.45],
    ["m2.jpg", 0.25, 0.45],
]

# HTML description shown below the demo layout.
readme_html = """
<html>
<head>
<style>
.description {
    margin: 20px;
    padding: 10px;
    border: 1px solid #ccc;
}
</style>
</head>
<body>
<div class="description">
<p><strong>More details:</strong></p>
<p>We present a demo for object segmentation with a YOLOv8-seg model trained on the Materials dataset. The model was trained on 4,424 images and validated on 464 images.</p>
<p><strong>Usage:</strong></p>
<p>Upload an image of materials and the demo returns the segmented result.</p>
<p><strong>Dataset:</strong></p>
<p>The dataset contains 6,365 images in COCO format; to use it with YOLOv8-seg, we converted it to YOLOv8 format.</p>
<ul>
    <li><strong>Training set:</strong> 4,424 images used to train the model.</li>
    <li><strong>Validation set:</strong> 464 images used to tune model parameters during development.</li>
    <li><strong>Test set:</strong> 1,477 images held out to evaluate trained models.</li>
</ul>
<p><strong>License:</strong> The dataset is released under the Creative Commons Attribution 4.0 International License (CC BY 4.0).</p>
<p>To access and download the dataset, follow this link: <a href="https://universe.roboflow.com/expand-ai/materials-semantic" target="_blank">Dataset Download</a></p>
</div>
</body>
</html>
"""

with gr.Blocks() as demo:
    gr.Markdown(
        """
        <div style="text-align: center;">
        <h1>Materials-Demo</h1>
        Powered by <a href="https://Tuba.ai">Tuba</a>
        </div>
        """
    )
    # Input and output images side by side. The output uses type="numpy"
    # because yolov8_inference returns an array, not a file path.
    with gr.Row():
        image_input = gr.Image(label="Input Image")
        image_output = gr.Image(type="numpy", label="Output Image")
    # Threshold sliders, matching the defaults of yolov8_inference.
    with gr.Row():
        conf_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold")
    with gr.Row():
        iou_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold")
    button = gr.Button("Run")
    # Connect the inputs and output to the inference function.
    button.click(
        fn=yolov8_inference,
        inputs=[image_input, conf_slider, iou_slider],
        outputs=image_output,
        api_name="yolov8_inference",
    )
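
    # The named endpoint can also be called remotely (a sketch assuming the
    # gradio_client package; newer versions may require wrapping the image
    # path with gradio_client.handle_file):
    #
    #   from gradio_client import Client
    #   client = Client("devisionx/Final_demo")
    #   result = client.predict("m1.jpg", 0.25, 0.45, api_name="/yolov8_inference")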
    gr.Examples(
        fn=yolov8_inference,
        examples=examples,
        inputs=[image_input, conf_slider, iou_slider],
        outputs=[image_output],
    )
    # Render the description below the layout; gr.HTML is used because
    # readme_html is a full HTML document rather than Markdown.
    gr.HTML(readme_html)

# Launch the app.
demo.launch(share=False)