Commit
·
3a3705f
1
Parent(s):
906ebb4
Update app.py
Browse files
app.py
CHANGED
|
@@ -7,11 +7,11 @@ from sahi.utils.cv import visualize_object_predictions, read_image
|
|
| 7 |
from ultralyticsplus import YOLO
|
| 8 |
|
| 9 |
def yolov8_inference(
|
| 10 |
-
|
| 11 |
-
model_path:
|
| 12 |
-
image_size:
|
| 13 |
-
conf_threshold:
|
| 14 |
-
iou_threshold:
|
| 15 |
):
|
| 16 |
"""
|
| 17 |
YOLOv8 inference function
|
|
@@ -21,22 +21,15 @@ def yolov8_inference(
|
|
| 21 |
image_size: Image size
|
| 22 |
conf_threshold: Confidence threshold
|
| 23 |
iou_threshold: IOU threshold
|
| 24 |
-
|
|
|
|
| 25 |
"""
|
| 26 |
-
# Load your model using the specified model_path (You should adjust this part based on your model loading logic)
|
| 27 |
model = YOLO(model_path)
|
| 28 |
model.overrides['conf'] = conf_threshold
|
| 29 |
-
model.overrides['iou']
|
| 30 |
model.overrides['agnostic_nms'] = False # NMS class-agnostic
|
| 31 |
-
model.overrides['max_det'] = 1000
|
| 32 |
-
|
| 33 |
-
# Preprocess your image as needed (You should adjust this part based on your preprocessing logic)
|
| 34 |
-
image_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
|
| 35 |
-
image_rgb = cv2.cvtColor(image_cv, cv2.COLOR_BGR2RGB)
|
| 36 |
-
|
| 37 |
-
# Perform inference with your model (You should adjust this part based on your inference logic)
|
| 38 |
-
results = model(image_cv)
|
| 39 |
-
|
| 40 |
# Observe results (You should adjust this part based on your result extraction logic)
|
| 41 |
top_class_index = torch.argmax(results[0].probs).item()
|
| 42 |
Class1 = model.names[top_class_index]
|
|
@@ -44,14 +37,15 @@ def yolov8_inference(
|
|
| 44 |
return Class1
|
| 45 |
|
| 46 |
# Define Gradio input and output components
|
|
|
|
| 47 |
inputs = [
|
| 48 |
gr.Image(type="filepath", label="Input Image"),
|
| 49 |
-
gr.
|
| 50 |
-
|
| 51 |
-
gr.Slider(minimum=
|
| 52 |
-
gr.Slider(minimum=0.0, maximum=1.0, default=0.
|
|
|
|
| 53 |
]
|
| 54 |
-
|
| 55 |
outputs = gr.Textbox(label="Result")
|
| 56 |
|
| 57 |
title = "AI-Powered Tire Quality Inspection: YOLOv8s Enhanced Classification"
|
|
|
|
| 7 |
from ultralyticsplus import YOLO
|
| 8 |
|
| 9 |
def yolov8_inference(
    image: gr.Image = None,
    model_path: gr.Dropdown = None,
    image_size: gr.Slider = 640,
    conf_threshold: gr.Slider = 0.25,
    iou_threshold: gr.Slider = 0.45,
):
    """
    YOLOv8 inference function.

    Args:
        image: Input image (a file path, as produced by ``gr.Image(type="filepath")``)
        model_path: Model id / path passed to ``YOLO`` for loading
        image_size: Image size  # NOTE(review): currently unused in the body — confirm whether it should be forwarded to the model call
        conf_threshold: Confidence threshold
        iou_threshold: IOU threshold
    Returns:
        Name of the top-1 predicted class (a string), not a rendered image.
    """
    model = YOLO(model_path)
    model.overrides['conf'] = conf_threshold
    model.overrides['iou'] = iou_threshold
    model.overrides['agnostic_nms'] = False  # NMS class-agnostic
    model.overrides['max_det'] = 1000

    image = read_image(image)
    # BUG FIX: this edit removed the inference call but kept the use of
    # `results` below, which raised NameError at runtime. Run the model on
    # the loaded image to produce `results`.
    results = model(image)

    # Observe results: pick the class index with the highest classification
    # probability and map it to its human-readable name.
    top_class_index = torch.argmax(results[0].probs).item()
    Class1 = model.names[top_class_index]
    return Class1
|
| 38 |
|
| 39 |
# Gradio UI wiring: input widgets, the single text output, and the demo title.
inputs = [
    gr.Image(type="filepath", label="Input Image"),
    gr.Dropdown(
        ["foduucom/Tyre-Quality-Classification-AI"],
        default="foduucom/Tyre-Quality-Classification-AI",
        label="Model",
    ),
    gr.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
    gr.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
    gr.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
]

outputs = gr.Textbox(label="Result")

title = "AI-Powered Tire Quality Inspection: YOLOv8s Enhanced Classification"
|