# app.py — DentAIxpert Gradio Space
# (Hugging Face upload by Noursine, commit 9a9614b verified)
import cv2
import numpy as np
import gradio as gr
from ultralytics import YOLO
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from io import BytesIO
import os
# Load YOLOv8 model (adjust path if deploying to HF Spaces)
# NOTE(review): YOLO() will raise at import time if the weights file is
# missing — confirm "best.pt" ships alongside app.py in the Space repo.
model_path = "best.pt" # Must be placed in the root directory or loaded from Hugging Face Hub
model = YOLO(model_path)
def predict(image_path: str):
    """Run YOLOv8 detection on an image and return an annotated copy.

    Parameters
    ----------
    image_path : str
        Path to the input image (supplied by the Gradio ``filepath`` input).

    Returns
    -------
    tuple[np.ndarray | None, str]
        The annotated image as an RGB NumPy array (``None`` if the image
        could not be read) and a human-readable status message.
    """
    # Load the image once and reuse it for both display and inference;
    # the original passed the path to YOLO, re-reading the file from disk.
    image = cv2.imread(image_path)
    if image is None:
        return None, "Error: Could not load image."
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Ultralytics accepts BGR NumPy arrays directly.
    results = model(image)[0]
    class_names = model.names if hasattr(model, 'names') else None

    fig, ax = plt.subplots(1, figsize=(10, 10))
    try:
        ax.imshow(image_rgb)
        # One rectangle + class label per detection.
        for box, cls_id in zip(results.boxes.xyxy, results.boxes.cls):
            x1, y1, x2, y2 = (int(v) for v in box)
            cls_id = int(cls_id)
            label = class_names[cls_id] if class_names else str(cls_id)
            rect = patches.Rectangle((x1, y1), x2 - x1, y2 - y1,
                                     linewidth=2, edgecolor='blue', facecolor='none')
            ax.add_patch(rect)
            ax.text(x1, y1, label, fontsize=10, color='blue', backgroundcolor='black')
        ax.axis('off')

        # Render the annotated figure to an in-memory PNG.
        buf = BytesIO()
        plt.savefig(buf, format='png', bbox_inches='tight')
    finally:
        # Always release the figure so repeated requests don't leak memory,
        # even if drawing/saving raises.
        plt.close(fig)

    buf.seek(0)
    annotated_np = np.array(Image.open(buf).convert("RGB"))

    # Give the user explicit feedback when nothing was detected.
    if len(results.boxes) == 0:
        return annotated_np, "YOLOv8 ran successfully but detected no objects."
    return annotated_np, "YOLOv8 Object Detection completed successfully."
# UI Style
# Custom CSS injected into the Gradio app: light-blue gradient background,
# dark-blue headings/paragraphs, and bold dark-blue buttons with white text.
css = """
.gradio-container {
background: linear-gradient(to right, #a1c4fd, #c2e9fb);
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
}
h1, p {
color: #0d3b66;
}
.gr-button {
background-color: #0d3b66 !important;
color: white !important;
font-weight: bold;
}
"""
# Gradio interface
# Two-column layout: upload + button on the left, annotated image and a
# status textbox on the right.
with gr.Blocks(css=css) as demo:
    # Page header and short usage hint.
    gr.Markdown("<h1 style='text-align:center;'>DentAIxpert - YOLOv8 Object Detection</h1>")
    gr.Markdown("<p style='text-align:center;'>Upload a panoramic dental X-ray to see YOLOv8 detections with class labels.</p>")
    with gr.Row():
        with gr.Column(scale=1):
            # "filepath" type hands predict() a path string, not an array.
            input_image = gr.Image(type="filepath", label="Upload Panoramic X-ray")
            analyze_button = gr.Button("Analyze")
        with gr.Column(scale=1):
            output_image = gr.Image(label="Detected Objects")
            results_text = gr.Textbox(label="Status Message", lines=2, interactive=False)
    # predict() returns (annotated image array, status string), mapped to
    # the two outputs in order.
    analyze_button.click(fn=predict, inputs=input_image, outputs=[output_image, results_text])
demo.launch()