Spaces:
Build error
Build error
Mnjar committed on
Commit ·
1b64c18
1
Parent(s): 8abf2b4
Add app.py
Browse files
.DS_Store
ADDED
|
Binary file (6.15 kB). View file
|
|
|
app.py
CHANGED
|
@@ -1,9 +1,11 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import numpy as np
|
|
|
|
|
|
|
| 3 |
from ultralytics import YOLO
|
| 4 |
|
| 5 |
# Load YOLO model
|
| 6 |
-
model = YOLO('TrashDetection/trash_detection.pt')
|
| 7 |
|
| 8 |
def predict(image):
|
| 9 |
"""
|
|
@@ -11,29 +13,67 @@ def predict(image):
|
|
| 11 |
Args:
|
| 12 |
image (PIL.Image): Input image.
|
| 13 |
Returns:
|
| 14 |
-
|
| 15 |
"""
|
| 16 |
# Convert PIL image to numpy array
|
| 17 |
img = np.array(image)
|
| 18 |
|
| 19 |
-
#
|
| 20 |
-
results
|
|
|
|
| 21 |
|
| 22 |
-
#
|
| 23 |
-
|
| 24 |
-
# Select necessary columns and convert to list
|
| 25 |
-
output = predictions[['name', 'confidence', 'xmin', 'ymin', 'xmax', 'ymax']].values.tolist()
|
| 26 |
|
| 27 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 28 |
|
| 29 |
# Create Gradio interface
|
| 30 |
iface = gr.Interface(
|
| 31 |
fn=predict,
|
| 32 |
inputs=gr.Image(type="pil"), # Input image as PIL
|
| 33 |
-
outputs=gr.
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
|
|
|
| 37 |
title="YOLO Object Detection",
|
| 38 |
description="Upload an image to detect objects using YOLO."
|
| 39 |
)
|
|
|
|
import gradio as gr
import numpy as np
import pandas as pd
from PIL import Image, ImageDraw, ImageFont
from pathlib import Path
from ultralytics import YOLO

# Load the YOLO model from a path relative to this file.
# The previous absolute path (/Users/fajar/...) only existed on the
# author's machine and breaks any other deployment (e.g. the Space build).
MODEL_PATH = Path(__file__).parent / "TrashDetection" / "trash_detection.pt"
model = YOLO(str(MODEL_PATH))
def predict(image):
    """
    Run YOLO object detection on an image and annotate it.

    Args:
        image (PIL.Image): Input image.

    Returns:
        tuple: (PIL.Image with bounding boxes drawn,
                list of [label, confidence, x1, y1, x2, y2] per detection).
    """
    # Convert PIL image to numpy array for the model.
    img = np.array(image)

    results = model(img)  # Run the model on the image
    if isinstance(results, list):
        results = results[0]  # ultralytics returns one Results object per image

    # BUG FIX: the old code read `results[0].boxes` AFTER already unwrapping
    # the list above; Results.__getitem__ narrows to a single detection, so
    # only the first box was ever reported. Use all boxes of this image.
    boxes = results.boxes

    # Collect the detections into a DataFrame.
    df = pd.DataFrame(boxes.xyxy.cpu().numpy(), columns=['x1', 'y1', 'x2', 'y2'])
    df['confidence'] = boxes.conf.cpu().numpy()
    df['class'] = boxes.cls.cpu().numpy()

    # Map numeric class ids to human-readable labels.
    class_names = results.names  # Class names dictionary
    df['label'] = df['class'].apply(lambda c: class_names[int(c)])

    # Select the necessary columns and convert to a list of lists.
    output = df[['label', 'confidence', 'x1', 'y1', 'x2', 'y2']].values.tolist()

    # Draw bounding boxes on the image.
    pil_img = Image.fromarray(img)  # Convert numpy array back to PIL image
    draw = ImageDraw.Draw(pil_img)

    # Prefer a readable TrueType font; fall back to PIL's built-in default
    # when the macOS font path is not available (e.g. on a Linux Space).
    try:
        font = ImageFont.truetype("/Library/Fonts/Arial.ttf", 24)
    except IOError:
        font = ImageFont.load_default()

    for _, row in df.iterrows():
        x1, y1, x2, y2 = row['x1'], row['y1'], row['x2'], row['y2']

        # Draw bounding box
        draw.rectangle([x1, y1, x2, y2], outline="red", width=3)

        # Draw label and confidence; clamp y so the text is not drawn
        # above the top edge for boxes near the image border.
        text = f"{row['label']} ({row['confidence']:.2f})"
        draw.text((x1, max(0, y1 - 30)), text, font=font, fill="red")

    # Return the annotated image and the prediction rows.
    return pil_img, output
# Create Gradio interface: one image in, annotated image + detection table out.
iface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),  # Input image as PIL
    outputs=[gr.Image(type="pil"),  # Output image with bounding boxes
             gr.Dataframe(
                 headers=["Label", "Confidence", "Xmin", "Ymin", "Xmax", "Ymax"],
                 label="Predictions"
             )],
    title="YOLO Object Detection",
    description="Upload an image to detect objects using YOLO."
)

# BUG FIX: the interface was constructed but never launched, so running
# this script started nothing. Guarded so importing the module stays
# side-effect free.
if __name__ == "__main__":
    iface.launch()