Update app.py
app.py
CHANGED
@@ -18,7 +18,7 @@ import numpy as np
 from ultralytics import YOLO
 
 # load trained model
-model = YOLO("best.
+model = YOLO("best.onnx")
 
 # image inference function
 def predict_image(img, conf_threshold, iou_threshold):
@@ -67,7 +67,7 @@ def pil_to_cv2(pil_image):
 # process video, convert frame to PIL image
 def process_video(video_path):
     cap = cv2.VideoCapture(video_path)
-
+
     while cap.isOpened():
         ret, frame = cap.read()
         if not ret:
@@ -77,15 +77,13 @@ def process_video(video_path):
         result = model.predict(source=pil_img)
         for r in result:
             im_array = r.plot()
-
+            processed_frame = Image.fromarray(im_array[..., ::-1]) # Convert RGB back to BGR
+            yield processed_frame # generate frame one by one
     cap.release()
+
     # You may choose to display each frame or compile them back using cv2 or a similar library
     # Display the processed frames
-
-    cv2.imshow("Processed Frame", pil_to_cv2(frame))
-    if cv2.waitKey(25) & 0xFF == ord('q'):
-        break
-    cv2.destroyAllWindows()
+
     # return processed_frames[-1] # Example, returning the last processed frame
 
 # interface setting for video
@@ -94,11 +92,11 @@ video_iface = gr.Interface(
     inputs=[
         gr.Video(label="Upload Video", interactive=True)
     ],
-    outputs=gr.Image(type="
+    outputs=gr.Image(type="pil",label="Result"),
     title="Fire Detection using YOLOv8n on Gradio",
     description="Upload video for inference. The Ultralytics YOLOv8n trained model is used for inference.",
     examples=[
-
+        [os.path.join(video_directory, "video_fire_1.mp4")],
         [os.path.join(video_directory, "video_fire_2.mp4")],
     ]
 )
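For context, a minimal sketch of how the changed pieces fit together: process_video is now a generator, so each yielded PIL frame can stream to the gr.Image output while the video is still being processed. This is a hedged reconstruction, not the full app.py from this commit; it feeds the raw BGR frame straight to model.predict() instead of converting to PIL first, and it assumes "best.onnx", a Gradio version with generator (streaming) support, and a local example video are available.

import cv2
import gradio as gr
from PIL import Image
from ultralytics import YOLO

model = YOLO("best.onnx")  # exported weights, as in the diff

def process_video(video_path):
    cap = cv2.VideoCapture(video_path)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        for r in model.predict(source=frame):   # Ultralytics accepts BGR numpy frames
            im_array = r.plot()                 # annotated frame as a numpy array
            yield Image.fromarray(im_array[..., ::-1])  # flip channel order for PIL
    cap.release()

video_iface = gr.Interface(
    fn=process_video,  # generator function: each yield refreshes the output image
    inputs=gr.Video(label="Upload Video", interactive=True),
    outputs=gr.Image(type="pil", label="Result"),
    title="Fire Detection using YOLOv8n on Gradio",
)

if __name__ == "__main__":
    video_iface.launch()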