# fire/app.py — Streamlit fire-detection demo
# Provenance: Hugging Face Space file (author: ArchiMathur, revision 6f8a603, 4.43 kB).
import streamlit as st
from ultralytics import YOLO
import cv2
import numpy as np
from PIL import Image
# Load the trained YOLO weights (swap in your own model file here).
model = YOLO("best.pt")

st.title("Fire Detection in Forest")

# Let the user choose how input reaches the detector.
input_option = st.sidebar.selectbox(
    "Select Input Method",
    ["Upload Image", "Use Webcam", "Upload Video"],
)
if input_option == "Upload Image":
    # Single-image path: upload one picture, run the detector once,
    # and show the annotated result.
    uploaded_file = st.file_uploader("Choose an Image", type=["jpg", "jpeg", "png"])
    if uploaded_file is not None:
        img = Image.open(uploaded_file)
        st.image(img, caption='User Image')
        st.write("Classifying...")

        # PNG uploads may carry an alpha channel; force 3-channel RGB so the
        # model always receives the input layout it expects.
        img_np = np.array(img.convert("RGB"))

        # Run inference; conf=0.5 filters out low-confidence detections.
        results = model.predict(source=img_np, conf=0.5)

        # Draw a green box around each detection.
        for result in results:
            for box in result.boxes.xyxy:
                # `box` is a tensor row; coerce each coordinate to a Python int
                # for cv2. (The original called .astype(int), which torch
                # tensors do not support — that raised AttributeError.)
                x1, y1, x2, y2 = (int(v) for v in box[:4])
                img_np = cv2.rectangle(img_np, (x1, y1), (x2, y2), (0, 255, 0), 2)

        st.image(img_np, caption='Detected Fire', use_column_width=True)
elif input_option == "Use Webcam":
st.write("Starting webcam for live detection...")
# Start video capture
camera = cv2.VideoCapture(0) # 0 is the default camera
# Create a placeholder for the video feed
video_placeholder = st.empty()
# Main loop for live detection
while True:
ret, frame = camera.read()
if not ret:
st.write("Failed to capture image")
break
# Make predictions
results = model.predict(source=frame, conf=0.5)
# Draw bounding boxes on the frame
for result in results:
boxes = result.boxes.xyxy
for box in boxes:
x1, y1, x2, y2 = box[:4].astype(int)
frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
# Convert frame to RGB
rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Display the frame in the Streamlit app
video_placeholder.image(rgb_frame, channels="RGB", use_column_width=True)
# Break loop on user command
if st.button("Stop Detection"):
break
# Release the camera
camera.release()
elif input_option == "Upload Video":
uploaded_video = st.file_uploader("Choose a video", type=["mp4", "avi", "mov", "mkv"])
if uploaded_video is not None:
# Save the uploaded video temporarily
temp_video_path = "temp_video.mp4"
with open(temp_video_path, "wb") as f:
f.write(uploaded_video.read())
# Display the uploaded video
st.video(temp_video_path)
# Open the video file
video_capture = cv2.VideoCapture(temp_video_path)
# Create a placeholder for video frame processing
video_frame_placeholder = st.empty()
fire_detected = False
# Loop through video frames
while video_capture.isOpened():
ret, frame = video_capture.read()
if not ret:
break
# Make predictions using your fire detection model
results = model.predict(source=frame, conf=0.5)
# Check if any bounding boxes are detected (fire detected)
if len(results) > 0 and len(results[0].boxes) > 0:
fire_detected = True
# Draw bounding boxes on the frame if fire is detected
for result in results:
boxes = result.boxes.xyxy
for box in boxes:
x1, y1, x2, y2 = box[:4].astype(int)
frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
fire_detected = True # Set fire_detected flag if a bounding box is found
# Convert the frame to RGB format
rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Display the processed frame
video_frame_placeholder.image(rgb_frame, channels="RGB", use_column_width=True)
# Display detection result
if fire_detected:
st.write("Fire detected in the video.")
else:
st.write("No fire detected in the video.")
# Release the video capture
video_capture.release()