# Hugging Face Space (scraped listing) — reported build status: "Runtime error".
| # import cv2 | |
| # import numpy as np | |
| # import gradio as gr | |
| # import torch | |
| # from ultralytics import YOLO | |
| # # Load the YOLO model (adjust the path to your model weights and config) | |
| # model = torch.hub.load('best.pt') # Change to your model path | |
| # def detect_fire(frame): | |
| # # Convert the frame to RGB (YOLO models usually expect this format) | |
| # img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) | |
| # # Use the model to detect objects | |
| # results = model(img) | |
| # # Get the predictions | |
| # predictions = results.pred[0] # Assuming a single image | |
| # # Draw boxes on the detected fire objects | |
| # for *xyxy, conf, cls in predictions: | |
| # label = model.names[int(cls)] | |
| # if label == "fire": # Adjust based on your class name for fire | |
| # cv2.rectangle(img, (int(xyxy[0]), int(xyxy[1])), (int(xyxy[2]), int(xyxy[3])), (255, 0, 0), 2) | |
| # cv2.putText(img, f"{label} {conf:.2f}", (int(xyxy[0]), int(xyxy[1]) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2) | |
| # return img | |
| # def webcam_demo(): | |
| # # Start video capture from webcam | |
| # cap = cv2.VideoCapture(0) | |
| # while True: | |
| # ret, frame = cap.read() | |
| # if not ret: | |
| # break | |
| # # Detect fire in the current frame | |
| # frame_with_detections = detect_fire(frame) | |
| # # Display the result | |
| # cv2.imshow("Fire Detection", frame_with_detections) | |
| # if cv2.waitKey(1) & 0xFF == ord('q'): | |
| # break | |
| # cap.release() | |
| # cv2.destroyAllWindows() | |
| # # Create a Gradio interface | |
| # iface = gr.Interface( | |
| # fn=webcam_demo, | |
| # inputs=[], | |
| # outputs="image", | |
| # title="Fire Detection using Webcam", | |
| # description="This application detects fire using a webcam feed." | |
| # ) | |
| # # Launch the Gradio app | |
| # iface.launch() | |
| # import gradio as gr | |
| # import PIL.Image as Image | |
| # from ultralytics import YOLO | |
| # # Load the YOLOv8 model | |
| # model = YOLO("best.pt") | |
| # def predict_image(img, conf_threshold, iou_threshold): | |
| # """Predicts objects in an image using a YOLOv8 model with adjustable confidence and IOU thresholds.""" | |
| # results = model.predict( | |
| # source=img, | |
| # conf=conf_threshold, | |
| # iou=iou_threshold, | |
| # show_labels=True, | |
| # show_conf=True, | |
| # imgsz=640, | |
| # ) | |
| # for r in results: | |
| # im_array = r.plot() | |
| # im = Image.fromarray(im_array[..., ::-1]) | |
| # return im | |
| # iface = gr.Interface( | |
| # fn=predict_image, | |
| # inputs=[ | |
| # gr.Image(source="webcam", type="pil", label="Capture Image"), | |
| # gr.Slider(minimum=0, maximum=1, value=0.25, label="Confidence threshold"), | |
| # gr.Slider(minimum=0, maximum=1, value=0.45, label="IoU threshold"), | |
| # ], | |
| # outputs=gr.Image(type="pil", label="Result"), | |
| # live=True, # Enables real-time processing | |
| # title="Ultralytics Gradio", | |
| # description="Capture images from your webcam for real-time inference using the Ultralytics YOLOv8n model.", | |
| # ) | |
| # if __name__ == "__main__": | |
| # iface.launch() | |
| import cv2 | |
| import gradio as gr | |
| from ultralytics import YOLO | |
| # Load the YOLO model (update the path to your fire detection model weights) | |
| model = YOLO('best.pt') # Replace 'path/to/your/best.pt' with the actual path to your model file | |
| def detect_fire(frame): | |
| # Convert the frame to RGB format (YOLO expects this format) | |
| img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) | |
| # Perform fire detection using the YOLO model | |
| results = model(img) | |
| # Draw bounding boxes and labels on the detected fire areas | |
| for bbox in results[0].boxes: | |
| xyxy = bbox.xyxy[0] # Bounding box coordinates | |
| conf = bbox.conf[0] # Confidence score | |
| cls = int(bbox.cls[0]) # Class ID | |
| label = model.names[cls] # Class name | |
| if label == "fire": # Make sure this matches the label in your trained model | |
| # Draw a rectangle around the detected fire | |
| cv2.rectangle(img, (int(xyxy[0]), int(xyxy[1])), (int(xyxy[2]), int(xyxy[3])), (255, 0, 0), 2) | |
| # Put the label text above the rectangle | |
| cv2.putText(img, f"{label} {conf:.2f}", (int(xyxy[0]), int(xyxy[1]) - 10), | |
| cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2) | |
| # Convert the image back to BGR format for display | |
| return cv2.cvtColor(img, cv2.COLOR_RGB2BGR) | |
| # Create a Gradio interface for fire detection | |
| iface = gr.Interface( | |
| fn=detect_fire, | |
| inputs=gr.Image(source="webcam", tool="editor", streaming=True), # Use webcam as the input source | |
| outputs="image", | |
| title="Fire Detection using YOLO", | |
| description="This application detects fire in real-time using a YOLO model." | |
| ) | |
| # Launch the Gradio app | |
| iface.launch() | |