import gradio as gr
import cv2
import torch
import time
import numpy as np
from ultralytics import YOLO
import os

# Optimize CPU usage
torch.set_num_threads(8)

MODEL_DIR = "models"
stop_processing = False  # Global flag to stop processing
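

# get_model_options scans MODEL_DIR for .pt weight files. The display name of each model is
# taken from the directory two levels above the .pt file, which presumably corresponds to a
# layout like models/<model_name>/<subfolder>/<weights>.pt.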
def get_model_options():
    models = {}
    for root, dirs, files in os.walk(MODEL_DIR):
        for file in files:
            if file.endswith(".pt"):
                model_name = os.path.basename(os.path.dirname(root))
                models[model_name] = os.path.join(root, file)
    return models


model_options = get_model_options()
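

# annotate_frame draws one rectangle and class label per detection. Ultralytics reports
# box.xyxy in absolute pixel coordinates of the input frame, so the boxes can be drawn
# directly without rescaling.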
def annotate_frame(frame, results):
    for box in results[0].boxes:
        xyxy = box.xyxy[0].numpy()
        class_id = int(box.cls[0].item())
        label = results[0].names[class_id]
        start_point = (int(xyxy[0]), int(xyxy[1]))
        end_point = (int(xyxy[2]), int(xyxy[3]))
        color = (0, 255, 0)
        thickness = 2
        cv2.rectangle(frame, start_point, end_point, color, thickness)
        font = cv2.FONT_HERSHEY_SIMPLEX
        font_scale = 0.5
        font_thickness = 1
        label_position = (int(xyxy[0]), int(xyxy[1] - 10))
        cv2.putText(frame, label, label_position, font, font_scale, color, font_thickness)
    return frame
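

# process_image reloads the selected weights on every call, which keeps the demo stateless
# at the cost of extra latency; caching the loaded YOLO object per model name would avoid
# the repeated load.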
def process_image(model_name, image, confidence_threshold, iou_threshold):
    model_path = model_options[model_name]
    model = YOLO(model_path).to('cpu')
    frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    with torch.inference_mode():
        results = model(frame, conf=confidence_threshold, iou=iou_threshold)
    annotated_frame = annotate_frame(frame, results)
    annotated_frame = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
    return annotated_frame, "N/A"
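

# run_inference is a generator: in video mode it yields progress strings while frames are
# processed and only yields the output file path at the end, which lets Gradio stream
# status updates to the UI.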
def run_inference(mode, model_name, image, video, confidence_threshold, iou_threshold):
    global stop_processing
    stop_processing = False  # Reset stop flag at the start

    if mode == "Image":
        if image is None:
            yield None, None, "Please upload an image."
            return
        annotated_img, fps = process_image(model_name, image, confidence_threshold, iou_threshold)
        yield annotated_img, None, fps
    else:
        if video is None:
            yield None, None, "Please upload a video."
            return
        model_path = model_options[model_name]
        model = YOLO(model_path).to('cpu')
        cap = cv2.VideoCapture(video)
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        if frame_count <= 0:
            frame_count = 1
        source_fps = cap.get(cv2.CAP_PROP_FPS) or 30  # Fall back to 30 FPS if the source rate is unknown
        output_frames = []
        fps_list = []
        processed_count = 0

        while not stop_processing:
            ret, frame = cap.read()
            if not ret:
                break
            start_time = time.time()
            with torch.inference_mode():
                results = model(frame, conf=confidence_threshold, iou=iou_threshold)
            annotated_frame = annotate_frame(frame, results)
            output_frames.append(annotated_frame)
            fps_val = 1 / (time.time() - start_time)
            fps_list.append(fps_val)
            processed_count += 1
            progress_fraction = processed_count / frame_count
            # Yield progress every few frames
            if processed_count % 5 == 0:
                yield None, None, f"Processing... {progress_fraction * 100:.2f}%"

        cap.release()

        # Report cancellation only after the capture has been released
        if stop_processing:
            yield None, None, "Processing canceled."
            return

        if len(output_frames) > 0 and not stop_processing:
            avg_fps = sum(fps_list) / len(fps_list) if fps_list else 0
            height, width, _ = output_frames[0].shape
            output_video_path = "output.mp4"
            # Write the annotated frames at the source frame rate so playback speed matches the input
            out = cv2.VideoWriter(output_video_path, cv2.VideoWriter_fourcc(*'mp4v'), source_fps, (width, height))
            for frame in output_frames:
                out.write(frame)
            out.release()
            yield None, output_video_path, f"Average FPS: {avg_fps:.2f}"
        elif not stop_processing:
            yield None, None, "No frames processed."
def cancel_processing():
    global stop_processing
    stop_processing = True
    return "Cancel signal sent."


def start_app():
    model_names = list(model_options.keys())

    with gr.Blocks() as app:
        # Instructional message shown at the top of the app
        gr.Markdown("""
### Welcome to the YOLO Inference App!

**How to Use:**

1. **Select Mode:**
   - Choose between **Image** or **Video** processing.
2. **Select Model:**
   - Pick a pre-trained YOLO model from the dropdown menu.
3. **Upload Your File:**
   - For **Image** mode, upload an image (e.g., `pothole.jpg`).
   - For **Video** mode, upload a video (e.g., `potholeall.mp4` or `electric bus fire.mp4`).
4. **Adjust Thresholds:**
   - **Confidence Threshold:** Minimum confidence a detection must reach to be kept.
   - **IoU Threshold:** Intersection over Union threshold used for non-maximum suppression.
5. **Start Processing:**
   - Click **Start Processing** to begin inference.
   - You can cancel at any time by clicking **Cancel Processing**.

**Example Files:**
- **Image:** `pothole.jpg`
- **Videos:** `potholeall.mp4`, `electric bus fire.mp4`
""")
        gr.Markdown("## YOLO Inference (Image or Video) with Progress & Cancel")

        with gr.Row():
            mode = gr.Radio(["Image", "Video"], value="Image", label="Mode")
            model_selector = gr.Dropdown(choices=model_names, label="Select Model", value=model_names[0])

        image_input = gr.Image(label="Upload Image", visible=True)
        video_input = gr.Video(label="Upload Video", visible=False)
        confidence_slider = gr.Slider(0.1, 1.0, value=0.3, step=0.1, label="Confidence Threshold")
        # The IoU range starts at 0.001 so the default value (and the example values below) fall within it
        iou_slider = gr.Slider(0.001, 1.0, value=0.001, step=0.001, label="IoU Threshold")
        annotated_image_output = gr.Image(label="Annotated Image", visible=True)
        annotated_video_output = gr.Video(label="Output Video", visible=False)
        fps_output = gr.Textbox(label="Status / Average FPS", interactive=False)
        start_button = gr.Button("Start Processing")
        cancel_button = gr.Button("Cancel Processing", variant="stop")

        # Example files live under the 'examples/' folder
        examples = gr.Examples(
            examples=[
                ["examples/pothole.jpg", None, 0.3, 0.001],           # Image example
                [None, "examples/potholeall.mp4", 0.3, 0.001],        # Video example
                [None, "examples/electric bus fire.mp4", 0.5, 0.001]  # Video example with a higher confidence threshold
            ],
            inputs=[image_input, video_input, confidence_slider, iou_slider]
        )

        def update_visibility(selected_mode):
            if selected_mode == "Image":
                return (
                    gr.update(visible=True),
                    gr.update(visible=False),
                    gr.update(visible=True),
                    gr.update(visible=False)
                )
            else:
                return (
                    gr.update(visible=False),
                    gr.update(visible=True),
                    gr.update(visible=False),
                    gr.update(visible=True)
                )

        mode.change(
            update_visibility,
            inputs=mode,
            outputs=[image_input, video_input, annotated_image_output, annotated_video_output]
        )

        start_button.click(
            fn=run_inference,
            inputs=[mode, model_selector, image_input, video_input, confidence_slider, iou_slider],
            outputs=[annotated_image_output, annotated_video_output, fps_output],
            queue=True
        )
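
        # start_button runs through the queue (queue=True) so the generator's yields stream
        # progress updates to the UI; the cancel click below skips the queue (queue=False) so
        # the stop flag can be set while a run is still in progress.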
        cancel_button.click(
            fn=cancel_processing,
            inputs=[],
            outputs=[fps_output],
            queue=False
        )

    return app


if __name__ == "__main__":
    app = start_app()
    app.launch()
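
# To run locally: place the .pt weights under models/<model_name>/..., put the example media
# in an examples/ folder next to this script, and run the script; the Gradio UI starts on the
# default local port.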