# NOTE(review): the following header text is residue from the HuggingFace
# Spaces page this file was scraped from ("Spaces — Runtime error").
"""Gradio app: plastic-waste detection with a custom-trained YOLOv5 model."""

import os
import shutil

import cv2
import gradio as gr
import torch

# Load the custom weights once at startup. torch.hub fetches the
# ultralytics/yolov5 repo (network access required on first run) and
# builds an AutoShape model from the local 'best.pt' checkpoint.
model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')
def detect_image(image):
    """Run YOLOv5 inference on a single image and return the annotated copy.

    `image` is the numpy array Gradio hands over; `results.render()` draws
    the detection boxes in place and returns the list of annotated frames,
    of which there is exactly one here.
    """
    predictions = model(image)
    annotated_frames = predictions.render()
    return annotated_frames[0]
def detect_video(video_path):
    """Detect objects in every 2nd frame of a video and return an annotated mp4.

    Frames are extracted to a temp directory, run through the module-level
    YOLOv5 `model`, and re-assembled into 'output_video.mp4' at half the
    source frame rate (since only every 2nd frame is kept).

    Args:
        video_path: path to the input video file (as provided by Gradio).

    Returns:
        Path to the rendered output video.

    Raises:
        ValueError: if no frames could be read from the input.
    """
    frames_dir = 'frames'
    results_dir = 'results'
    # Start from clean temp directories (previous runs may have left files).
    for directory in (frames_dir, results_dir):
        shutil.rmtree(directory, ignore_errors=True)
        os.makedirs(directory, exist_ok=True)

    video = cv2.VideoCapture(video_path)
    # Some containers report 0 FPS; fall back so VideoWriter stays valid.
    frame_rate = video.get(cv2.CAP_PROP_FPS) or 30.0

    try:
        # --- extract every 2nd frame to disk ---
        frame_count = 0
        process_frame_count = 0
        while True:
            success, frame = video.read()
            if not success:
                break
            if frame_count % 2 == 0:  # process every 2nd frame (adjust as needed)
                frame_output_path = os.path.join(
                    frames_dir, f'frame_{process_frame_count:04d}.jpg')
                cv2.imwrite(frame_output_path, frame)
                process_frame_count += 1
            frame_count += 1
        video.release()

        # Guard: an unreadable/empty video would otherwise crash below
        # with an IndexError on frame_files[0].
        if process_frame_count == 0:
            raise ValueError(f'could not read any frames from {video_path!r}')

        # --- run detection on each extracted frame ---
        for i in range(process_frame_count):
            frame_path = os.path.join(frames_dir, f'frame_{i:04d}.jpg')
            frame = cv2.imread(frame_path)
            # cv2 loads BGR but YOLOv5 hub models expect RGB numpy arrays;
            # convert in, then convert the rendered result back for imwrite.
            results = model(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            rendered = results.render()[0]
            results_output_path = os.path.join(results_dir, f'results_{i:04d}.jpg')
            cv2.imwrite(results_output_path, cv2.cvtColor(rendered, cv2.COLOR_RGB2BGR))

        # --- stitch the annotated frames into the output video ---
        frame_files = sorted(os.listdir(results_dir))
        first_frame = cv2.imread(os.path.join(results_dir, frame_files[0]))
        height, width, _ = first_frame.shape
        video_output_path = 'output_video.mp4'
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        # Half the source rate, because only every 2nd frame was kept.
        video_writer = cv2.VideoWriter(
            video_output_path, fourcc, frame_rate / 2, (width, height))
        for frame_file in frame_files:
            video_writer.write(cv2.imread(os.path.join(results_dir, frame_file)))
        video_writer.release()
        return video_output_path
    finally:
        # Always remove the temp directories, even when a step above fails.
        shutil.rmtree(frames_dir, ignore_errors=True)
        shutil.rmtree(results_dir, ignore_errors=True)
# Create Gradio interfaces for the two modes. NOTE: the original used the
# gr.inputs.* namespace, which was removed in Gradio 3.x and raises at
# startup on current versions — use the top-level components instead.
img_interface = gr.Interface(
    fn=detect_image,
    inputs=gr.Image(),
    outputs="image",
    examples=[os.path.join(os.path.abspath(''), "plastic_bottles1.jpg")],
    title="Image",
    cache_examples=True,
)

vid_interface = gr.Interface(
    fn=detect_video,
    inputs=gr.Video(),
    outputs="video",
    examples=[
        os.path.join(os.path.abspath(''), "a-plastic-bag-is-floating-in-a-sea-ocean-plastic-pollution-big-environmental-p-SBV-347235576-preview.mp4"),
        os.path.join(os.path.abspath(''), "hand-woman-in-yellow-gloves-picking-up-empty-plastic-bottles-cleaning-on-the-b-SBV-346452144-preview.mp4"),
        os.path.join(os.path.abspath(''), "plastic-bottle-being-dumped-in-to-the-sea-ocean-pollution-in-ocean-is-a-big-en-SBV-347235586-preview.mp4"),
        os.path.join(os.path.abspath(''), "pollution-garbages-plastic-and-wastes-on-the-beach-after-winter-storms-SBV-331355306-preview.mp4"),
        os.path.join(os.path.abspath(''), "volunteer-woman-picking-plastic-bottle-into-trash-plastic-bag-black-for-cleani-SBV-346871657-preview.mp4"),
    ],
    title="Video",
    cache_examples=True,
)

# Group the two interfaces under tabs and launch the app.
tabbed_interface = gr.TabbedInterface([img_interface, vid_interface], ["Image", "Video"])
tabbed_interface.launch(debug=True)