# Hugging Face Space: YOLOv5 person counter.
# (The lines "Spaces:" / "Runtime error" here were page-scrape residue from the
# Space's web UI, not part of the program; preserved as this comment.)
import torch
import cv2
import os
import numpy as np
import gradio as gr

# Load the pre-trained YOLOv5 model from Torch Hub.
# 'yolov5s' is the small/fast variant; weights are downloaded on first run,
# so this line requires network access the first time the app starts.
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
| # Fungsi untuk memproses video | |
def process_video(video_path):
    """Detect people in every frame of a video and save an annotated copy.

    Runs YOLOv5 on each frame, draws the detection boxes, overlays a
    per-frame person count, and writes the result as an MP4.

    Args:
        video_path: Path to the input video file.

    Returns:
        Path to the annotated output video ("output_videos/person_counter_output.mp4").

    Raises:
        ValueError: If the input video cannot be opened.
    """
    # Output directory for the processed video.
    output_dir = "output_videos"
    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, "person_counter_output.mp4")

    # Open the input video.
    cap = cv2.VideoCapture(video_path)
    # Fail fast: an unopened capture silently reports 0x0 frames and 0 fps,
    # which would produce a corrupt/empty output file.
    if not cap.isOpened():
        raise ValueError(f"Could not open video: {video_path}")

    # Input video specs (some containers report fps=0; fall back to 30).
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30

    # VideoWriter for the annotated output.
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))

    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            # YOLOv5 inference on this frame.
            results = model(frame)
            detections = results.pred[0]
            names = model.names
            # Count only detections labeled 'person'
            # (last column of each detection row is the class index).
            person_count = sum(
                1 for d in detections if names[int(d[-1])] == "person"
            )
            # render() draws the boxes and returns the frames; copy explicitly
            # so putText does not write into YOLOv5's internal buffer.
            annotated_frame = np.copy(results.render()[0])
            # Overlay the person count on the frame.
            cv2.putText(annotated_frame, f"Person Count: {person_count}", (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            # Write the annotated frame to the output video.
            out.write(annotated_frame)
    finally:
        # Always release both handles, even if inference fails mid-stream.
        cap.release()
        out.release()

    return output_path
| # Fungsi untuk memproses gambar | |
def process_image(image_path):
    """Detect people in a single image and save an annotated copy.

    Runs YOLOv5 on the image, draws detection boxes, overlays the person
    count, and writes the result as a JPEG.

    Args:
        image_path: Path to the input image file.

    Returns:
        Path to the annotated output image ("output_images/person_counter_output.jpg").

    Raises:
        ValueError: If the image cannot be read.
    """
    # Read the image. cv2.imread returns None (no exception) on unreadable
    # or missing files — fail fast instead of crashing inside the model.
    image = cv2.imread(image_path)
    if image is None:
        raise ValueError(f"Could not read image: {image_path}")

    # YOLOv5 inference.
    results = model(image)
    detections = results.pred[0]
    names = model.names

    # Count only detections labeled 'person'
    # (last column of each detection row is the class index).
    person_count = sum(1 for d in detections if names[int(d[-1])] == "person")

    # render() draws the boxes and returns the frames; copy explicitly
    # so putText does not write into YOLOv5's internal buffer.
    annotated_image = np.copy(results.render()[0])

    # Overlay the person count on the image.
    cv2.putText(annotated_image, f"Person Count: {person_count}", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    # Save the annotated result.
    output_dir = "output_images"
    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, "person_counter_output.jpg")
    cv2.imwrite(output_path, annotated_image)
    return output_path
| # Fungsi Gradio untuk antarmuka | |
def gradio_interface(file, is_video):
    """Dispatch an uploaded file to the video or image pipeline.

    Args:
        file: Filesystem path of the uploaded file.
        is_video: True to process the upload as a video, False as an image.

    Returns:
        Path to the processed output file.
    """
    handler = process_video if is_video else process_image
    return handler(file)
| # Antarmuka Gradio | |
| iface = gr.Interface( | |
| fn=gradio_interface, | |
| inputs=[ | |
| gr.File(type="filepath", label="Upload File (Image/Video)"), | |
| gr.Checkbox(label="Is Video?", value=True), | |
| ], | |
| outputs=gr.File(label="Processed File"), | |
| title="Person Counter using YOLOv5", | |
| description="Upload a video or image file to detect and count the number of people using YOLOv5." | |
| ) | |
| # Menjalankan aplikasi | |
| if __name__ == "__main__": | |
| iface.launch() | |