File size: 3,339 Bytes
8d6a0e4
 
 
 
 
 
 
 
 
999f5e8
ce9d5f8
 
 
 
 
 
8d6a0e4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ce9d5f8
8d6a0e4
 
 
 
 
ce9d5f8
8d6a0e4
 
 
1927bd2
ce9d5f8
999f5e8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1927bd2
8d6a0e4
999f5e8
 
 
 
 
 
 
8d6a0e4
 
 
999f5e8
 
 
 
 
ce9d5f8
999f5e8
8d6a0e4
 
 
ce9d5f8
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
import torch
import cv2
import os
import numpy as np
import gradio as gr

# Load the pre-trained YOLOv5 "small" model from the Ultralytics hub.
# NOTE(review): this downloads weights on first run and needs network access;
# it executes at import time, so importing this module is not side-effect free.
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)

# Process a video: detect people per frame with YOLOv5 and write an annotated copy.
def process_video(video_path):
    """Run YOLOv5 person detection on every frame of *video_path*.

    Each frame is annotated with detection boxes and a "Person Count"
    overlay, then written to ``output_videos/person_counter_output.mp4``.

    Args:
        video_path: Path to the input video file.

    Returns:
        Path to the annotated output video.

    Raises:
        IOError: If the input video cannot be opened.
    """
    # Output directory and file
    output_dir = "output_videos"
    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, "person_counter_output.mp4")

    # Open the input video and fail loudly on a bad path/codec
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise IOError(f"Cannot open video: {video_path}")

    try:
        # Input video specs
        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        # Some containers report 0 FPS, which would break VideoWriter;
        # fall back to a sane default.
        fps = cap.get(cv2.CAP_PROP_FPS) or 30.0

        # VideoWriter for the annotated output
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))
        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                # YOLOv5's AutoShape treats numpy inputs as RGB, but OpenCV
                # delivers BGR — convert before inference so detection quality
                # and rendered colors are correct.
                rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                results = model(rgb_frame)
                detections = results.pred[0]
                names = model.names

                # Count only detections whose class label is 'person'
                # (last column of each detection row is the class index).
                person_count = sum(
                    1 for d in detections if names[int(d[-1])] == "person"
                )

                # render() draws boxes and returns the (RGB) frame; take an
                # explicit copy, then convert back to BGR for OpenCV drawing
                # and writing.
                annotated_frame = np.copy(results.render()[0])
                annotated_frame = cv2.cvtColor(annotated_frame, cv2.COLOR_RGB2BGR)

                # Overlay the running person count
                cv2.putText(annotated_frame, f"Person Count: {person_count}", (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

                # Write the annotated frame to the output video
                out.write(annotated_frame)
        finally:
            out.release()
    finally:
        # Always release the capture, even if inference or writing fails
        cap.release()

    return output_path

# Process a single image: detect people with YOLOv5 and save an annotated copy.
def process_image(image_path):
    """Run YOLOv5 person detection on *image_path*.

    The image is annotated with detection boxes and a "Person Count"
    overlay, then saved to ``output_images/person_counter_output.jpg``.

    Args:
        image_path: Path to the input image file.

    Returns:
        Path to the annotated output image.

    Raises:
        ValueError: If the image cannot be read.
    """
    # Read the image; cv2.imread silently returns None on failure, so
    # fail loudly here instead of crashing inside the model call.
    image = cv2.imread(image_path)
    if image is None:
        raise ValueError(f"Cannot read image: {image_path}")

    # YOLOv5's AutoShape treats numpy inputs as RGB, but OpenCV delivers
    # BGR — convert before inference.
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = model(rgb_image)
    detections = results.pred[0]
    names = model.names

    # Count only detections whose class label is 'person'
    # (last column of each detection row is the class index).
    person_count = sum(1 for d in detections if names[int(d[-1])] == "person")

    # render() draws boxes and returns the (RGB) image; take an explicit
    # copy, then convert back to BGR for OpenCV drawing and saving.
    annotated_image = np.copy(results.render()[0])
    annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_RGB2BGR)

    # Overlay the person count
    cv2.putText(annotated_image, f"Person Count: {person_count}", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    # Save the annotated result
    output_dir = "output_images"
    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, "person_counter_output.jpg")
    cv2.imwrite(output_path, annotated_image)

    return output_path

# Gradio entry point: route an upload to the matching pipeline.
def gradio_interface(file, is_video):
    """Dispatch an uploaded file to the video or image processing pipeline.

    Args:
        file: Filesystem path of the uploaded file.
        is_video: True to treat the upload as a video, False as an image.

    Returns:
        Path of the processed output file.
    """
    handler = process_video if is_video else process_image
    return handler(file)

# Gradio UI wiring: a file upload plus a video/image toggle in, the
# processed file out.
_upload_input = gr.File(type="filepath", label="Upload File (Image/Video)")
_video_toggle = gr.Checkbox(label="Is Video?", value=True)
_processed_output = gr.File(label="Processed File")

iface = gr.Interface(
    fn=gradio_interface,
    inputs=[_upload_input, _video_toggle],
    outputs=_processed_output,
    title="Person Counter using YOLOv5",
    description="Upload a video or image file to detect and count the number of people using YOLOv5.",
)

# Launch the app only when executed as a script, not on import.
if __name__ == "__main__":
    iface.launch()