Randi-Palguna committed on
Commit
4cf64da
·
0 Parent(s):

Initial commit

Browse files
Files changed (7) hide show
  1. .gitattributes +36 -0
  2. README.md +12 -0
  3. app.py +164 -0
  4. best.pt +3 -0
  5. drone_footage.mp4 +3 -0
  6. drone_footage_result.mp4 +3 -0
  7. requirements.txt +5 -0
.gitattributes ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Data Sains Sawit Counting
3
+ emoji: 🏃
4
+ colorFrom: indigo
5
+ colorTo: purple
6
+ sdk: gradio
7
+ sdk_version: 6.0.1
8
+ app_file: app.py
9
+ pinned: false
10
+ ---
11
+
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import cv2
from ultralytics import YOLO
import os

# Trained YOLO weights (LFS-tracked in this repo).
MODEL_PATH = "best.pt"
# The tracker config is generated at startup so the Space ships no extra file.
TRACKER_FILE = "my_tracker.yaml"
FOOTAGE_EXAMPLE_PATH = "drone_footage.mp4"

# ByteTrack configuration tuned for counting: zero detection thresholds keep
# weak detections associated with existing tracks, a large buffer (300 frames)
# survives long occlusions, and a high new_track_thresh avoids spawning
# spurious new tracks from single noisy detections.
tracker_config = """
tracker_type: bytetrack
track_high_thresh: 0
track_low_thresh: 0
track_buffer: 300
fuse_score: True
match_thresh: 0.9
new_track_thresh: 0.85
"""
# Explicit encoding so the YAML bytes are identical on every platform
# (default encoding is locale-dependent).
with open(TRACKER_FILE, "w", encoding="utf-8") as f:
    f.write(tracker_config)

model = YOLO(MODEL_PATH)
+
24
def process_video(video_path, conf_threshold, iou_threshold):
    """Track and count oil-palm Fresh Fruit Bunches (FFB) in a video.

    Runs YOLO + ByteTrack frame by frame. A track is counted only after it
    has been seen for MIN_FRAMES_TO_COUNT frames, and its class is decided
    by a majority vote over all frames it appeared in, so flickering false
    positives and occasional misclassifications don't skew the totals. The
    running counts are overlaid on each frame and the result is re-encoded
    to H.264 for browser playback.

    Args:
        video_path: Path to the input video, or None (nothing uploaded).
        conf_threshold: YOLO detection confidence threshold (0-1).
        iou_threshold: YOLO NMS IoU threshold (0-1).

    Returns:
        Path to the processed, web-ready video, or None when no input was
        given. Falls back to the raw mp4v output if ffmpeg is unavailable.
    """
    import subprocess  # local import: only needed for the final re-encode

    if video_path is None:
        return None

    # A track must persist this many frames before it is counted.
    MIN_FRAMES_TO_COUNT = 60
    class_names = model.names

    track_history = {}  # track_id -> {'frame_count': int, 'class_votes': dict}
    class_counts = {name: 0 for name in class_names.values()}
    stable_counted_ids = set()

    cap = cv2.VideoCapture(video_path)
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    if not fps or fps <= 0:
        # Some containers report 0 FPS; a zero-FPS VideoWriter writes a
        # broken file, so fall back to a sane default.
        fps = 30.0

    output_path = "output_counted.mp4"
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (w, h))

    print("Processing video...")

    try:
        while cap.isOpened():
            success, frame = cap.read()
            if not success:
                break

            results = model.track(
                frame,
                persist=True,
                verbose=False,
                tracker=TRACKER_FILE,
                conf=conf_threshold,
                iou=iou_threshold
            )

            annotated_frame = results[0].plot(line_width=2, font_size=1)

            # boxes.id is None on frames where the tracker has no tracks.
            if results[0].boxes.id is not None:
                track_ids = results[0].boxes.id.int().tolist()
                class_indices = results[0].boxes.cls.int().tolist()

                for track_id, cls_index in zip(track_ids, class_indices):
                    class_name = class_names[cls_index]

                    if track_id not in track_history:
                        track_history[track_id] = {
                            'frame_count': 1,
                            'class_votes': {class_name: 1}
                        }
                    else:
                        track_history[track_id]['frame_count'] += 1
                        votes = track_history[track_id]['class_votes']
                        votes[class_name] = votes.get(class_name, 0) + 1

                    # Count the track once it is stable, exactly once.
                    if (track_history[track_id]['frame_count'] >= MIN_FRAMES_TO_COUNT
                            and track_id not in stable_counted_ids):
                        stable_counted_ids.add(track_id)

                        # Majority vote over the class labels seen so far.
                        votes = track_history[track_id]['class_votes']
                        stable_class = max(votes, key=votes.get)

                        class_counts[stable_class] += 1

            total_stable_count = len(stable_counted_ids)

            text_lines = [f'Total FFBs Counted: {total_stable_count}']
            for class_name, count in class_counts.items():
                if count > 0:
                    text_lines.append(f'{class_name}: {count}')

            font_scale = 1.0
            thickness = 2
            font = cv2.FONT_HERSHEY_SIMPLEX

            # Use a reference string to get a consistent line height.
            (text_w, text_h), _ = cv2.getTextSize('Test', font, font_scale, thickness)
            line_height = text_h + 10

            x_pos, y_pos = 10, 10

            # Size the background box to the widest line.
            max_line_w = 0
            for line in text_lines:
                (line_w, _), _ = cv2.getTextSize(line, font, font_scale, thickness)
                if line_w > max_line_w:
                    max_line_w = line_w

            total_block_h = 10 + (line_height * len(text_lines)) - 5
            total_block_w = 10 + max_line_w + 10

            # Filled black rectangle behind the counter text for readability.
            cv2.rectangle(annotated_frame, (x_pos, y_pos), (total_block_w, total_block_h), (0, 0, 0), -1)

            current_y = y_pos + text_h + 5
            for line in text_lines:
                cv2.putText(annotated_frame, line, (x_pos + 5, current_y), font, font_scale, (255, 255, 255), thickness)
                current_y += line_height

            out.write(annotated_frame)
    finally:
        # Always release the capture and writer, even if tracking raised.
        cap.release()
        out.release()

    print(f"Final Count: {len(stable_counted_ids)}")
    print(f"Class Counts: {class_counts}")

    # Re-encode to H.264: browsers generally cannot play mp4v streams.
    # Argument-list form (shell=False) avoids shell-quoting issues entirely.
    final_output_path = "final_web_ready.mp4"
    result = subprocess.run(
        ["ffmpeg", "-y", "-i", output_path, "-vcodec", "libx264", final_output_path],
        check=False,
    )
    if result.returncode != 0:
        # ffmpeg missing or failed: return the raw output instead of a
        # path to a file that was never created.
        return output_path

    return final_output_path
131
+
132
+
133
# Description shown above the interface. This is raw HTML, so emphasis must
# use HTML tags — Markdown ** markers are not rendered inside an HTML string.
description_html = """
<p>Upload a video <b>(preferably drone footage)</b> showing Oil Palm Fresh Fruit Bunches (FFB). The model will count the detected FFBs.</p>
<h3>Demo Result:</h3>
<div style="display: flex; justify-content: center;">
<video width="640" height="360" controls autoplay loop muted>
<source src="drone_footage_result.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
</div>
"""

iface = gr.Interface(
    fn=process_video,
    inputs=[
        gr.Video(label="Upload Video"),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.01, label="Confidence Threshold"),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.01, label="IoU Threshold"),
    ],
    outputs=gr.Video(label="Processed Result"),
    title="Oil Palm Fresh Fruit Bunch Classification and Counter",
    description=description_html,

    # Drone footage example, processed once at startup thanks to
    # cache_examples=True so visitors see an instant result.
    examples=[
        # Format: [Video_Path, Conf_Value, IoU_Value]
        [FOOTAGE_EXAMPLE_PATH, 0.25, 0.45]
    ],
    cache_examples=True
)

if __name__ == "__main__":
    iface.launch(ssr_mode=False)
best.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3a6ab4f4b5411ed2e020f7e473d93c99c0a02ded67c59be9802cb05e67a56325
3
+ size 5501267
drone_footage.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9a2b945b5101929e3edf0d8568c617d2805ab24882fa61040f17e2c718f7be0
3
+ size 131613067
drone_footage_result.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dec0f1a75f79184bbb706627aafab72363ff227b6edef30258d6672f7962ccad
3
+ size 380493944
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ ultralytics
2
+ gradio
3
+ opencv-python-headless
4
+ pillow
5
+ numpy<2