vagheshpatel committed on
Commit
9e708c0
·
verified ·
1 Parent(s): 4dfd5e4

Sync vehicle-detection from metro-analytics-catalog

Browse files
README.md CHANGED
@@ -202,10 +202,10 @@ Detected vehicles: 1
202
  ### DLStreamer Sample
203
 
204
  The pipeline below runs the FP16 YOLO26 detector on the sample video via
205
- `gvadetect`, filters detections to vehicle classes in a buffer probe using
206
- the DLStreamer Python bindings (`gstgva.VideoFrame`), overlays bounding boxes,
207
- saves the annotated result to `output_dlstreamer.mp4`, and prints the vehicle count per
208
- frame.
209
 
210
  > **Notes on running this sample:**
211
  >
@@ -222,10 +222,13 @@ frame.
222
  > ```
223
 
224
  ```python
 
 
 
 
225
  import gi
226
 
227
  gi.require_version("Gst", "1.0")
228
- gi.require_version("GstVideo", "1.0")
229
  from gi.repository import Gst
230
  from gstgva import VideoFrame
231
 
@@ -233,6 +236,10 @@ Gst.init(None)
233
 
234
  INPUT_VIDEO = "test_video.mp4"
235
  VEHICLE_LABELS = {"car", "motorcycle", "bus", "truck"}
 
 
 
 
236
 
237
  # For CPU: change device=GPU to device=CPU.
238
  # For NPU: change device=GPU to device=NPU (batch-size=1, nireq=4 recommended).
@@ -242,36 +249,73 @@ pipeline_str = (
242
  "gvadetect model=yolo26n_openvino_model/yolo26n.xml "
243
  "device=GPU "
244
  "threshold=0.4 ! queue ! "
245
- "gvawatermark ! videoconvert ! video/x-raw,format=I420 ! "
246
- "openh264enc ! h264parse ! "
247
- "mp4mux ! filesink name=sink location=output_dlstreamer.mp4"
248
  )
249
  pipeline = Gst.parse_launch(pipeline_str)
 
250
 
 
251
 
252
- def on_buffer(pad, info):
253
- buf = info.get_buffer()
254
- caps = pad.get_current_caps()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
255
  frame = VideoFrame(buf, caps=caps)
256
- vehicles = [r for r in frame.regions() if r.label() in VEHICLE_LABELS]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
257
  if vehicles:
258
  print(f"Vehicle count: {len(vehicles)}", flush=True)
259
- for v in vehicles:
260
- print(f" {v.label()} at ({v.rect().x},{v.rect().y})", flush=True)
261
- return Gst.PadProbeReturn.OK
262
-
263
 
264
- sink = pipeline.get_by_name("sink")
265
- sink_pad = sink.get_static_pad("sink")
266
- sink_pad.add_probe(Gst.PadProbeType.BUFFER, on_buffer)
267
 
268
- pipeline.set_state(Gst.State.PLAYING)
269
- bus = pipeline.get_bus()
270
- bus.timed_pop_filtered(
271
- Gst.CLOCK_TIME_NONE,
272
- Gst.MessageType.EOS | Gst.MessageType.ERROR,
273
- )
274
  pipeline.set_state(Gst.State.NULL)
 
 
 
 
275
  ```
276
 
277
  #### Expected Output
 
202
  ### DLStreamer Sample
203
 
204
  The pipeline below runs the FP16 YOLO26 detector on the sample video via
205
+ `gvadetect`, filters detections to vehicle classes using the DLStreamer
206
+ Python bindings (`gstgva.VideoFrame`), draws only vehicle bounding boxes
207
+ with OpenCV, saves the annotated result to `output_dlstreamer.mp4`, and
208
+ prints the vehicle count per frame.
209
 
210
  > **Notes on running this sample:**
211
  >
 
222
  > ```
223
 
224
  ```python
225
+ import subprocess
226
+
227
+ import cv2
228
+ import numpy as np
229
  import gi
230
 
231
  gi.require_version("Gst", "1.0")
 
232
  from gi.repository import Gst
233
  from gstgva import VideoFrame
234
 
 
236
 
237
  INPUT_VIDEO = "test_video.mp4"
238
  VEHICLE_LABELS = {"car", "motorcycle", "bus", "truck"}
239
+ COLORS = {
240
+ "car": (0, 255, 0), "motorcycle": (255, 128, 0),
241
+ "bus": (0, 128, 255), "truck": (128, 0, 255),
242
+ }
243
 
244
  # For CPU: change device=GPU to device=CPU.
245
  # For NPU: change device=GPU to device=NPU (batch-size=1, nireq=4 recommended).
 
249
  "gvadetect model=yolo26n_openvino_model/yolo26n.xml "
250
  "device=GPU "
251
  "threshold=0.4 ! queue ! "
252
+ "videoconvert ! video/x-raw,format=BGR ! "
253
+ "appsink name=sink emit-signals=false sync=false"
 
254
  )
255
  pipeline = Gst.parse_launch(pipeline_str)
256
+ appsink = pipeline.get_by_name("sink")
257
 
258
# Run the pipeline and process frames pulled from the appsink:
# read GVA detection metadata, draw vehicle boxes with OpenCV, and
# stream raw BGR frames to an ffmpeg child process for H.264 encoding.
pipeline.set_state(Gst.State.PLAYING)

proc = None  # ffmpeg encoder child; started lazily on the first frame

while True:
    sample = appsink.emit("pull-sample")
    if sample is None:
        # pull-sample returns None at EOS (or when the pipeline errors/flushes).
        break

    buf = sample.get_buffer()
    caps = sample.get_caps()
    struct = caps.get_structure(0)
    width = struct.get_value("width")
    height = struct.get_value("height")

    # Start the ffmpeg encoder once the first frame's geometry is known.
    if proc is None:
        ok, fps_num, fps_den = struct.get_fraction("framerate")
        # Fall back to 30 fps when caps carry no usable framerate.
        fps = fps_num / fps_den if ok and fps_den > 0 else 30.0
        proc = subprocess.Popen(
            ["ffmpeg", "-y", "-f", "rawvideo", "-pix_fmt", "bgr24",
             "-s", f"{width}x{height}", "-r", str(fps),
             "-i", "pipe:0", "-c:v", "libx264", "-pix_fmt", "yuv420p",
             "-movflags", "+faststart", "output_dlstreamer.mp4"],
            stdin=subprocess.PIPE, stderr=subprocess.DEVNULL,
        )

    # Read detection metadata and keep only the vehicle classes.
    frame = VideoFrame(buf, caps=caps)
    vehicles = [(r.label(), r.rect()) for r in frame.regions()
                if r.label() in VEHICLE_LABELS]

    # Map the buffer read-only and copy pixels into a writable numpy array;
    # the copy lets us unmap immediately and draw on our own memory.
    success, map_info = buf.map(Gst.MapFlags.READ)
    if not success:
        continue  # skip this frame rather than crash on a map failure
    try:
        arr = np.ndarray((height, width, 3), dtype=np.uint8,
                         buffer=map_info.data).copy()
    finally:
        # Always release the mapping, even if the ndarray copy raises.
        buf.unmap(map_info)

    # Draw vehicle bounding boxes only.
    for label, rect in vehicles:
        x1, y1 = int(rect.x), int(rect.y)
        x2, y2 = int(rect.x + rect.w), int(rect.y + rect.h)
        color = COLORS.get(label, (0, 255, 0))
        cv2.rectangle(arr, (x1, y1), (x2, y2), color, 2)
        # Keep the label text on-screen when the box touches the top edge.
        cv2.putText(arr, label, (x1, max(y1 - 6, 0)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)

    if vehicles:
        print(f"Vehicle count: {len(vehicles)}", flush=True)
        for label, rect in vehicles:
            print(f" {label} at ({int(rect.x)},{int(rect.y)})", flush=True)

    # Feed the annotated frame to ffmpeg; stop cleanly if the encoder
    # process exited early instead of dying on an unhandled BrokenPipeError.
    try:
        proc.stdin.write(arr.tobytes())
    except BrokenPipeError:
        break

pipeline.set_state(Gst.State.NULL)
if proc is not None:
    proc.stdin.close()
    proc.wait()
    print("Wrote output_dlstreamer.mp4", flush=True)
319
  ```
320
 
321
  #### Expected Output
expected_output_dlstreamer.gif CHANGED

Git LFS Details

  • SHA256: 19ba89f40be3dfef27d57b138e9e5bb6514db99ead8a65bde0480f03df731f9b
  • Pointer size: 131 Bytes
  • Size of remote file: 870 kB

Git LFS Details

  • SHA256: 13f040352abd92d5c62e15c832489b6b48c99dcd691a06a31d4ece7bc7f0a5a0
  • Pointer size: 133 Bytes
  • Size of remote file: 11.4 MB
export_and_quantize.sh CHANGED
@@ -47,7 +47,7 @@ fi
47
  echo "--- Downloading sample test video ---"
48
  if [[ ! -f test_video.mp4 ]]; then
49
  wget -q -O test_video.mp4 \
50
- https://github.com/intel-iot-devkit/sample-videos/raw/master/car-detection.mp4
51
  echo "Downloaded: test_video.mp4"
52
  else
53
  echo "Already present: test_video.mp4"
 
47
  echo "--- Downloading sample test video ---"
48
  if [[ ! -f test_video.mp4 ]]; then
49
  wget -q -O test_video.mp4 \
50
+ "https://www.pexels.com/download/video/34505889?fps=29.97&h=360&w=640"
51
  echo "Downloaded: test_video.mp4"
52
  else
53
  echo "Already present: test_video.mp4"