MTerryJack commited on
Commit
5929bb5
·
verified ·
1 Parent(s): d55ef00

Upload 10 files

Browse files
class_names.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ big bus
2
+ big truck
3
+ bus-l-
4
+ bus-s-
5
+ car
6
+ mid truck
7
+ small bus
8
+ small truck
9
+ truck-l-
10
+ truck-m-
11
+ truck-s-
12
+ truck-xl-
element.yaml ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ version: 0.1.0
2
+ element_type: Detect
3
+ main: main.py
4
+ source: https://universe.roboflow.com/roboflow-100/vehicles-q0x2v
5
+ objects:
6
+ - big bus
7
+ - big truck
8
+ - bus-l-
9
+ - bus-s-
10
+ - car
11
+ - mid truck
12
+ - small bus
13
+ - small truck
14
+ - truck-l-
15
+ - truck-m-
16
+ - truck-s-
17
+ - truck-xl-
environment.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"PROJECT": "roboflow-platform", "DATASET_OWNER": "pwYAXv9BTpqLyFfgQoPZ", "DATASET_ID": "peHMC7FkjCfPPfu0wVn3", "DATASET_VERSION_ID": "1", "ENDPOINT": "peHMC7FkjCfPPfu0wVn3/1", "RESOLUTION": [640], "BATCH_SIZE": -1, "PREPROCESSING": "{\"auto-orient\": {\"enabled\": true}, \"resize\": {\"format\": \"Stretch to\", \"width\": \"640\", \"enabled\": true, \"height\": \"640\"}}", "CLASS_MAP": {"0": "big bus", "1": "big truck", "2": "bus-l-", "3": "bus-s-", "4": "car", "5": "mid truck", "6": "small bus", "7": "small truck", "8": "truck-l-", "9": "truck-m-", "10": "truck-s-", "11": "truck-xl-"}, "COLORS": {"big bus": "#C7FC00", "big truck": "#8622FF", "bus-l-": "#FE0056", "bus-s-": "#00FFCE", "car": "#FF8000", "mid truck": "#00B7EB", "small bus": "#FFFF00", "small truck": "#FF00FF", "truck-l-": "#0E7AFE", "truck-m-": "#FFABAB", "truck-s-": "#0000FF", "truck-xl-": "#a0522d"}}
example.jpg ADDED
main.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import sys
4
+ from io import BytesIO
5
+ from pathlib import Path
6
+ from typing import Any, Dict, List
7
+
8
+ import numpy as np
9
+ from PIL import Image
10
+ import onnxruntime as ort
11
+
12
+
13
def load_class_names(base_dir: Path) -> dict[int, str]:
    """Read class_names.txt from *base_dir* and map line index -> label.

    Blank lines are skipped but still consume their index, so the mapping
    stays aligned with the model's class ids. Returns an empty dict when
    the file is absent.
    """
    path = base_dir / "class_names.txt"
    if not path.exists():
        return {}
    return {
        index: line.strip()
        for index, line in enumerate(path.read_text().splitlines())
        if line.strip()
    }
23
+
24
+
25
def load_image(frame: Any, base_dir: Path) -> Image.Image:
    """Decode *frame* into an RGB PIL image.

    *frame* is either raw encoded image bytes or a filesystem path.
    Relative paths are resolved against the current working directory
    first, then against *base_dir* as a fallback.
    """
    if isinstance(frame, (bytes, bytearray, memoryview)):
        buffer = BytesIO(frame)
        return Image.open(buffer).convert("RGB")

    raw = str(frame)
    path = Path(raw)
    if not path.is_absolute():
        path = (Path.cwd() / path).resolve()
    if not path.exists():
        fallback = (base_dir / raw).resolve()
        if fallback.exists():
            path = fallback
    return Image.open(path).convert("RGB")
37
+
38
+
39
def load_model(*_args: Any, **_kwargs: Any):
    """Build the ONNX inference bundle, or return None when weights are absent.

    The returned dict carries the ORT session, its input tensor name, the
    index->label map from class_names.txt, and the fixed 640px square
    input size that run_model() expects. All positional/keyword arguments
    are accepted and ignored for interface compatibility.
    """
    base_dir = Path(__file__).resolve().parent
    weights = base_dir / "yolov5s_weights.onnx"
    if not weights.exists():
        return None
    session = ort.InferenceSession(str(weights), providers=["CPUExecutionProvider"])
    input_name = session.get_inputs()[0].name
    return {
        "session": session,
        "input_name": input_name,
        "names": load_class_names(base_dir),
        "size": 640,
    }
51
+
52
+
53
+ def _nms(boxes: np.ndarray, scores: np.ndarray, iou_thresh: float) -> List[int]:
54
+ if boxes.size == 0:
55
+ return []
56
+ x1, y1, x2, y2 = boxes.T
57
+ areas = (x2 - x1) * (y2 - y1)
58
+ order = scores.argsort()[::-1]
59
+ keep: List[int] = []
60
+ while order.size > 0:
61
+ i = int(order[0])
62
+ keep.append(i)
63
+ if order.size == 1:
64
+ break
65
+ xx1 = np.maximum(x1[i], x1[order[1:]])
66
+ yy1 = np.maximum(y1[i], y1[order[1:]])
67
+ xx2 = np.minimum(x2[i], x2[order[1:]])
68
+ yy2 = np.minimum(y2[i], y2[order[1:]])
69
+ w = np.clip(xx2 - xx1, 0, None)
70
+ h = np.clip(yy2 - yy1, 0, None)
71
+ inter = w * h
72
+ iou = inter / (areas[i] + areas[order[1:]] - inter + 1e-6)
73
+ inds = np.where(iou <= iou_thresh)[0]
74
+ order = order[inds + 1]
75
+ return keep
76
+
77
+
78
def run_model(model, frame: "np.ndarray") -> List[Dict[str, Any]]:
    """Run YOLOv5 ONNX detection on a single RGB frame.

    Parameters:
        model: the dict produced by load_model(); anything else (e.g. None
            when the weights file is missing) yields an empty list.
        frame: HxWx3 image array convertible by PIL.Image.fromarray.

    Returns a list of detection dicts, each with the class label, an xyxy
    bbox in original-image coordinates, the confidence score, and a
    synthetic per-detection track id.
    """
    if not isinstance(model, dict):
        return []
    session: ort.InferenceSession = model["session"]
    input_name = model["input_name"]
    names: dict[int, str] = model["names"]
    size = int(model["size"])

    # Plain "stretch" resize to the square network input (matches the
    # dataset preprocessing declared in environment.json).
    image = Image.fromarray(frame).convert("RGB")
    orig_w, orig_h = image.size
    resized = image.resize((size, size))
    inp = np.array(resized).astype("float32") / 255.0
    inp = np.transpose(inp, (2, 0, 1))[None, ...]  # HWC -> NCHW, add batch dim

    outputs = session.run(None, {input_name: inp})
    preds = outputs[0][0]  # (num_anchors, 5 + num_classes)
    if preds.shape[1] < 6:
        # Unexpected head layout: need at least xywh + objectness + 1 class.
        return []

    boxes = preds[:, :4]
    objectness = preds[:, 4]
    class_scores = preds[:, 5:]
    class_ids = np.argmax(class_scores, axis=1)
    class_conf = class_scores[np.arange(class_scores.shape[0]), class_ids]
    # YOLOv5 confidence = objectness * best class probability.
    scores = objectness * class_conf

    conf_thresh = 0.25
    keep = scores > conf_thresh
    boxes = boxes[keep]
    scores = scores[keep]
    class_ids = class_ids[keep]

    if boxes.size == 0:
        return []

    # xywh (center format) -> xyxy corners
    x, y, w, h = boxes.T
    boxes_xyxy = np.stack([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=1)

    keep_idx = _nms(boxes_xyxy, scores, 0.45)

    # Map network-input coordinates back to the original image size.
    # Loop-invariant: computed once instead of once per detection.
    scale_x = orig_w / size
    scale_y = orig_h / size
    scale = np.array([scale_x, scale_y, scale_x, scale_y])

    detections: List[Dict[str, Any]] = []
    for det_idx, i in enumerate(keep_idx):
        xyxy = boxes_xyxy[i] * scale
        class_id = int(class_ids[i])
        label = names.get(class_id, str(class_id))
        detections.append(
            {
                "frame_idx": 0,  # single-frame pipeline; always frame 0
                "class": label,
                "bbox": [float(v) for v in xyxy],
                "score": float(scores[i]),
                "track_id": f"f0-d{det_idx}",
            }
        )

    return detections
142
+
143
+
144
def build_parser() -> argparse.ArgumentParser:
    """Create the command-line parser for the detector script.

    NOTE(review): --stdin-raw is store_true with default=True, so the
    flag can never be turned off — it appears to exist purely for
    documentation/compatibility; confirm with the element runner.
    """
    parser = argparse.ArgumentParser(
        description="Run vehicle detection (YOLOv5 ONNX).",
    )
    parser.add_argument(
        "--stdin-raw",
        action="store_true",
        default=True,
        help="Read raw image bytes from stdin.",
    )
    return parser
153
+
154
+
155
if __name__ == "__main__":
    # Args are parsed only for --help/validation; the values are unused.
    build_parser().parse_args()

    base_dir = Path(__file__).resolve().parent
    model = load_model()
    if model is None:
        # No weights on disk: emit an empty detection list and exit cleanly.
        print("[]")
        sys.exit(0)

    try:
        image = load_image(sys.stdin.buffer.read(), base_dir)
    except Exception:
        # Undecodable stdin payload: degrade to an empty result, not a crash.
        print("[]")
        sys.exit(0)

    output = run_model(model, np.array(image))
    print(json.dumps(output))
model_type.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "project_task_type": "object-detection",
3
+ "model_type": "yolov5"
4
+ }
pyproject.toml ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "vehicles-q0x2v-1"
3
+ version = "0.1.0"
4
+ requires-python = ">=3.11"
5
+ dependencies = [
6
+ "numpy>=1.26",
7
+ "pillow>=10.0",
8
+ "ultralytics>=8.0.0",
9
+ "onnx>=1.16",
10
+ "onnxruntime>=1.17",
11
+ ]
uv.lock ADDED
The diff for this file is too large to render. See raw diff
 
weights_with_metadata.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b21b538155c43a429ded4f493c2fc924027a2b5139a167773e578e9b36c08e3c
3
+ size 28617883
yolov5s_weights.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e95fddc9f1c09b1f2c59d5d690b7fc4371cee3446bff34f1a6052bbcf65d762
3
+ size 28617841